The following issues were found:

torch/optim/lbfgs.py
72 issues
Attempted relative import beyond top-level package
Error

Line: 3 Column: 1

import torch
from functools import reduce
from .optimizer import Optimizer


def _cubic_interpolate(x1, f1, g1, x2, f2, g2, bounds=None):
    # ported from https://github.com/torch/optim/blob/master/polyinterp.lua
    # Compute bounds of interpolation area
    if bounds is not None:

Reported by Pylint.
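Note: pylint raises E0402 here because it analyzes the file in isolation, without the torch package root on its path; at runtime the relative import is valid, since lbfgs.py is only ever imported as part of torch.optim. Running pylint from the repository root so the package resolves is the cleaner fix; a minimal sketch of a per-line suppression, using pylint's standard symbol for E0402:

    # valid at runtime: this module is always imported as torch.optim.lbfgs
    from .optimizer import Optimizer  # pylint: disable=relative-beyond-top-level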

Module 'torch' has no 'contiguous_format' member
Error

Line: 47 Column: 31

                  max_ls=25):
    # ported from https://github.com/torch/optim/blob/master/lswolfe.lua
    d_norm = d.abs().max()
    g = g.clone(memory_format=torch.contiguous_format)
    # evaluate objective and gradient using initial step
    f_new, g_new = obj_func(x, t, d)
    ls_func_evals = 1
    gtd_new = g_new.dot(d)

Reported by Pylint.
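Note: the repeated "Module 'torch' has no ... member" findings in this report are E1101 (no-member) false positives: names such as torch.contiguous_format are registered by torch's C extension, which pylint cannot introspect statically. A sketch of the per-line form; project-wide, the same effect comes from pylint's standard generated-members=torch.* or extension-pkg-allow-list=torch options in .pylintrc:

    g = g.clone(memory_format=torch.contiguous_format)  # pylint: disable=no-member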

Module 'torch' has no 'contiguous_format' member
Error

Line: 62 Column: 60

        if f_new > (f + c1 * t * gtd) or (ls_iter > 1 and f_new >= f_prev):
            bracket = [t_prev, t]
            bracket_f = [f_prev, f_new]
            bracket_g = [g_prev, g_new.clone(memory_format=torch.contiguous_format)]
            bracket_gtd = [gtd_prev, gtd_new]
            break

        if abs(gtd_new) <= -c2 * gtd:
            bracket = [t]

Reported by Pylint.

Module 'torch' has no 'contiguous_format' member
Error

Line: 76 Column: 60

        if gtd_new >= 0:
            bracket = [t_prev, t]
            bracket_f = [f_prev, f_new]
            bracket_g = [g_prev, g_new.clone(memory_format=torch.contiguous_format)]
            bracket_gtd = [gtd_prev, gtd_new]
            break

        # interpolate
        min_step = t + 0.01 * (t - t_prev)

Reported by Pylint.

Module 'torch' has no 'contiguous_format' member
Error

Line: 96 Column: 44

        # next step
        t_prev = tmp
        f_prev = f_new
        g_prev = g_new.clone(memory_format=torch.contiguous_format)
        gtd_prev = gtd_new
        f_new, g_new = obj_func(x, t, d)
        ls_func_evals += 1
        gtd_new = g_new.dot(d)
        ls_iter += 1

Reported by Pylint.

Module 'torch' has no 'contiguous_format' member
Error

Line: 156 Column: 61

            # Armijo condition not satisfied or not lower than lowest point
            bracket[high_pos] = t
            bracket_f[high_pos] = f_new
            bracket_g[high_pos] = g_new.clone(memory_format=torch.contiguous_format)
            bracket_gtd[high_pos] = gtd_new
            low_pos, high_pos = (0, 1) if bracket_f[0] <= bracket_f[1] else (1, 0)
        else:
            if abs(gtd_new) <= -c2 * gtd:
                # Wolfe conditions satisfied

Reported by Pylint.

Module 'torch' has no 'contiguous_format' member
Error

Line: 173 Column: 60

            # new point becomes new low
            bracket[low_pos] = t
            bracket_f[low_pos] = f_new
            bracket_g[low_pos] = g_new.clone(memory_format=torch.contiguous_format)
            bracket_gtd[low_pos] = gtd_new

    # return stuff
    t = bracket[low_pos]
    f_new = bracket_f[low_pos]

Reported by Pylint.

Module 'torch' has no 'cat' member
Error

Line: 257 Column: 16

            else:
                view = p.grad.view(-1)
            views.append(view)
        return torch.cat(views, 0)

    def _add_grad(self, step_size, update):
        offset = 0
        for p in self._params:
            numel = p.numel()

Reported by Pylint.

Module 'torch' has no 'contiguous_format' member
Error

Line: 269 Column: 39

        assert offset == self._numel()

    def _clone_param(self):
        return [p.clone(memory_format=torch.contiguous_format) for p in self._params]

    def _set_param(self, params_data):
        for p, pdata in zip(self._params, params_data):
            p.copy_(pdata)

Reported by Pylint.

Module 'torch' has no 'mul' member
Error

Line: 386 Column: 25

                # multiply by initial Hessian
                # r/d is the final direction
                d = r = torch.mul(q, H_diag)
                for i in range(num_old):
                    be_i = old_dirs[i].dot(r) * ro[i]
                    r.add_(old_stps[i], alpha=al[i] - be_i)

            if prev_flat_grad is None:

Reported by Pylint.

test/distributed/pipeline/sync/test_stream.py
72 issues
Unable to import 'pytest'
Error

Line: 7 Column: 1

#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch

from torch.distributed.pipeline.sync.stream import (
    CPUStream,
    current_stream,

Reported by Pylint.
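Note: E0401 (import-error) here generally means pylint ran in an environment where the project's dependencies are not installed, not that the import is wrong. Installing pytest and torch into the lint environment (or running pylint from the project's virtualenv) resolves it; a per-import disable is a fallback, sketched here:

    import pytest  # pylint: disable=import-error
    import torch   # pylint: disable=import-error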

Unable to import 'torch'
Error

Line: 8 Column: 1

# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch

from torch.distributed.pipeline.sync.stream import (
    CPUStream,
    current_stream,
    default_stream,

Reported by Pylint.

Unable to import 'torch.distributed.pipeline.sync.stream'
Error

Line: 10 Column: 1

import pytest
import torch

from torch.distributed.pipeline.sync.stream import (
    CPUStream,
    current_stream,
    default_stream,
    get_device,
    is_cuda,

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch

Reported by Pylint.
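Note: C0114 is satisfied by a one-line module docstring placed after the license header. The wording below is an assumption for illustration, not text from the file:

    # This source code is licensed under the BSD license found in the
    # LICENSE file in the root directory of this source tree.
    """Tests for torch.distributed.pipeline.sync.stream helpers."""
    import pytest
    import torch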

Missing class docstring
Error

Line: 26 Column: 1

skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")


class TestNewStream:
    def test_new_stream_cpu(self):
        stream = new_stream(torch.device("cpu"))
        assert stream is CPUStream

    @skip_if_no_cuda

Reported by Pylint.

Missing function or method docstring
Error

Line: 27 Column: 5


class TestNewStream:
    def test_new_stream_cpu(self):
        stream = new_stream(torch.device("cpu"))
        assert stream is CPUStream

    @skip_if_no_cuda
    def test_new_stream_cuda(self):

Reported by Pylint.
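Note: the class- and method-docstring findings (C0115/C0116) in this file follow the same pattern; a sketch with illustrative wording:

    class TestNewStream:
        """Tests for new_stream()."""

        def test_new_stream_cpu(self):
            """new_stream() returns the CPUStream singleton for CPU devices."""
            stream = new_stream(torch.device("cpu"))
            assert stream is CPUStream

Many projects instead disable C0115/C0116 for test directories, since pytest test names are usually self-describing.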

Method could be a function
Error

Line: 27 Column: 5


class TestNewStream:
    def test_new_stream_cpu(self):
        stream = new_stream(torch.device("cpu"))
        assert stream is CPUStream

    @skip_if_no_cuda
    def test_new_stream_cuda(self):

Reported by Pylint.
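Note: R0201 fires because the test methods never touch self, but pytest still collects them as instance methods, so converting them to free functions is optional. One sketch, assuming pylint's standard symbol for R0201:

    class TestNewStream:  # pylint: disable=no-self-use
        def test_new_stream_cpu(self):
            stream = new_stream(torch.device("cpu"))
            assert stream is CPUStream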

Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.
Security

Line: 29
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html

class TestNewStream:
    def test_new_stream_cpu(self):
        stream = new_stream(torch.device("cpu"))
        assert stream is CPUStream

    @skip_if_no_cuda
    def test_new_stream_cuda(self):
        stream = new_stream(torch.device("cuda"))
        assert isinstance(stream, torch.cuda.Stream)

Reported by Bandit.
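Note: Bandit's B101 flags every assert because asserts are stripped under python -O, but bare asserts are exactly the idiom pytest is built on, so in test files this is noise. Two common options: skip B101 for the test tree (bandit -s B101 test/), or mark individual lines with bandit's # nosec comment (the per-test-ID form needs a reasonably recent bandit; bare # nosec works everywhere):

    assert stream is CPUStream  # nosec B101 - test-only assertion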

Method could be a function
Error

Line: 32 Column: 5

        assert stream is CPUStream

    @skip_if_no_cuda
    def test_new_stream_cuda(self):
        stream = new_stream(torch.device("cuda"))
        assert isinstance(stream, torch.cuda.Stream)
        assert stream != torch.cuda.default_stream()

Reported by Pylint.

Missing function or method docstring
Error

Line: 32 Column: 5

        assert stream is CPUStream

    @skip_if_no_cuda
    def test_new_stream_cuda(self):
        stream = new_stream(torch.device("cuda"))
        assert isinstance(stream, torch.cuda.Stream)
        assert stream != torch.cuda.default_stream()

Reported by Pylint.

caffe2/python/operator_test/sequence_ops_test.py
72 issues
Unable to import 'hypothesis'
Error

Line: 8 Column: 1

from caffe2.python import core
from functools import partial
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
import unittest

Reported by Pylint.

Unable to import 'hypothesis.strategies'
Error

Line: 11 Column: 1

from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
import unittest
from caffe2.python import workspace

Reported by Pylint.

Unused argument 'dc'
Error

Line: 112 Column: 70

           **hu.gcs)
    @settings(deadline=10000)
    def test_add_padding(
        self, start_pad_width, end_pad_width, args, ret_lengths, gc, dc
    ):
        lengths, data, start_padding, end_padding = args
        start_padding = np.array(start_padding, dtype=np.float32)
        end_padding = np.array(end_padding, dtype=np.float32)
        outputs = ['output', 'lengths_out'] if ret_lengths else ['output']

Reported by Pylint.
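Note: the gc/dc arguments flagged throughout this file are injected by hypothesis through **hu.gcs, which binds them by keyword, so renaming them to _gc/_dc would break the @given binding. A per-function disable is the safer fix; a sketch:

    def test_add_padding(
        self, start_pad_width, end_pad_width, args, ret_lengths, gc, dc
    ):  # pylint: disable=unused-argument
        ...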

Unused argument 'dc'
Error

Line: 229 Column: 86

           **hu.gcs)
    @settings(deadline=1000)
    def test_add_padding_shape_and_type(
        self, start_pad_width, end_pad_width, num_dims, num_groups, ret_lengths, gc, dc
    ):
        np.random.seed(666)
        lengths = []
        for _ in range(num_groups):
            lengths.append(np.random.randint(0, 3))

Reported by Pylint.

Unused argument 'gc'
Error

Line: 229 Column: 82

           **hu.gcs)
    @settings(deadline=1000)
    def test_add_padding_shape_and_type(
        self, start_pad_width, end_pad_width, num_dims, num_groups, ret_lengths, gc, dc
    ):
        np.random.seed(666)
        lengths = []
        for _ in range(num_groups):
            lengths.append(np.random.randint(0, 3))

Reported by Pylint.

Unused argument 'dc'
Error

Line: 258 Column: 79

           end_pad_width=st.integers(min_value=0, max_value=2),
           args=_gen_test_add_padding(with_pad_data=False),
           **hu.gcs)
    def test_add_zero_padding(self, start_pad_width, end_pad_width, args, gc, dc):
        lengths, data = args
        op = core.CreateOperator(
            'AddPadding',
            ['data', 'lengths'],
            ['output', 'lengths_out'],

Reported by Pylint.

Unused argument 'dc'
Error

Line: 276 Column: 84

           end_pad_width=st.integers(min_value=0, max_value=2),
           data=hu.tensor(min_dim=1, max_dim=3),
           **hu.gcs)
    def test_add_padding_no_length(self, start_pad_width, end_pad_width, data, gc, dc):
        op = core.CreateOperator(
            'AddPadding',
            ['data'],
            ['output', 'output_lens'],
            padding_width=start_pad_width,

Reported by Pylint.

Unused argument 'dc'
Error

Line: 299 Column: 77

           end_pad_width=st.integers(min_value=0, max_value=2),
           args=_gen_test_add_padding(with_pad_data=False, is_remove=True),
           **hu.gcs)
    def test_remove_padding(self, start_pad_width, end_pad_width, args, gc, dc):
        lengths, data = args
        op = core.CreateOperator(
            'RemovePadding',
            ['data', 'lengths'],
            ['output', 'lengths_out'],

Reported by Pylint.

Unused argument 'dc'
Error

Line: 318 Column: 77

           args=_gen_test_add_padding(with_pad_data=True),
           **hu.gcs)
    @settings(deadline=10000)
    def test_gather_padding(self, start_pad_width, end_pad_width, args, gc, dc):
        lengths, data, start_padding, end_padding = args
        padded_data, padded_lengths = _add_padding_ref(
            start_pad_width, end_pad_width, True, data,
            lengths, start_padding, end_padding)
        op = core.CreateOperator(

Reported by Pylint.

Unused argument 'dc'
Error

Line: 341 Column: 50

                              min_value=1, max_value=10),
                          **hu.gcs)
    @settings(deadline=10000)
    def test_reverse_packed_segs(self, data, gc, dc):
        max_length = data.shape[0]
        batch_size = data.shape[1]
        lengths = np.random.randint(max_length + 1, size=batch_size)

        op = core.CreateOperator(

Reported by Pylint.

test/test_cuda_primary_ctx.py
72 issues
Unable to import 'torch'
Error

Line: 1 Column: 1

import torch
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm
import sys
import unittest

# NOTE: this needs to be run in a brand new process

# We cannot import TEST_CUDA and TEST_MULTIGPU from torch.testing._internal.common_cuda here,
# because if we do that, the TEST_CUDNN line from torch.testing._internal.common_cuda will be executed

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 2 Column: 1

import torch
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm
import sys
import unittest

# NOTE: this needs to be run in a brand new process

# We cannot import TEST_CUDA and TEST_MULTIGPU from torch.testing._internal.common_cuda here,
# because if we do that, the TEST_CUDNN line from torch.testing._internal.common_cuda will be executed

Reported by Pylint.

Access to a protected member _cuda_hasPrimaryContext of a client class
Error

Line: 30 Column: 30

    def setUp(self):
        for device in range(torch.cuda.device_count()):
            # Ensure context has not been created beforehand
            self.assertFalse(torch._C._cuda_hasPrimaryContext(device), TestCudaPrimaryCtx.CTX_ALREADY_CREATED_ERR_MSG)

    @unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
    def test_str_repr(self):
        x = torch.randn(1, device='cuda:1')

Reported by Pylint.
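Note: W0212 (protected-access) is expected in this test: primary-context state is only reachable through the private torch._C binding, so the access is deliberate. A per-line disable records that intent (sketch):

    self.assertFalse(
        torch._C._cuda_hasPrimaryContext(device),  # pylint: disable=protected-access
        TestCudaPrimaryCtx.CTX_ALREADY_CREATED_ERR_MSG)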

Access to a protected member _C of a client class
Error

Line: 30 Column: 30

    def setUp(self):
        for device in range(torch.cuda.device_count()):
            # Ensure context has not been created beforehand
            self.assertFalse(torch._C._cuda_hasPrimaryContext(device), TestCudaPrimaryCtx.CTX_ALREADY_CREATED_ERR_MSG)

    @unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
    def test_str_repr(self):
        x = torch.randn(1, device='cuda:1')

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 37 Column: 26

        x = torch.randn(1, device='cuda:1')

        # We should have only created context on 'cuda:1'
        self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
        self.assertTrue(torch._C._cuda_hasPrimaryContext(1))

        str(x)
        repr(x)

Reported by Pylint.

Access to a protected member _cuda_hasPrimaryContext of a client class
Error

Line: 37 Column: 26

        x = torch.randn(1, device='cuda:1')

        # We should have only created context on 'cuda:1'
        self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
        self.assertTrue(torch._C._cuda_hasPrimaryContext(1))

        str(x)
        repr(x)

Reported by Pylint.

Access to a protected member _cuda_hasPrimaryContext of a client class
Error

Line: 38 Column: 25

        # We should have only created context on 'cuda:1'
        self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
        self.assertTrue(torch._C._cuda_hasPrimaryContext(1))

        str(x)
        repr(x)

        # We should still have only created context on 'cuda:1'

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 38 Column: 25

        # We should have only created context on 'cuda:1'
        self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
        self.assertTrue(torch._C._cuda_hasPrimaryContext(1))

        str(x)
        repr(x)

        # We should still have only created context on 'cuda:1'

Reported by Pylint.

Access to a protected member _cuda_hasPrimaryContext of a client class
Error

Line: 44 Column: 26

        repr(x)

        # We should still have only created context on 'cuda:1'
        self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
        self.assertTrue(torch._C._cuda_hasPrimaryContext(1))

    @unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
    def test_copy(self):
        x = torch.randn(1, device='cuda:1')

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 44 Column: 26

        repr(x)

        # We should still have only created context on 'cuda:1'
        self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
        self.assertTrue(torch._C._cuda_hasPrimaryContext(1))

    @unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
    def test_copy(self):
        x = torch.randn(1, device='cuda:1')

Reported by Pylint.

torch/jit/annotations.py
72 issues
Attempted relative import beyond top-level package
Error

Line: 8 Column: 1

import builtins
import torch
import warnings
from .._jit_internal import List, Tuple, is_tuple, is_list, Dict, is_dict, Optional, \
    is_optional, _qualified_name, Any, Future, is_future, is_ignored_fn
from .._jit_internal import BroadcastingList1, BroadcastingList2, BroadcastingList3  # type: ignore[attr-defined]
from ._state import _get_script_class

from torch._C import TensorType, TupleType, FloatType, IntType, ComplexType, \

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 10 Column: 1

import warnings
from .._jit_internal import List, Tuple, is_tuple, is_list, Dict, is_dict, Optional, \
    is_optional, _qualified_name, Any, Future, is_future, is_ignored_fn
from .._jit_internal import BroadcastingList1, BroadcastingList2, BroadcastingList3  # type: ignore[attr-defined]
from ._state import _get_script_class

from torch._C import TensorType, TupleType, FloatType, IntType, ComplexType, \
    ListType, StringType, DictType, BoolType, OptionalType, InterfaceType, AnyType, NoneType, \
    DeviceObjType, StreamObjType, FutureType, EnumType

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 11 Column: 1

from .._jit_internal import List, Tuple, is_tuple, is_list, Dict, is_dict, Optional, \
    is_optional, _qualified_name, Any, Future, is_future, is_ignored_fn
from .._jit_internal import BroadcastingList1, BroadcastingList2, BroadcastingList3  # type: ignore[attr-defined]
from ._state import _get_script_class

from torch._C import TensorType, TupleType, FloatType, IntType, ComplexType, \
    ListType, StringType, DictType, BoolType, OptionalType, InterfaceType, AnyType, NoneType, \
    DeviceObjType, StreamObjType, FutureType, EnumType

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 23 Column: 5

from typing import Type

if torch.distributed.rpc.is_available():
    from .._jit_internal import RRef, is_rref
    from torch._C import RRefType


class Module(object):
    def __init__(self, name, members):

Reported by Pylint.

Module 'torch' has no 'DoubleTensor' member
Error

Line: 283 Column: 43

    if issubclass(ann, torch.Tensor):
        return True

    if issubclass(ann, (torch.LongTensor, torch.DoubleTensor, torch.FloatTensor,
                        torch.IntTensor, torch.ShortTensor, torch.HalfTensor,
                        torch.CharTensor, torch.ByteTensor, torch.BoolTensor)):
        warnings.warn("TorchScript will treat type annotations of Tensor "
                      "dtype-specific subtypes as if they are normal Tensors. "
                      "dtype constraints are not enforced in compilation either.")

            

Reported by Pylint.

Module 'torch' has no 'FloatTensor' member
Error

Line: 283 Column: 63

    if issubclass(ann, torch.Tensor):
        return True

    if issubclass(ann, (torch.LongTensor, torch.DoubleTensor, torch.FloatTensor,
                        torch.IntTensor, torch.ShortTensor, torch.HalfTensor,
                        torch.CharTensor, torch.ByteTensor, torch.BoolTensor)):
        warnings.warn("TorchScript will treat type annotations of Tensor "
                      "dtype-specific subtypes as if they are normal Tensors. "
                      "dtype constraints are not enforced in compilation either.")

            

Reported by Pylint.

Module 'torch' has no 'LongTensor' member
Error

Line: 283 Column: 25

    if issubclass(ann, torch.Tensor):
        return True

    if issubclass(ann, (torch.LongTensor, torch.DoubleTensor, torch.FloatTensor,
                        torch.IntTensor, torch.ShortTensor, torch.HalfTensor,
                        torch.CharTensor, torch.ByteTensor, torch.BoolTensor)):
        warnings.warn("TorchScript will treat type annotations of Tensor "
                      "dtype-specific subtypes as if they are normal Tensors. "
                      "dtype constraints are not enforced in compilation either.")

            

Reported by Pylint.

Module 'torch' has no 'ShortTensor' member
Error

Line: 284 Column: 42

        return True

    if issubclass(ann, (torch.LongTensor, torch.DoubleTensor, torch.FloatTensor,
                        torch.IntTensor, torch.ShortTensor, torch.HalfTensor,
                        torch.CharTensor, torch.ByteTensor, torch.BoolTensor)):
        warnings.warn("TorchScript will treat type annotations of Tensor "
                      "dtype-specific subtypes as if they are normal Tensors. "
                      "dtype constraints are not enforced in compilation either.")
        return True

Reported by Pylint.

Module 'torch' has no 'IntTensor' member
Error

Line: 284 Column: 25

        return True

    if issubclass(ann, (torch.LongTensor, torch.DoubleTensor, torch.FloatTensor,
                        torch.IntTensor, torch.ShortTensor, torch.HalfTensor,
                        torch.CharTensor, torch.ByteTensor, torch.BoolTensor)):
        warnings.warn("TorchScript will treat type annotations of Tensor "
                      "dtype-specific subtypes as if they are normal Tensors. "
                      "dtype constraints are not enforced in compilation either.")
        return True

Reported by Pylint.

Module 'torch' has no 'HalfTensor' member
Error

Line: 284 Column: 61

        return True

    if issubclass(ann, (torch.LongTensor, torch.DoubleTensor, torch.FloatTensor,
                        torch.IntTensor, torch.ShortTensor, torch.HalfTensor,
                        torch.CharTensor, torch.ByteTensor, torch.BoolTensor)):
        warnings.warn("TorchScript will treat type annotations of Tensor "
                      "dtype-specific subtypes as if they are normal Tensors. "
                      "dtype constraints are not enforced in compilation either.")
        return True

Reported by Pylint.

torch/testing/_internal/codegen/random_topo_test.py
71 issues
Module 'torch' has no 'float' member
Error

Line: 24 Column: 9

# tensor device
DEVICE = "cuda"
# data type for tensors
DTYPE = torch.float
# factor sorta control the depth of the model
GRAPH_FACTOR = 2

################################################################################
# helper functions

Reported by Pylint.

Module 'torch' has no 'relu' member
Error

Line: 80 Column: 40

    num_sets = num_tensor
    candidate = list(range(num_tensor))

    unary_operations = [torch.sigmoid, torch.relu]
    binary_operations = [torch.add, torch.sub, torch.mul]
    u_op_size = len(unary_operations)
    b_op_size = len(binary_operations)

    num_operations = np.random.randint(num_sets - 1,

Reported by Pylint.

Module 'torch' has no 'sigmoid' member
Error

Line: 80 Column: 25

    num_sets = num_tensor
    candidate = list(range(num_tensor))

    unary_operations = [torch.sigmoid, torch.relu]
    binary_operations = [torch.add, torch.sub, torch.mul]
    u_op_size = len(unary_operations)
    b_op_size = len(binary_operations)

    num_operations = np.random.randint(num_sets - 1,

Reported by Pylint.

Module 'torch' has no 'add' member
Error

Line: 81 Column: 26

    candidate = list(range(num_tensor))

    unary_operations = [torch.sigmoid, torch.relu]
    binary_operations = [torch.add, torch.sub, torch.mul]
    u_op_size = len(unary_operations)
    b_op_size = len(binary_operations)

    num_operations = np.random.randint(num_sets - 1,
                                       num_sets * GRAPH_FACTOR)

Reported by Pylint.

Module 'torch' has no 'mul' member
Error

Line: 81 Column: 48

    candidate = list(range(num_tensor))

    unary_operations = [torch.sigmoid, torch.relu]
    binary_operations = [torch.add, torch.sub, torch.mul]
    u_op_size = len(unary_operations)
    b_op_size = len(binary_operations)

    num_operations = np.random.randint(num_sets - 1,
                                       num_sets * GRAPH_FACTOR)

Reported by Pylint.

Module 'torch' has no 'sub' member; maybe 'hub'?
Error

Line: 81 Column: 37

    candidate = list(range(num_tensor))

    unary_operations = [torch.sigmoid, torch.relu]
    binary_operations = [torch.add, torch.sub, torch.mul]
    u_op_size = len(unary_operations)
    b_op_size = len(binary_operations)

    num_operations = np.random.randint(num_sets - 1,
                                       num_sets * GRAPH_FACTOR)

Reported by Pylint.

Sequence index is not an int, slice, or instance with __index__
Error

Line: 94 Column: 20

        # we start off with randomly pick a candidate and operation
        index = np.random.randint(0, len(candidate))
        op_index = np.random.randint(0, u_op_size + b_op_size)
        lh_index = candidate[index]
        rh_index = None
        out_tensor = None

        if DEBUG_PRINT:
            print("iteration {0}, num_sets{1}, candidates {2}, tensor_list {3}, lh_index {4}, op_index {5}".format(

Reported by Pylint.
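Note: E1126 (invalid-sequence-index) is a false positive here: np.random.randint returns a numpy integer, which does implement __index__ at runtime, but pylint cannot prove that statically. Wrapping the value in int() satisfies the checker at no cost (sketch):

    index = int(np.random.randint(0, len(candidate)))
    lh_index = candidate[index]  # indexed with a plain Python int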

Sequence index is not an int, slice, or instance with __index__
Error

Line: 105 Column: 30

            num_operations -= 1
            if op_index < u_op_size:
                # unary operation, we just apply a random operation on candidate
                out_tensor = unary_operations[op_index](tensor_list[lh_index])
            else:
                # binary operation, we randomly choose the other operand:
                #   1. tensor on tensor operation -> rh_index
                #   2. tensor on const operation
                # we are not restricted to candidate tensor any more.

Reported by Pylint.

Sequence index is not an int, slice, or instance with __index__
Error

Line: 126 Column: 34

                    # if np.random.randint(0, 2) > 0:
                    #  left = const_list[op_2_index - len(tensor_list)]
                    #  right = tensor_list[lh_index]
                    out_tensor = binary_operations[op_index - u_op_size](left, right)
                if DEBUG_PRINT:
                    print("binary, op_2_index {0}, rh_index ?{1}".format(op_2_index, rh_index))
        else:
            # binary operation, we just randomly pick two candidates.
            # this is not the most efficient way to close dependecy, as we could have

Reported by Pylint.

Sequence index is not an int, slice, or instance with __index__
Error

Line: 142 Column: 9

                print("binary rh_index ?{0}".format(rh_index))

        # update candidate should happen before we remove rh_index
        candidate[index] = len(tensor_list)
        lh_root = get_root(lh_index, d_map)
        # [if rh_index: create binary operator output tensor]
        if rh_index is not None:

            out_tensor = binary_operations[op_index - u_op_size](

Reported by Pylint.

test/distributed/pipeline/sync/skip/test_verify_skippables.py
71 issues
Unable to import 'pytest'
Error

Line: 7 Column: 1

#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from torch import nn

from torch.distributed.pipeline.sync.skip import Namespace, skippable, verify_skippables

Reported by Pylint.

Unable to import 'torch'
Error

Line: 8 Column: 1

# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from torch import nn

from torch.distributed.pipeline.sync.skip import Namespace, skippable, verify_skippables


def test_matching():

Reported by Pylint.

Unable to import 'torch.distributed.pipeline.sync.skip'
Error

Line: 10 Column: 1

import pytest
from torch import nn

from torch.distributed.pipeline.sync.skip import Namespace, skippable, verify_skippables


def test_matching():
    @skippable(stash=["foo"])
    class Layer1(nn.Module):

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from torch import nn

Reported by Pylint.

Missing function or method docstring
Error

Line: 13 Column: 1

from torch.distributed.pipeline.sync.skip import Namespace, skippable, verify_skippables


def test_matching():
    @skippable(stash=["foo"])
    class Layer1(nn.Module):
        pass

    @skippable(pop=["foo"])

Reported by Pylint.

Missing class docstring
Error

Line: 15 Column: 5

def test_matching():
    @skippable(stash=["foo"])
    class Layer1(nn.Module):
        pass

    @skippable(pop=["foo"])
    class Layer2(nn.Module):
        pass

Reported by Pylint.

Too few public methods (0/2)
Error

Line: 15 Column: 5

def test_matching():
    @skippable(stash=["foo"])
    class Layer1(nn.Module):
        pass

    @skippable(pop=["foo"])
    class Layer2(nn.Module):
        pass

Reported by Pylint.
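Note: R0903 (too-few-public-methods) misfires on these marker classes: Layer1/Layer2 exist only to carry @skippable metadata and inherit their real behavior from nn.Module. A per-class disable keeps them as-is (sketch); alternatively, min-public-methods can be lowered for test code in .pylintrc:

    @skippable(stash=["foo"])
    class Layer1(nn.Module):  # pylint: disable=too-few-public-methods
        pass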

Too few public methods (0/2)
Error

Line: 19 Column: 5

        pass

    @skippable(pop=["foo"])
    class Layer2(nn.Module):
        pass

    verify_skippables(nn.Sequential(Layer1(), Layer2()))

Reported by Pylint.

Missing class docstring
Error

Line: 19 Column: 5

        pass

    @skippable(pop=["foo"])
    class Layer2(nn.Module):
        pass

    verify_skippables(nn.Sequential(Layer1(), Layer2()))

Reported by Pylint.

Missing function or method docstring
Error

Line: 25 Column: 1

    verify_skippables(nn.Sequential(Layer1(), Layer2()))


def test_stash_not_pop():
    @skippable(stash=["foo"])
    class Layer1(nn.Module):
        pass

    with pytest.raises(TypeError) as e:

Reported by Pylint.

torch/nn/utils/prune.py
71 issues
Module 'torch' has no 'ones_like' member
Error

Line: 176 Column: 28

            module.register_parameter(name + "_orig", orig)
            # temporarily delete `module[name]`
            del module._parameters[name]
            default_mask = torch.ones_like(orig)  # temp
        # If this is not the first time pruning is applied, all of the above
        # has been done before in a previous pruning iteration, so we're good
        # to go
        else:
            default_mask = (

Reported by Pylint.

Module 'torch' has no 'contiguous_format' member
Error

Line: 184 Column: 38

            default_mask = (
                getattr(module, name + "_mask")
                .detach()
                .clone(memory_format=torch.contiguous_format)
            )

        # Use try/except because if anything goes wrong with the mask
        # computation etc., you'd want to roll back.
        try:

Reported by Pylint.

Module 'torch' has no 'ones_like' member
Error

Line: 235 Column: 70

            ), "importance_scores should have the same shape as tensor t"
        else:
            importance_scores = t
        default_mask = default_mask if default_mask is not None else torch.ones_like(t)
        return t * self.compute_mask(importance_scores, default_mask=default_mask)

    def remove(self, module):
        r"""Removes the pruning reparameterization from a module. The pruned
        parameter named ``name`` remains permanently pruned, and the parameter

Reported by Pylint.

Instance of 'tuple' has no '_tensor_name' member
Error

Line: 280 Column: 33

    def __init__(self, *args):
        self._pruning_methods: Tuple["BasePruningMethod", ...] = tuple()
        if not isinstance(args, Iterable):  # only 1 item
            self._tensor_name = args._tensor_name
            self.add_pruning_method(args)
        elif len(args) == 1:  # only 1 item in a tuple
            self._tensor_name = args[0]._tensor_name
            self.add_pruning_method(args[0])
        else:  # manual construction from list or other iterable (or no args)

Reported by Pylint.
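Note: unlike most findings in this report, this one points at genuinely dead code: args is bound by *args and is therefore always a tuple, so not isinstance(args, Iterable) can never be true, and pylint consequently types args as a tuple with no _tensor_name. A sketch that dispatches on argument count instead (an illustration of the idea, not the upstream fix; the loop in the else branch is an assumption about the elided code):

    def __init__(self, *args):
        self._pruning_methods: Tuple["BasePruningMethod", ...] = tuple()
        if len(args) == 1:  # a single pruning method
            self._tensor_name = args[0]._tensor_name
            self.add_pruning_method(args[0])
        else:  # several methods (or none)
            for method in args:
                self.add_pruning_method(method)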

Module 'torch' has no 'contiguous_format' member
Error

Line: 475 Column: 49

        # than the number of units in the tensor
        _validate_pruning_amount(nparams_toprune, tensor_size)

        mask = default_mask.clone(memory_format=torch.contiguous_format)

        if nparams_toprune != 0:  # k=0 not supported by torch.kthvalue
            prob = torch.rand_like(t)
            topk = torch.topk(prob.view(-1), k=nparams_toprune)
            mask.view(-1)[topk.indices] = 0

Reported by Pylint.

Module 'torch' has no 'rand_like' member
Error

Line: 478 Column: 20

        mask = default_mask.clone(memory_format=torch.contiguous_format)

        if nparams_toprune != 0:  # k=0 not supported by torch.kthvalue
            prob = torch.rand_like(t)
            topk = torch.topk(prob.view(-1), k=nparams_toprune)
            mask.view(-1)[topk.indices] = 0

        return mask

Reported by Pylint.

Module 'torch' has no 'topk' member
Error

Line: 479 Column: 20

        if nparams_toprune != 0:  # k=0 not supported by torch.kthvalue
            prob = torch.rand_like(t)
            topk = torch.topk(prob.view(-1), k=nparams_toprune)
            mask.view(-1)[topk.indices] = 0

        return mask

    @classmethod

Reported by Pylint.

Module 'torch' has no 'contiguous_format' member
Error

Line: 531 Column: 49

        # than the number of units in the tensor
        _validate_pruning_amount(nparams_toprune, tensor_size)

        mask = default_mask.clone(memory_format=torch.contiguous_format)

        if nparams_toprune != 0:  # k=0 not supported by torch.kthvalue
            # largest=True --> top k; largest=False --> bottom k
            # Prune the smallest k
            topk = torch.topk(torch.abs(t).view(-1), k=nparams_toprune, largest=False)

Reported by Pylint.

Module 'torch' has no 'abs' member
Error

Line: 536 Column: 31

        if nparams_toprune != 0:  # k=0 not supported by torch.kthvalue
            # largest=True --> top k; largest=False --> bottom k
            # Prune the smallest k
            topk = torch.topk(torch.abs(t).view(-1), k=nparams_toprune, largest=False)
            # topk will have .indices and .values
            mask.view(-1)[topk.indices] = 0

        return mask

Reported by Pylint.

Module 'torch' has no 'topk' member
Error

Line: 536 Column: 20

        if nparams_toprune != 0:  # k=0 not supported by torch.kthvalue
            # largest=True --> top k; largest=False --> bottom k
            # Prune the smallest k
            topk = torch.topk(torch.abs(t).view(-1), k=nparams_toprune, largest=False)
            # topk will have .indices and .values
            mask.view(-1)[topk.indices] = 0

        return mask

Reported by Pylint.

torch/quantization/fx/utils.py
70 issues
Attempted relative import beyond top-level package
Error

Line: 4 Column: 1

import re
import torch
import torch.nn as nn
from ..utils import is_per_tensor, is_per_channel
from ..quantize import is_activation_post_process

from torch.fx import GraphModule, map_arg

from torch.fx.graph import (

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 5 Column: 1

import torch
import torch.nn as nn
from ..utils import is_per_tensor, is_per_channel
from ..quantize import is_activation_post_process

from torch.fx import GraphModule, map_arg

from torch.fx.graph import (
    Graph,

Reported by Pylint.

Module 'torch' has no 'quint8' member
Error

Line: 126 Column: 18

    '''
    dtype = activation_post_process.dtype  # type: ignore[attr-defined]
    quantize_op : Optional[Union[Callable, str]] = None
    if dtype in [torch.quint8, torch.qint8]:
        node_type = "call_function"
        scale, zero_point = activation_post_process.calculate_qparams()  # type: ignore[attr-defined]
        if is_per_channel(activation_post_process.qscheme):  # type: ignore[attr-defined]
            ch_axis = int(activation_post_process.ch_axis)  # type: ignore[attr-defined]
            qparams = {"_scale_": scale, "_zero_point_": zero_point, "_axis_": ch_axis, "_dtype_": dtype}

Reported by Pylint.

Module 'torch' has no 'qint8' member
Error

Line: 126 Column: 32

    '''
    dtype = activation_post_process.dtype  # type: ignore[attr-defined]
    quantize_op : Optional[Union[Callable, str]] = None
    if dtype in [torch.quint8, torch.qint8]:
        node_type = "call_function"
        scale, zero_point = activation_post_process.calculate_qparams()  # type: ignore[attr-defined]
        if is_per_channel(activation_post_process.qscheme):  # type: ignore[attr-defined]
            ch_axis = int(activation_post_process.ch_axis)  # type: ignore[attr-defined]
            qparams = {"_scale_": scale, "_zero_point_": zero_point, "_axis_": ch_axis, "_dtype_": dtype}

Reported by Pylint.

Module 'torch' has no 'quantize_per_channel' member
Error

Line: 132 Column: 27

        if is_per_channel(activation_post_process.qscheme):  # type: ignore[attr-defined]
            ch_axis = int(activation_post_process.ch_axis)  # type: ignore[attr-defined]
            qparams = {"_scale_": scale, "_zero_point_": zero_point, "_axis_": ch_axis, "_dtype_": dtype}
            quantize_op = torch.quantize_per_channel
        else:
            scale = float(scale)
            zero_point = int(zero_point)
            qparams = {"_scale_": scale, "_zero_point_": zero_point, "_dtype_": dtype}
            quantize_op = torch.quantize_per_tensor

Reported by Pylint.

Module 'torch' has no 'quantize_per_tensor' member
Error

Line: 137 Column: 27

            scale = float(scale)
            zero_point = int(zero_point)
            qparams = {"_scale_": scale, "_zero_point_": zero_point, "_dtype_": dtype}
            quantize_op = torch.quantize_per_tensor
    elif dtype == torch.float16:
        node_type = "call_method"
        quantize_op = "to"
        qparams = {"_dtype_": dtype}
    else:

Reported by Pylint.

Module 'torch' has no 'float16' member
Error

Line: 138 Column: 19

            zero_point = int(zero_point)
            qparams = {"_scale_": scale, "_zero_point_": zero_point, "_dtype_": dtype}
            quantize_op = torch.quantize_per_tensor
    elif dtype == torch.float16:
        node_type = "call_method"
        quantize_op = "to"
        qparams = {"_dtype_": dtype}
    else:
        raise Exception("Unsupported dtype in get_quantize_node_info:" + str(dtype))

Reported by Pylint.

Module 'torch' has no 'float16' member
Error

Line: 237 Column: 17

    return list(float_custom_module_classes)

def get_linear_prepack_op_for_dtype(dtype):
    if dtype == torch.float16:
        return torch.ops.quantized.linear_prepack_fp16
    elif dtype == torch.qint8:
        return torch.ops.quantized.linear_prepack
    else:
        raise Exception("can't get linear prepack op for dtype:", dtype)

Reported by Pylint.

Module 'torch' has no 'qint8' member
Error

Line: 239 Column: 19

def get_linear_prepack_op_for_dtype(dtype):
    if dtype == torch.float16:
        return torch.ops.quantized.linear_prepack_fp16
    elif dtype == torch.qint8:
        return torch.ops.quantized.linear_prepack
    else:
        raise Exception("can't get linear prepack op for dtype:", dtype)

def get_qconv_prepack_op(conv_op: Callable) -> Callable:

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 364 Column: 39

    get_new_attr_name = get_new_attr_name_with_prefix(prefix)
    attr_name = get_new_attr_name(module)
    device = assert_and_get_unique_device(module)
    module.register_buffer(attr_name, torch.tensor(value, device=device))
    # Create get_attr with value
    attr_node = graph.create_node("get_attr", attr_name)
    return attr_node

def create_qparam_nodes(

Reported by Pylint.

test/distributed/pipeline/sync/test_bugs.py
70 issues
Unable to import 'pytest'
Error

Line: 7 Column: 1

#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch import nn
import torch.nn.functional as F

from torch.distributed.pipeline.sync import Pipe

Reported by Pylint.

Unable to import 'torch'
Error

Line: 8 Column: 1

# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch import nn
import torch.nn.functional as F

from torch.distributed.pipeline.sync import Pipe

Reported by Pylint.

Unable to import 'torch'
Error

Line: 9 Column: 1

# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch import nn
import torch.nn.functional as F

from torch.distributed.pipeline.sync import Pipe

Reported by Pylint.

Unable to import 'torch.nn.functional'
Error

Line: 10 Column: 1

import pytest
import torch
from torch import nn
import torch.nn.functional as F

from torch.distributed.pipeline.sync import Pipe


def test_python_autograd_function(setup_rpc):

Reported by Pylint.

Unable to import 'torch.distributed.pipeline.sync'
Error

Line: 12 Column: 1

from torch import nn
import torch.nn.functional as F

from torch.distributed.pipeline.sync import Pipe


def test_python_autograd_function(setup_rpc):
    # A Python autograd function might fail with this error:
    #

Reported by Pylint.

Unused argument 'setup_rpc'
Error

Line: 15 Column: 35

from torch.distributed.pipeline.sync import Pipe


def test_python_autograd_function(setup_rpc):
    # A Python autograd function might fail with this error:
    #
    #   RuntimeError: Returning Variables sharing storage with other Variables
    #   that require grad is not supported in Python functions. Please submit a
    #   feature request if you hit this error.

Reported by Pylint.
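Note: setup_rpc is a pytest fixture: naming it in the signature is what makes pytest run the fixture before the test, so it is "used" even though the body never references it. Renaming it would disable the fixture; the usual options are a disable comment or adding the fixture name to pylint's ignored-argument-names. A sketch:

    def test_python_autograd_function(setup_rpc):  # pylint: disable=unused-argument
        ...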

Redefining built-in 'input'
Error

Line: 28 Column: 26

    #
    class Identity(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input):
            return input

        @staticmethod
        def backward(ctx, grad):
            return grad

Reported by Pylint.
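Note: W0622 (redefined-builtin) is fixed by renaming the parameter; autograd passes it positionally, so the call sites are unaffected. A sketch:

    class Identity(torch.autograd.Function):
        @staticmethod
        def forward(ctx, inp):  # renamed from `input` to avoid shadowing the builtin
            return inp

        @staticmethod
        def backward(ctx, grad):
            return grad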

Unused argument 'ctx'
Error

Line: 28 Column: 21

    #
    class Identity(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input):
            return input

        @staticmethod
        def backward(ctx, grad):
            return grad

Reported by Pylint.

Unused argument 'ctx'
Error

Line: 32 Column: 22

            return input

        @staticmethod
        def backward(ctx, grad):
            return grad

    class M(nn.Module):
        def forward(self, input):
            return Identity.apply(input)

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 36 Column: 27

            return grad

    class M(nn.Module):
        def forward(self, input):
            return Identity.apply(input)

    model = nn.Sequential(M(), M())
    model = Pipe(model, checkpoint="always")

Reported by Pylint.