The following issues were found, grouped by file:

torch/_utils.py
91 issues
Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 132 Column: 9

              
def _rebuild_tensor(storage, storage_offset, size, stride):
    # first construct a tensor with the correct dtype/device
    t = torch.tensor([], dtype=storage.dtype, device=storage.device)
    return t.set_(storage, storage_offset, size, stride)


def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
    tensor = _rebuild_tensor(storage, storage_offset, size, stride)

            

Reported by Pylint.
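
Note: these "no member" reports on torch are usually false positives — Pylint cannot introspect members that live in torch's C extension. A minimal sketch of an inline suppression is below, assuming the project prefers pragmas; project-wide alternatives are the generated-members or extension-pkg-whitelist options in the Pylint configuration.

import torch

def _rebuild_tensor(storage, storage_offset, size, stride):
    # First construct an empty tensor with the correct dtype/device, then point
    # it at the existing storage; the pragma silences the false-positive report.
    t = torch.tensor([], dtype=storage.dtype, device=storage.device)  # pylint: disable=no-member
    return t.set_(storage, storage_offset, size, stride)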

Module 'torch' has no '_validate_sparse_coo_tensor_args' member
Error

Line: 161 Column: 13

              def _validate_loaded_sparse_tensors():
    try:
        for t in _sparse_tensors_to_validate:
            torch._validate_sparse_coo_tensor_args(t._indices(), t._values(),
                                                   t.size())
    finally:
        _sparse_tensors_to_validate.clear()

def _rebuild_sparse_tensor(layout, data):

            

Reported by Pylint.

Module 'torch' has no 'sparse_coo' member
Error

Line: 167 Column: 18

                      _sparse_tensors_to_validate.clear()

def _rebuild_sparse_tensor(layout, data):
    if layout == torch.sparse_coo:
        indices, values, size = data
        result = torch._sparse_coo_tensor_unsafe(indices, values, size)
        _sparse_tensors_to_validate.append(result)
        return result


            

Reported by Pylint.

Module 'torch' has no '_sparse_coo_tensor_unsafe' member
Error

Line: 169 Column: 18

              def _rebuild_sparse_tensor(layout, data):
    if layout == torch.sparse_coo:
        indices, values, size = data
        result = torch._sparse_coo_tensor_unsafe(indices, values, size)
        _sparse_tensors_to_validate.append(result)
        return result

    raise NotImplementedError("rebuilding sparse tensor for layout %s" % (layout))


            

Reported by Pylint.

Module 'torch' has no 'from_numpy' member
Error

Line: 177 Column: 14

              

def _rebuild_xla_tensor(data, dtype, device, requires_grad):
    tensor = torch.from_numpy(data).to(dtype=dtype, device=device)
    tensor.requires_grad = requires_grad
    return tensor


def _rebuild_mlc_tensor(data, dtype, device, requires_grad):

            

Reported by Pylint.

Module 'torch' has no 'from_numpy' member
Error

Line: 183 Column: 14

              

def _rebuild_mlc_tensor(data, dtype, device, requires_grad):
    tensor = torch.from_numpy(data).to(dtype=dtype, device=device)
    tensor.requires_grad = requires_grad
    return tensor


def _rebuild_meta_tensor_no_storage(dtype, size, stride, requires_grad):

            

Reported by Pylint.

Module 'torch' has no 'empty_strided' member
Error

Line: 189 Column: 12

              

def _rebuild_meta_tensor_no_storage(dtype, size, stride, requires_grad):
    return torch.empty_strided(size, stride, dtype=dtype, device='meta', requires_grad=requires_grad)


def _rebuild_qtensor(storage, storage_offset, size, stride, quantizer_params, requires_grad, backward_hooks):
    qscheme = quantizer_params[0]
    if qscheme == torch.per_tensor_affine:

            

Reported by Pylint.

Module 'torch' has no 'per_tensor_affine' member
Error

Line: 194 Column: 19

              
def _rebuild_qtensor(storage, storage_offset, size, stride, quantizer_params, requires_grad, backward_hooks):
    qscheme = quantizer_params[0]
    if qscheme == torch.per_tensor_affine:
        _, scale, zero_point = quantizer_params
        tensor = torch._empty_affine_quantized(size, scale=scale, zero_point=zero_point, dtype=storage.dtype)
    elif qscheme in (torch.per_channel_affine, torch.per_channel_affine_float_qparams):
        _, scales, zero_points, axis = quantizer_params
        if type(scales) is list and type(zero_points) is list:

            

Reported by Pylint.

Module 'torch' has no '_empty_affine_quantized' member
Error

Line: 196 Column: 18

                  qscheme = quantizer_params[0]
    if qscheme == torch.per_tensor_affine:
        _, scale, zero_point = quantizer_params
        tensor = torch._empty_affine_quantized(size, scale=scale, zero_point=zero_point, dtype=storage.dtype)
    elif qscheme in (torch.per_channel_affine, torch.per_channel_affine_float_qparams):
        _, scales, zero_points, axis = quantizer_params
        if type(scales) is list and type(zero_points) is list:
            if qscheme == torch.per_channel_affine:
                scales = torch.tensor(scales, dtype=torch.double)

            

Reported by Pylint.

Module 'torch' has no 'per_channel_affine' member
Error

Line: 197 Column: 22

                  if qscheme == torch.per_tensor_affine:
        _, scale, zero_point = quantizer_params
        tensor = torch._empty_affine_quantized(size, scale=scale, zero_point=zero_point, dtype=storage.dtype)
    elif qscheme in (torch.per_channel_affine, torch.per_channel_affine_float_qparams):
        _, scales, zero_points, axis = quantizer_params
        if type(scales) is list and type(zero_points) is list:
            if qscheme == torch.per_channel_affine:
                scales = torch.tensor(scales, dtype=torch.double)
                zero_points = torch.tensor(zero_points, dtype=torch.long)

            

Reported by Pylint.

caffe2/python/transformations_test.py
90 issues
Unable to import 'hypothesis'
Error

Line: 21 Column: 1

              


from hypothesis import given
import hypothesis.strategies as st
import numpy as np

from caffe2.python.transformations import Transformer
from caffe2.python import core, workspace

            

Reported by Pylint.
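
Note: "Unable to import" means Pylint's own environment cannot resolve hypothesis; the code is fine at runtime where the package is installed. The usual fix is to install hypothesis in the environment the linter runs in. If the package is intentionally absent there, an inline pragma is a hedged stop-gap (sketch):

from hypothesis import given  # pylint: disable=import-error
import hypothesis.strategies as st  # pylint: disable=import-error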

Unable to import 'hypothesis.strategies'
Error

Line: 22 Column: 1

              

from hypothesis import given
import hypothesis.strategies as st
import numpy as np

from caffe2.python.transformations import Transformer
from caffe2.python import core, workspace
from caffe2.python import test_util as tu

            

Reported by Pylint.

Using deprecated method assertEquals()
Error

Line: 46 Column: 9

                  expected_activation_arg=True):
        self._add_nnpack(net)
        transformer.FuseNNPACKConvRelu(net)
        self.assertEquals(tu.numOps(net), expected_result_num_ops)
        has_activation_arg = False
        for arg in net.Proto().op[0].arg:
            if tu.str_compare(arg.name, "activation"):
                assert tu.str_compare(arg.s, "Relu")
                has_activation_arg = True

            

Reported by Pylint.
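
Note: assertEquals() is a long-deprecated alias of assertEqual() in unittest, so the resolution is a rename. A drop-in sketch for the flagged line:

self.assertEqual(tu.numOps(net), expected_result_num_ops)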

Missing module docstring
Error

Line: 1 Column: 1

              # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software

            

Reported by Pylint.

Missing class docstring
Error

Line: 32 Column: 1

              transformer = Transformer()


class TestTransformations(tu.TestCase):
    def _base_test_net(self):
        net = core.Net("net")
        net.Conv(["X", "w", "b"], ["Y"], stride=1, pad=0, kernel=3, order="NCHW")
        return net


            

Reported by Pylint.
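
Note: both docstring reports are satisfied by short one-liners placed after the license header and at the top of the class. The wording below is illustrative only (a sketch, not the file's actual intent statement):

"""Tests for caffe2.python.transformations (NNPACK fusion and related passes)."""


class TestTransformations(tu.TestCase):
    """Exercises Transformer passes on small Conv/Relu nets."""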

Method could be a function
Error

Line: 33 Column: 5

              

class TestTransformations(tu.TestCase):
    def _base_test_net(self):
        net = core.Net("net")
        net.Conv(["X", "w", "b"], ["Y"], stride=1, pad=0, kernel=3, order="NCHW")
        return net

    def _add_nnpack(self, net):

            

Reported by Pylint.
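
Note: _base_test_net never touches self, so one hedged option is to make it a @staticmethod (callers can still invoke it as self._base_test_net()); accepting the style report unchanged is equally valid. Sketch, assuming the imports from the quoted file:

from caffe2.python import core
from caffe2.python import test_util as tu


class TestTransformations(tu.TestCase):
    @staticmethod
    def _base_test_net():
        # No use of self, so a @staticmethod keeps behaviour identical while
        # addressing the "method could be a function" report.
        net = core.Net("net")
        net.Conv(["X", "w", "b"], ["Y"], stride=1, pad=0, kernel=3, order="NCHW")
        return net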

Method could be a function
Error

Line: 38 Column: 5

                      net.Conv(["X", "w", "b"], ["Y"], stride=1, pad=0, kernel=3, order="NCHW")
        return net

    def _add_nnpack(self, net):
        transformer.AddNNPACK(net)
        assert tu.str_compare(net.Proto().op[0].engine, "NNPACK")

    def _fuse_nnpack_convrelu(self, net, expected_result_num_ops,
    expected_activation_arg=True):

            

Reported by Pylint.

Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.
Security

Line: 40
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html

              
    def _add_nnpack(self, net):
        transformer.AddNNPACK(net)
        assert tu.str_compare(net.Proto().op[0].engine, "NNPACK")

    def _fuse_nnpack_convrelu(self, net, expected_result_num_ops,
    expected_activation_arg=True):
        self._add_nnpack(net)
        transformer.FuseNNPACKConvRelu(net)

            

Reported by Bandit.
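
Note: Bandit's B101 fires because bare assert statements are stripped under python -O. In a unittest-based test the hedged fix is to use the TestCase assertion API; a drop-in sketch, assuming the module-level transformer and tu helpers from the quoted file (many projects instead simply exclude B101 for test directories):

def _add_nnpack(self, net):
    transformer.AddNNPACK(net)
    # self.assertTrue survives python -O, unlike a bare assert (Bandit B101).
    self.assertTrue(tu.str_compare(net.Proto().op[0].engine, "NNPACK"))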

Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.
Security

Line: 50
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html

                      has_activation_arg = False
        for arg in net.Proto().op[0].arg:
            if tu.str_compare(arg.name, "activation"):
                assert tu.str_compare(arg.s, "Relu")
                has_activation_arg = True
        if expected_activation_arg:
            assert has_activation_arg
        else:
            assert not has_activation_arg

            

Reported by Bandit.

Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.
Security

Line: 53
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html

                              assert tu.str_compare(arg.s, "Relu")
                has_activation_arg = True
        if expected_activation_arg:
            assert has_activation_arg
        else:
            assert not has_activation_arg

    def test_transformer_AddNNPACK(self):
        net = self._base_test_net()

            

Reported by Bandit.

benchmarks/tensorexpr/benchmark.py
89 issues
Attempted relative import beyond top-level package
Error

Line: 5 Column: 1

              import numpy as np
import os
import time
from . import tensor_engine
import torch
import json


class Benchmark(object):

            

Reported by Pylint.
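
Note: the relative import only resolves when the file is imported as part of its package, so running the script directly produces this error. Two hedged options, with the module path below assumed from the repository layout:

# Option 1: keep the relative import and run the file as a module:
#   python -m benchmarks.tensorexpr.benchmark
# Option 2: switch to an absolute import (assumes benchmarks/tensorexpr is importable):
from benchmarks.tensorexpr import tensor_engine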

Unable to import 'torch'
Error

Line: 6 Column: 1

              import os
import time
from . import tensor_engine
import torch
import json


class Benchmark(object):
    def __init__(self, mode, device, dtype):

            

Reported by Pylint.

Instance of 'Benchmark' has no 'reference' member
Error

Line: 52 Column: 13

                      if not self.deterministic:
            return
        np.testing.assert_allclose(
            self.reference(), self.numpy(self.compute()), atol=1e-2
        )

    def config(self):
        """returns an array for the current benchmark configs
        """

            

Reported by Pylint.
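
Note: the 'Benchmark' has no 'reference'/'numpy'/'inputs' reports arise because those attributes and methods are supplied by subclasses. A hedged way to quiet the attribute reports is to declare them in the base __init__ (sketch below); reference() is deliberately optional — the class probes for it with getattr — so that particular report is best treated as a false positive rather than "fixed".

class Benchmark(object):
    def __init__(self, mode, device, dtype):
        self.mode = mode
        self.device = device
        self.dtype = dtype
        # Declared here so Pylint can resolve them; subclasses fill them in.
        self.inputs = None
        self.bm_jit = None
        self.jit_mode = None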

Instance of 'Benchmark' has no 'numpy' member
Error

Line: 52 Column: 31

                      if not self.deterministic:
            return
        np.testing.assert_allclose(
            self.reference(), self.numpy(self.compute()), atol=1e-2
        )

    def config(self):
        """returns an array for the current benchmark configs
        """

            

Reported by Pylint.

Instance of 'Benchmark' has no 'inputs' member
Error

Line: 118 Column: 33

              
    def compute(self):
        if self.bm_jit:
            return self.bm_jit(*self.inputs)
        else:
            return self.forward(*self.inputs)

    def run(self, args):
        self.print_ir = args.print_ir

            

Reported by Pylint.

Instance of 'Benchmark' has no 'inputs' member
Error

Line: 120 Column: 34

                      if self.bm_jit:
            return self.bm_jit(*self.inputs)
        else:
            return self.forward(*self.inputs)

    def run(self, args):
        self.print_ir = args.print_ir
        if args.cuda_fuser == "old" :
            torch._C._jit_override_can_fuse_on_gpu(True)

            

Reported by Pylint.

Instance of 'Benchmark' has no 'jit_mode' member
Error

Line: 166 Column: 20

                              time_start = time.time()

            if i == 0:
                if self.jit_mode == "trace" and use_fuser :
                    self.bm_jit = torch.jit.trace(
                        self.forward, example_inputs=self.inputs, check_trace=False
                    )
                if callable(getattr(self, "reference", None)):
                    self.check()

            

Reported by Pylint.

Instance of 'Benchmark' has no 'inputs' member
Error

Line: 168 Column: 54

                          if i == 0:
                if self.jit_mode == "trace" and use_fuser :
                    self.bm_jit = torch.jit.trace(
                        self.forward, example_inputs=self.inputs, check_trace=False
                    )
                if callable(getattr(self, "reference", None)):
                    self.check()
                else:
                    print("Warning: no reference result for ", self.module())

            

Reported by Pylint.

Instance of 'Benchmark' has no 'jit_mode' member
Error

Line: 176 Column: 20

                                  print("Warning: no reference result for ", self.module())
            elif i == 1:
                # The fusion graph is visible after the first iter is executed
                if self.jit_mode == "trace" and use_fuser and self.print_ir :
                    print(self.bm_jit.graph_for(*self.inputs))
            z = self.compute()
            if self.mode == "both":
                if self.result_grad is None:
                    self.result_grad = engine.rand_like(z)

            

Reported by Pylint.

Instance of 'Benchmark' has no 'inputs' member
Error

Line: 177 Column: 50

                          elif i == 1:
                # The fusion graph is visible after the first iter is executed
                if self.jit_mode == "trace" and use_fuser and self.print_ir :
                    print(self.bm_jit.graph_for(*self.inputs))
            z = self.compute()
            if self.mode == "both":
                if self.result_grad is None:
                    self.result_grad = engine.rand_like(z)
                engine.backward([z], [self.result_grad], self.grad_variables)

            

Reported by Pylint.

torch/nn/quantized/modules/embedding_ops.py
88 issues
Module 'torch' has no 'quint8' member
Error

Line: 11 Column: 61

              class EmbeddingPackedParams(torch.nn.Module):
    _version = 1

    def __init__(self, num_embeddings, embedding_dim, dtype=torch.quint8):
        super(EmbeddingPackedParams, self).__init__()
        self.dtype = dtype
        if self.dtype in [torch.quint8, torch.quint4x2]:
            scales = torch.ones(num_embeddings, dtype=torch.float)
            zero_points = torch.zeros(num_embeddings, dtype=torch.float)

            

Reported by Pylint.

Module 'torch' has no 'quint8' member
Error

Line: 14 Column: 27

                  def __init__(self, num_embeddings, embedding_dim, dtype=torch.quint8):
        super(EmbeddingPackedParams, self).__init__()
        self.dtype = dtype
        if self.dtype in [torch.quint8, torch.quint4x2]:
            scales = torch.ones(num_embeddings, dtype=torch.float)
            zero_points = torch.zeros(num_embeddings, dtype=torch.float)
            wq = torch._empty_per_channel_affine_quantized([num_embeddings, embedding_dim], scales=scales,
                                                           zero_points=zero_points,
                                                           axis=0, dtype=self.dtype)

            

Reported by Pylint.

Module 'torch' has no 'quint4x2' member
Error

Line: 14 Column: 41

                  def __init__(self, num_embeddings, embedding_dim, dtype=torch.quint8):
        super(EmbeddingPackedParams, self).__init__()
        self.dtype = dtype
        if self.dtype in [torch.quint8, torch.quint4x2]:
            scales = torch.ones(num_embeddings, dtype=torch.float)
            zero_points = torch.zeros(num_embeddings, dtype=torch.float)
            wq = torch._empty_per_channel_affine_quantized([num_embeddings, embedding_dim], scales=scales,
                                                           zero_points=zero_points,
                                                           axis=0, dtype=self.dtype)

            

Reported by Pylint.

Module 'torch' has no 'float' member
Error

Line: 15 Column: 55

                      super(EmbeddingPackedParams, self).__init__()
        self.dtype = dtype
        if self.dtype in [torch.quint8, torch.quint4x2]:
            scales = torch.ones(num_embeddings, dtype=torch.float)
            zero_points = torch.zeros(num_embeddings, dtype=torch.float)
            wq = torch._empty_per_channel_affine_quantized([num_embeddings, embedding_dim], scales=scales,
                                                           zero_points=zero_points,
                                                           axis=0, dtype=self.dtype)
            self.set_weight(wq)

            

Reported by Pylint.

Module 'torch' has no 'ones' member
Error

Line: 15 Column: 22

                      super(EmbeddingPackedParams, self).__init__()
        self.dtype = dtype
        if self.dtype in [torch.quint8, torch.quint4x2]:
            scales = torch.ones(num_embeddings, dtype=torch.float)
            zero_points = torch.zeros(num_embeddings, dtype=torch.float)
            wq = torch._empty_per_channel_affine_quantized([num_embeddings, embedding_dim], scales=scales,
                                                           zero_points=zero_points,
                                                           axis=0, dtype=self.dtype)
            self.set_weight(wq)

            

Reported by Pylint.

Module 'torch' has no 'float' member
Error

Line: 16 Column: 61

                      self.dtype = dtype
        if self.dtype in [torch.quint8, torch.quint4x2]:
            scales = torch.ones(num_embeddings, dtype=torch.float)
            zero_points = torch.zeros(num_embeddings, dtype=torch.float)
            wq = torch._empty_per_channel_affine_quantized([num_embeddings, embedding_dim], scales=scales,
                                                           zero_points=zero_points,
                                                           axis=0, dtype=self.dtype)
            self.set_weight(wq)
        else:

            

Reported by Pylint.

Module 'torch' has no 'zeros' member
Error

Line: 16 Column: 27

                      self.dtype = dtype
        if self.dtype in [torch.quint8, torch.quint4x2]:
            scales = torch.ones(num_embeddings, dtype=torch.float)
            zero_points = torch.zeros(num_embeddings, dtype=torch.float)
            wq = torch._empty_per_channel_affine_quantized([num_embeddings, embedding_dim], scales=scales,
                                                           zero_points=zero_points,
                                                           axis=0, dtype=self.dtype)
            self.set_weight(wq)
        else:

            

Reported by Pylint.

Module 'torch' has no '_empty_per_channel_affine_quantized' member
Error

Line: 17 Column: 18

                      if self.dtype in [torch.quint8, torch.quint4x2]:
            scales = torch.ones(num_embeddings, dtype=torch.float)
            zero_points = torch.zeros(num_embeddings, dtype=torch.float)
            wq = torch._empty_per_channel_affine_quantized([num_embeddings, embedding_dim], scales=scales,
                                                           zero_points=zero_points,
                                                           axis=0, dtype=self.dtype)
            self.set_weight(wq)
        else:
            raise NotImplementedError('Unsupported dtype on quantized embedding! Supports quint8 and quint4x2.')

            

Reported by Pylint.

Module 'torch' has no 'quint4x2' member
Error

Line: 26 Column: 41

              
    @torch.jit.export
    def set_weight(self, weight: torch.Tensor) -> None:
        if self.dtype in [torch.quint8, torch.quint4x2]:
            self._packed_weight = torch.ops.quantized.embedding_bag_prepack(weight)
        else:
            raise NotImplementedError('Unsupported dtype for quantized embedding prepack! Supports quint8 and quint4x2.')



            

Reported by Pylint.

Module 'torch' has no 'quint8' member
Error

Line: 26 Column: 27

              
    @torch.jit.export
    def set_weight(self, weight: torch.Tensor) -> None:
        if self.dtype in [torch.quint8, torch.quint4x2]:
            self._packed_weight = torch.ops.quantized.embedding_bag_prepack(weight)
        else:
            raise NotImplementedError('Unsupported dtype for quantized embedding prepack! Supports quint8 and quint4x2.')



            

Reported by Pylint.

benchmarks/operator_benchmark/pt/qembedding_bag_lookups_test.py
88 issues
Unable to import 'torch'
Error

Line: 3 Column: 1

              
import operator_benchmark as op_bench
import torch
import numpy as np
from typing import Optional

from torch.testing._internal.common_quantization import (
    lengths_to_offsets
)

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_quantization'
Error

Line: 7 Column: 1

              import numpy as np
from typing import Optional

from torch.testing._internal.common_quantization import (
    lengths_to_offsets
)

torch.ops.load_library("//caffe2/torch/fb/sparsenn:sparsenn_operators")


            

Reported by Pylint.

Module 'operator_benchmark' has no 'cross_product_configs' member
Error

Line: 14 Column: 47

              torch.ops.load_library("//caffe2/torch/fb/sparsenn:sparsenn_operators")


embedding_bag_rowwise_offsets_short_configs = op_bench.cross_product_configs(
    num_embeddings=(80,),
    embedding_dim=(128, 256),
    num_offsets=range(2, 10),
    enable_per_sample_weights=(True, False),
    include_last_offset=(True, False),

            

Reported by Pylint.

Module 'operator_benchmark' has no 'cross_product_configs' member
Error

Line: 27 Column: 46

              )


embedding_bag_rowwise_offsets_long_configs = op_bench.cross_product_configs(
    num_embeddings=(100, 120, 1000, 10_000, 20_000),
    embedding_dim=(16, 64, 128, 256),
    num_offsets=range(10, 20),
    enable_per_sample_weights=(True, False),
    include_last_offset=(True, False),

            

Reported by Pylint.

Module 'operator_benchmark' has no 'op_list' member
Error

Line: 42 Column: 24

              
full_configs = embedding_bag_rowwise_offsets_short_configs + embedding_bag_rowwise_offsets_long_configs

four_bit_rowwise_ops = op_bench.op_list(
    attrs=(
        ('qembeddingbag_4bit_rowwise_offsets', torch.ops.quantized.embedding_bag_4bit_rowwise_offsets),
    ),
    attr_names=('op_name', 'op_func'),
)

            

Reported by Pylint.

Module 'operator_benchmark' has no 'op_list' member
Error

Line: 49 Column: 20

                  attr_names=('op_name', 'op_func'),
)

byte_rowwise_ops = op_bench.op_list(
    attrs=(
        ('qembeddingbag_byte_rowwise_offsets', torch.ops.quantized.embedding_bag_byte_rowwise_offsets),
    ),
    attr_names=('op_name', 'op_func'),
)

            

Reported by Pylint.

Module 'operator_benchmark' has no 'TorchBenchmarkBase' member
Error

Line: 67 Column: 43

                  return q_pruned_weights, compressed_indices_mapping


class EmbedddingBag4BitRowwiseOffsetsTest(op_bench.TorchBenchmarkBase):
    def init(self,
             num_embeddings: int,
             embedding_dim: int,
             num_offsets: int,
             enable_per_sample_weights: bool,

            

Reported by Pylint.

Module 'operator_benchmark' has no 'TorchBenchmarkBase' member
Error

Line: 153 Column: 43

                                          compressed_indices_mapping=compressed_indices)


class EmbedddingBagByteRowwiseOffsetsTest(op_bench.TorchBenchmarkBase):
    def init(self,
             num_embeddings: int,
             embedding_dim: int,
             num_offsets: int,
             enable_per_sample_weights: bool,

            

Reported by Pylint.

Module 'operator_benchmark' has no 'generate_pt_tests_from_op_list' member
Error

Line: 239 Column: 1

                                          compressed_indices_mapping=self.compressed_indices)


op_bench.generate_pt_tests_from_op_list(four_bit_rowwise_ops,
                                        full_configs,
                                        EmbedddingBag4BitRowwiseOffsetsTest)
op_bench.generate_pt_tests_from_op_list(byte_rowwise_ops,
                                        full_configs,
                                        EmbedddingBagByteRowwiseOffsetsTest)

            

Reported by Pylint.

Module 'operator_benchmark' has no 'generate_pt_tests_from_op_list' member
Error

Line: 242 Column: 1

              op_bench.generate_pt_tests_from_op_list(four_bit_rowwise_ops,
                                        full_configs,
                                        EmbedddingBag4BitRowwiseOffsetsTest)
op_bench.generate_pt_tests_from_op_list(byte_rowwise_ops,
                                        full_configs,
                                        EmbedddingBagByteRowwiseOffsetsTest)


if __name__ == "__main__":

            

Reported by Pylint.

caffe2/python/python_op_test.py
88 issues
Unable to import 'hypothesis'
Error

Line: 8 Column: 1

              from caffe2.python import core, workspace
from caffe2.python.core import CreatePythonOperator
import caffe2.python.hypothesis_test_util as hu
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np


class CustomError(Exception):

            

Reported by Pylint.

Unable to import 'hypothesis.strategies'
Error

Line: 9 Column: 1

              from caffe2.python.core import CreatePythonOperator
import caffe2.python.hypothesis_test_util as hu
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np


class CustomError(Exception):
    pass

            

Reported by Pylint.

Module 'caffe2.python._import_c_extension' has no 'Workspace' member
Error

Line: 129 Column: 14

                          fetched = ws.blobs['internal'].fetch()
            np.testing.assert_almost_equal(fetched, x)

        ws = workspace.C.Workspace()
        net = core.Net("test")
        net.GivenTensorFill([], ['internal'], values=x, shape=x.shape)
        net.Python(f, pass_workspace=True)([], [])
        ws.run(net)


            

Reported by Pylint.

Unused argument 'inputs'
Error

Line: 21 Column: 41

                  raise CustomError("This is an intentional exception.")


def MainOpFunctionThatThrowsCustomError(inputs, _):
    return SubFunctionThatThrowsCustomError()

def MainOpFunctionThatThrowsCustomErrorInBuilder(inputs, _):
    raise CustomError("This is an intentional exception in builder.")


            

Reported by Pylint.
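
Note: the Python-op callback has to accept (inputs, _) even when it ignores them, so the usual resolution is to mark the parameter as intentionally unused. A drop-in sketch:

def MainOpFunctionThatThrowsCustomError(_inputs, _):
    # Leading underscore marks the argument as intentionally unused for Pylint.
    return SubFunctionThatThrowsCustomError()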

Unused argument 'outputs'
Error

Line: 33 Column: 23

                  assert index == 5
    assert extra - 4.2 < 0.0001

    def my_op(inputs, outputs):
        assert inputs[0].data[0] == iterations[0]
        assert name == 'name'
        assert index == 5
        assert extra - 4.2 < 0.0001
        iterations[0] += 1

            

Reported by Pylint.

Unused argument 'outputs'
Error

Line: 125 Column: 23

                      """
        Verify that python op can manipulate workspace directly
        """
        def f(inputs, outputs, ws):
            fetched = ws.blobs['internal'].fetch()
            np.testing.assert_almost_equal(fetched, x)

        ws = workspace.C.Workspace()
        net = core.Net("test")

            

Reported by Pylint.

Unused argument 'inputs'
Error

Line: 125 Column: 15

                      """
        Verify that python op can manipulate workspace directly
        """
        def f(inputs, outputs, ws):
            fetched = ws.blobs['internal'].fetch()
            np.testing.assert_almost_equal(fetched, x)

        ws = workspace.C.Workspace()
        net = core.Net("test")

            

Reported by Pylint.

Unused argument 'outputs'
Error

Line: 137 Column: 23

              
    @given(x=hu.tensor())
    def test_caught_exception_doesnt_terminate(self, x):
        def f(inputs, outputs):
            try:
                raise Exception("Exception in handler")
            except Exception:
                pass


            

Reported by Pylint.

Unused argument 'inputs'
Error

Line: 137 Column: 15

              
    @given(x=hu.tensor())
    def test_caught_exception_doesnt_terminate(self, x):
        def f(inputs, outputs):
            try:
                raise Exception("Exception in handler")
            except Exception:
                pass


            

Reported by Pylint.

Catching too general exception Exception
Error

Line: 140 Column: 20

                      def f(inputs, outputs):
            try:
                raise Exception("Exception in handler")
            except Exception:
                pass

        op = CreatePythonOperator(f, ["x"], ["y"])
        workspace.FeedBlob("x", x)
        workspace.RunOperatorOnce(op)

            

Reported by Pylint.
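
Note: this test deliberately catches and swallows an exception to prove the worker is not terminated, so the broad catch is the point. Narrowing the raised and caught type keeps that intent while satisfying Pylint; a sketch, assuming the surrounding test body:

def f(inputs, outputs):  # pylint: disable=unused-argument
    try:
        raise RuntimeError("Exception in handler")
    except RuntimeError:  # narrowed from the bare Exception
        pass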

torch/testing/_internal/common_distributed.py
88 issues
Module 'torch' has no 'reshape' member
Error

Line: 329 Column: 19

                      # First sparse dimension is [0..rank].
        # Subsequent dimensions are always 0, so we know there is
        # a non-empty intersection between any two sparse tensors.
        indices = torch.reshape(torch.arange(rank + 1), (1, rank + 1))
        shape = [world_size] + [2 for _ in range(dense_dims)]
        for _ in range(sparse_dims - 1):
            indices = torch.cat((indices, torch.zeros(1, rank + 1)))
            shape.append(world_size)
        values = torch.ones([rank + 1] + [2 for _ in range(dense_dims)])

            

Reported by Pylint.

Module 'torch' has no 'arange' member
Error

Line: 329 Column: 33

                      # First sparse dimension is [0..rank].
        # Subsequent dimensions are always 0, so we know there is
        # a non-empty intersection between any two sparse tensors.
        indices = torch.reshape(torch.arange(rank + 1), (1, rank + 1))
        shape = [world_size] + [2 for _ in range(dense_dims)]
        for _ in range(sparse_dims - 1):
            indices = torch.cat((indices, torch.zeros(1, rank + 1)))
            shape.append(world_size)
        values = torch.ones([rank + 1] + [2 for _ in range(dense_dims)])

            

Reported by Pylint.

Module 'torch' has no 'zeros' member
Error

Line: 332 Column: 43

                      indices = torch.reshape(torch.arange(rank + 1), (1, rank + 1))
        shape = [world_size] + [2 for _ in range(dense_dims)]
        for _ in range(sparse_dims - 1):
            indices = torch.cat((indices, torch.zeros(1, rank + 1)))
            shape.append(world_size)
        values = torch.ones([rank + 1] + [2 for _ in range(dense_dims)])
        return torch.sparse_coo_tensor(indices, values, shape)

    def compute_sum(fn, world_size: int):

            

Reported by Pylint.

Module 'torch' has no 'cat' member
Error

Line: 332 Column: 23

                      indices = torch.reshape(torch.arange(rank + 1), (1, rank + 1))
        shape = [world_size] + [2 for _ in range(dense_dims)]
        for _ in range(sparse_dims - 1):
            indices = torch.cat((indices, torch.zeros(1, rank + 1)))
            shape.append(world_size)
        values = torch.ones([rank + 1] + [2 for _ in range(dense_dims)])
        return torch.sparse_coo_tensor(indices, values, shape)

    def compute_sum(fn, world_size: int):

            

Reported by Pylint.

Module 'torch' has no 'ones' member
Error

Line: 334 Column: 18

                      for _ in range(sparse_dims - 1):
            indices = torch.cat((indices, torch.zeros(1, rank + 1)))
            shape.append(world_size)
        values = torch.ones([rank + 1] + [2 for _ in range(dense_dims)])
        return torch.sparse_coo_tensor(indices, values, shape)

    def compute_sum(fn, world_size: int):
        return reduce(
            lambda a, b: a + b, [fn(rank, world_size) for rank in range(world_size)]

            

Reported by Pylint.

Module 'torch' has no 'sparse_coo_tensor' member
Error

Line: 335 Column: 16

                          indices = torch.cat((indices, torch.zeros(1, rank + 1)))
            shape.append(world_size)
        values = torch.ones([rank + 1] + [2 for _ in range(dense_dims)])
        return torch.sparse_coo_tensor(indices, values, shape)

    def compute_sum(fn, world_size: int):
        return reduce(
            lambda a, b: a + b, [fn(rank, world_size) for rank in range(world_size)]
        )

            

Reported by Pylint.

Access to a protected member _get_ddp_logging_data of a client class
Error

Line: 137 Column: 24

              
def verify_ddp_error_logged(model_DDP, err_substr):
    # Verify error was logged in ddp_logging_data.
    ddp_logging_data = model_DDP._get_ddp_logging_data()
    assert "has_error" in ddp_logging_data
    assert "error" in ddp_logging_data
    assert err_substr in ddp_logging_data["error"]



            

Reported by Pylint.
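
Note: _get_ddp_logging_data is an internal hook this test helper is meant to call, so the protected-access report is expected; a targeted pragma documents that without a blanket disable (sketch):

ddp_logging_data = model_DDP._get_ddp_logging_data()  # pylint: disable=protected-access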

Using the global statement
Error

Line: 365 Column: 5

              

def initialize_temp_directories(init_method: Optional[str] = None) -> None:
    global tmp_dir
    tmp_dir = tempfile.TemporaryDirectory()
    os.environ["TEMP_DIR"] = tmp_dir.name
    os.mkdir(os.path.join(tmp_dir.name, "barrier"))
    os.mkdir(os.path.join(tmp_dir.name, "test_dir"))
    init_dir_path = os.path.join(tmp_dir.name, "init_dir")

            

Reported by Pylint.
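
Note: one way to avoid the global statement is to return the TemporaryDirectory handle and let the caller keep it alive. This changes the function's contract, so code that reads the module-level tmp_dir would need updating; a sketch only:

import os
import tempfile
from typing import Optional


def initialize_temp_directories(init_method: Optional[str] = None):
    tmp = tempfile.TemporaryDirectory()
    os.environ["TEMP_DIR"] = tmp.name
    os.mkdir(os.path.join(tmp.name, "barrier"))
    os.mkdir(os.path.join(tmp.name, "test_dir"))
    init_dir_path = os.path.join(tmp.name, "init_dir")
    # ... remaining init_method handling as in the original ...
    return tmp  # caller owns the handle instead of a module-level global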

Access to a protected member _run of a client class
Error

Line: 461 Column: 24

                      for rank in range(int(self.world_size)):
            parent_conn, child_conn = torch.multiprocessing.Pipe()
            process = proc(
                target=self.__class__._run,
                name="process " + str(rank),
                args=(rank, self._current_test_name(), self.file_name, child_conn),
            )
            process.start()
            logger.info(f"Started process {rank} with pid {process.pid}")

            

Reported by Pylint.

Use lazy % formatting in logging functions
Error

Line: 466 Column: 13

                              args=(rank, self._current_test_name(), self.file_name, child_conn),
            )
            process.start()
            logger.info(f"Started process {rank} with pid {process.pid}")
            self.pid_to_pipe[process.pid] = parent_conn
            self.processes.append(process)

    def _fork_processes(self) -> None:
        proc = torch.multiprocessing.get_context("fork").Process

            

Reported by Pylint.
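
Note: Pylint prefers lazy %-style arguments so the string is only built if the record is actually emitted. A drop-in sketch for the flagged line (logger, rank and process come from the quoted code):

logger.info("Started process %s with pid %s", rank, process.pid)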

test/distributed/pipeline/sync/test_balance.py
87 issues
Unable to import 'pytest'
Error

Line: 9 Column: 1

              # LICENSE file in the root directory of this source tree.
import time

import pytest
import torch
from torch import nn

from torch.distributed.pipeline.sync._balance import balance_by_size, balance_by_time, blockpartition
from torch.distributed.pipeline.sync._balance.profile import layerwise_sandbox

            

Reported by Pylint.

Unable to import 'torch'
Error

Line: 10 Column: 1

              import time

import pytest
import torch
from torch import nn

from torch.distributed.pipeline.sync._balance import balance_by_size, balance_by_time, blockpartition
from torch.distributed.pipeline.sync._balance.profile import layerwise_sandbox


            

Reported by Pylint.

Unable to import 'torch'
Error

Line: 11 Column: 1

              
import pytest
import torch
from torch import nn

from torch.distributed.pipeline.sync._balance import balance_by_size, balance_by_time, blockpartition
from torch.distributed.pipeline.sync._balance.profile import layerwise_sandbox

skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")

            

Reported by Pylint.

Unable to import 'torch.distributed.pipeline.sync._balance'
Error

Line: 13 Column: 1

              import torch
from torch import nn

from torch.distributed.pipeline.sync._balance import balance_by_size, balance_by_time, blockpartition
from torch.distributed.pipeline.sync._balance.profile import layerwise_sandbox

skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")

devices = ["cpu"]

            

Reported by Pylint.

Unable to import 'torch.distributed.pipeline.sync._balance.profile'
Error

Line: 14 Column: 1

              from torch import nn

from torch.distributed.pipeline.sync._balance import balance_by_size, balance_by_time, blockpartition
from torch.distributed.pipeline.sync._balance.profile import layerwise_sandbox

skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")

devices = ["cpu"]
if torch.cuda.is_available():

            

Reported by Pylint.

Unused variable 'i'
Error

Line: 83 Column: 17

                          self.times = times

        def forward(self, x):
            for i in range(self.times):
                x = x + torch.rand_like(x, requires_grad=True)
            return x

    sample = torch.rand(10, 100, 100)


            

Reported by Pylint.
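
Note: the loop index is never used, so the conventional underscore name resolves the report; a sketch of the forward() quoted above:

def forward(self, x):
    for _ in range(self.times):
        x = x + torch.rand_like(x, requires_grad=True)
    return x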

Unused variable 'i'
Error

Line: 120 Column: 17

                          self.latent_size = latent_size

        def forward(self, x):
            for i in range(self.latent_size):
                x = x + torch.rand_like(x, requires_grad=True)
            return x

    model = nn.Sequential(
        Tradeoff(param_size=1, latent_size=6),

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              # Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import time

import pytest

            

Reported by Pylint.

Line too long (101/100)
Error

Line: 13 Column: 1

              import torch
from torch import nn

from torch.distributed.pipeline.sync._balance import balance_by_size, balance_by_time, blockpartition
from torch.distributed.pipeline.sync._balance.profile import layerwise_sandbox

skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")

devices = ["cpu"]

            

Reported by Pylint.
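
Note: wrapping the import keeps the line under the 100-character limit; a drop-in sketch:

from torch.distributed.pipeline.sync._balance import (
    balance_by_size,
    balance_by_time,
    blockpartition,
)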

Missing function or method docstring
Error

Line: 23 Column: 1

                  devices.append("cuda")


def test_blockpartition():
    assert blockpartition.solve([1, 2, 3, 4, 5, 6], partitions=2) == [[1, 2, 3, 4], [5, 6]]


def test_blockpartition_zeros():
    assert blockpartition.solve([0, 0], partitions=2) == [[0], [0]]

            

Reported by Pylint.

test/jit/test_remove_mutation.py
87 issues
Unable to import 'torch'
Error

Line: 4 Column: 1

              import os
import sys

import torch
from torch.testing import FileCheck
from typing import List

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

            

Reported by Pylint.

Unable to import 'torch.testing'
Error

Line: 5 Column: 1

              import sys

import torch
from torch.testing import FileCheck
from typing import List

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)

            

Reported by Pylint.

Unable to import 'torch.testing._internal.jit_utils'
Error

Line: 11 Column: 1

              # Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, freeze_rng_state

if __name__ == '__main__':
    raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
                       "\tpython test/test_jit.py TESTNAME\n\n"
                       "instead.")

            

Reported by Pylint.

function already defined line 104
Error

Line: 119 Column: 9

                      FileCheck().check("aten::add_").run(foo.graph)

        @torch.jit.script
        def foo(cond: bool, y):
            if cond:
                x = y
            else:
                x = torch.tensor(2)
            z = x.add_(2)

            

Reported by Pylint.
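
Note: the test scripts two functions under the same name, so the second definition shadows the first. Renaming the second scripted function removes the redefinition report and keeps both graphs inspectable; a sketch — the name is hypothetical and the trailing return is assumed from the truncated snippet:

@torch.jit.script
def foo_with_else_branch(cond: bool, y):
    if cond:
        x = y
    else:
        x = torch.tensor(2)
    z = x.add_(2)
    return z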

Unused variable 'b'
Error

Line: 185 Column: 13

              
        def intermediary_use():
            a = [1, 2]
            b = len(a)
            a.append(3)
            return a

        fn = torch.jit.script(intermediary_use)
        graph = fn.graph

            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 209 Column: 9

              
        fn = torch.jit.script(successful_remove)
        graph = fn.graph
        torch._C._jit_pass_remove_mutation(graph)
        torch._C._jit_pass_constant_propagation(graph)
        FileCheck().check("graph").check_next("Constant").check_next("return").run(graph)
        self.assertEqual(successful_remove(), fn())

    def test_list_indexing_removal(self):

            

Reported by Pylint.

Access to a protected member _jit_pass_remove_mutation of a client class
Error

Line: 209 Column: 9

              
        fn = torch.jit.script(successful_remove)
        graph = fn.graph
        torch._C._jit_pass_remove_mutation(graph)
        torch._C._jit_pass_constant_propagation(graph)
        FileCheck().check("graph").check_next("Constant").check_next("return").run(graph)
        self.assertEqual(successful_remove(), fn())

    def test_list_indexing_removal(self):

            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 210 Column: 9

                      fn = torch.jit.script(successful_remove)
        graph = fn.graph
        torch._C._jit_pass_remove_mutation(graph)
        torch._C._jit_pass_constant_propagation(graph)
        FileCheck().check("graph").check_next("Constant").check_next("return").run(graph)
        self.assertEqual(successful_remove(), fn())

    def test_list_indexing_removal(self):
        @torch.jit.script

            

Reported by Pylint.

Access to a protected member _jit_pass_constant_propagation of a client class
Error

Line: 210 Column: 9

                      fn = torch.jit.script(successful_remove)
        graph = fn.graph
        torch._C._jit_pass_remove_mutation(graph)
        torch._C._jit_pass_constant_propagation(graph)
        FileCheck().check("graph").check_next("Constant").check_next("return").run(graph)
        self.assertEqual(successful_remove(), fn())

    def test_list_indexing_removal(self):
        @torch.jit.script

            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 221 Column: 9

                          x[4] = 3
            return x

        torch._C._jit_pass_remove_mutation(out_of_bounds.graph)
        FileCheck().check("set_item").run(out_of_bounds.graph)

        @torch.jit.script
        def unknown(y: int):
            x = [1, 2]

            

Reported by Pylint.

test/test_bundled_inputs.py
87 issues
Unable to import 'torch'
Error

Line: 6 Column: 1

              import textwrap
from typing import List

import torch
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests


def model_size(sm):

            

Reported by Pylint.

Unable to import 'torch.utils.bundled_inputs'
Error

Line: 7 Column: 1

              from typing import List

import torch
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests


def model_size(sm):
    buffer = io.BytesIO()

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 8 Column: 1

              
import torch
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests


def model_size(sm):
    buffer = io.BytesIO()
    torch.jit.save(sm, buffer)

            

Reported by Pylint.

Access to a protected member _generate_bundled_inputs_for_forward of a client class
Error

Line: 248 Column: 13

                      # inputs not defined so should fail
        with self.assertRaises(Exception):
            mm = torch.jit.script(MultipleMethodModel())
            mm._generate_bundled_inputs_for_forward()
            torch.utils.bundled_inputs.augment_many_model_functions_with_bundled_inputs(
                mm,
                inputs={
                    mm.forward : None,
                    mm.foo : samples,

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              #!/usr/bin/env python3
import io
import textwrap
from typing import List

import torch
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import TestCase, run_tests


            

Reported by Pylint.

Argument name "sm" doesn't conform to snake_case naming style
Error

Line: 11 Column: 1

              from torch.testing._internal.common_utils import TestCase, run_tests


def model_size(sm):
    buffer = io.BytesIO()
    torch.jit.save(sm, buffer)
    return len(buffer.getvalue())



            

Reported by Pylint.
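
Note: Pylint's default argument-name regex rejects the two-character name; either rename the argument or whitelist "sm" via good-names in the Pylint configuration. A renamed, self-contained sketch:

import io

import torch


def model_size(script_module):
    buffer = io.BytesIO()
    torch.jit.save(script_module, buffer)
    return len(buffer.getvalue())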

Missing function or method docstring
Error

Line: 11 Column: 1

              from torch.testing._internal.common_utils import TestCase, run_tests


def model_size(sm):
    buffer = io.BytesIO()
    torch.jit.save(sm, buffer)
    return len(buffer.getvalue())



            

Reported by Pylint.

Argument name "sm" doesn't conform to snake_case naming style
Error

Line: 17 Column: 1

                  return len(buffer.getvalue())


def save_and_load(sm):
    buffer = io.BytesIO()
    torch.jit.save(sm, buffer)
    buffer.seek(0)
    return torch.jit.load(buffer)


            

Reported by Pylint.

Missing function or method docstring
Error

Line: 17 Column: 1

                  return len(buffer.getvalue())


def save_and_load(sm):
    buffer = io.BytesIO()
    torch.jit.save(sm, buffer)
    buffer.seek(0)
    return torch.jit.load(buffer)


            

Reported by Pylint.

Missing class docstring
Error

Line: 24 Column: 1

                  return torch.jit.load(buffer)


class TestBundledInputs(TestCase):

    def test_single_tensors(self):
        class SingleTensorModel(torch.nn.Module):
            def forward(self, arg):
                return arg

            

Reported by Pylint.