The following issues were found:

benchmarks/tensorexpr/microbenchmarks.py
130 issues
Unable to import 'torch'
Error

Line: 1 Column: 1

              import torch
import torch._C._te as te
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import argparse


            

Reported by Pylint.

Unable to import 'torch._C._te'
Error

Line: 2 Column: 1

              import torch
import torch._C._te as te
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import argparse


            

Reported by Pylint.

Unable to import 'pandas'
Error

Line: 5 Column: 1

              import torch._C._te as te
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import argparse

class kernel_arena_scope(object):

            

Reported by Pylint.

Unable to import 'seaborn'
Error

Line: 7 Column: 1

              import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import argparse

class kernel_arena_scope(object):
    def __enter__(self):
        self.scope = te.KernelScope()

            

Reported by Pylint.

Attribute 'scope' defined outside __init__
Error

Line: 12 Column: 9

              
class kernel_arena_scope(object):
    def __enter__(self):
        self.scope = te.KernelScope()

    def __exit__(self, typ, val, traceback):
        self.scope = None

unary_ops = [

            

Reported by Pylint.

Attribute 'scope' defined outside __init__
Error

Line: 15 Column: 9

                      self.scope = te.KernelScope()

    def __exit__(self, typ, val, traceback):
        self.scope = None

unary_ops = [
    ("sin", torch.sin),
    ("cos", torch.cos),
    ("tan", torch.tan),

            

Reported by Pylint.

Redefining name 'nnc_name' from outer scope (line 132)
Error

Line: 50 Column: 23

                  # ("isnan", torch.isnan), # no out variant
]

def gen_unary_nnc_fun(nnc_name):
    def nnc_fun(A, B):
        def compute(i, j):
            return getattr(A.load([i, j]), nnc_name)()
        return compute
    return nnc_fun

            

Reported by Pylint.

Unused argument 'B'
Error

Line: 51 Column: 20

              ]

def gen_unary_nnc_fun(nnc_name):
    def nnc_fun(A, B):
        def compute(i, j):
            return getattr(A.load([i, j]), nnc_name)()
        return compute
    return nnc_fun


            

Reported by Pylint.

Unused argument 'b'
Error

Line: 58 Column: 22

                  return nnc_fun

def gen_unary_torch_fun(torch_op):
    def torch_fun(a, b, out):
        def fun():
            return torch_op(a, out=out)
        return fun
    return torch_fun


            

Reported by Pylint.

Unused argument 'B'
Error

Line: 102 Column: 17

              ]


def nnc_relu(A, B):
    def f(i, j):
        return torch._C._te.ifThenElse(A.load([i, j]) < torch._C._te.ExprHandle.float(0),
                                       torch._C._te.ExprHandle.float(0), A.load([i, j]))
    return f


            

Reported by Pylint.

test/jit/test_misc.py
129 issues
Unable to import 'torch.testing._internal.jit_utils'
Error

Line: 3 Column: 1

              from typing import Any, Dict, List, Optional, Tuple

from torch.testing._internal.jit_utils import JitTestCase, make_global
from torch.testing import FileCheck
from torch import jit
from jit.test_module_interface import TestModuleInterface  # noqa: F401
import unittest
import os
import sys

            

Reported by Pylint.

Unable to import 'torch.testing'
Error

Line: 4 Column: 1

              from typing import Any, Dict, List, Optional, Tuple

from torch.testing._internal.jit_utils import JitTestCase, make_global
from torch.testing import FileCheck
from torch import jit
from jit.test_module_interface import TestModuleInterface  # noqa: F401
import unittest
import os
import sys

            

Reported by Pylint.

Unable to import 'torch'
Error

Line: 5 Column: 1

              
from torch.testing._internal.jit_utils import JitTestCase, make_global
from torch.testing import FileCheck
from torch import jit
from jit.test_module_interface import TestModuleInterface  # noqa: F401
import unittest
import os
import sys
import torch

            

Reported by Pylint.

Unable to import 'torch'
Error

Line: 10 Column: 1

              import unittest
import os
import sys
import torch
import torch.testing._internal.jit_utils
import torch.nn as nn

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

            

Reported by Pylint.

Unable to import 'torch.testing._internal.jit_utils'
Error

Line: 11 Column: 1

              import os
import sys
import torch
import torch.testing._internal.jit_utils
import torch.nn as nn

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)

            

Reported by Pylint.

Unable to import 'torch.nn'
Error

Line: 12 Column: 1

              import sys
import torch
import torch.testing._internal.jit_utils
import torch.nn as nn

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)


            

Reported by Pylint.

class already defined line 68
Error

Line: 73 Column: 9

                                  pass
            torch.jit.script(M())

        class M(torch.nn.Module):
            def forward(self, *, n_tokens: int, device_name: str):
                return n_tokens, device_name

        sm = torch.jit.script(M())


            

Reported by Pylint.

Unable to import 'torch._jit_internal'
Error

Line: 236 Column: 9

                      """
        Test BroadcastingList and torch.nn._size_N_t alias
        """
        from torch._jit_internal import BroadcastingList2
        from torch.nn.common_types import _size_2_t

        def sum_i(x: _size_2_t) -> int:
            return x[0] + x[1]


            

Reported by Pylint.

Unable to import 'torch.nn.common_types'
Error

Line: 237 Column: 9

                      Test BroadcastingList and torch.nn._size_N_t alias
        """
        from torch._jit_internal import BroadcastingList2
        from torch.nn.common_types import _size_2_t

        def sum_i(x: _size_2_t) -> int:
            return x[0] + x[1]

        def sum_f(x: BroadcastingList2[float]) -> float:

            

Reported by Pylint.

Unused TestModuleInterface imported from jit.test_module_interface
Error

Line: 6 Column: 1

              from torch.testing._internal.jit_utils import JitTestCase, make_global
from torch.testing import FileCheck
from torch import jit
from jit.test_module_interface import TestModuleInterface  # noqa: F401
import unittest
import os
import sys
import torch
import torch.testing._internal.jit_utils

            

Reported by Pylint.

test/jit/test_scriptmod_ann.py
129 issues
Unable to import 'torch'
Error

Line: 5 Column: 1

              import sys
import warnings

import torch
from typing import List, Dict, Optional

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)

            

Reported by Pylint.

Unable to import 'torch.testing._internal.jit_utils'
Error

Line: 11 Column: 1

              # Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase

if __name__ == '__main__':
    raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
                       "\tpython test/test_jit.py TESTNAME\n\n"
                       "instead.")

            

Reported by Pylint.

Unable to import 'torch'
Error

Line: 241 Column: 9

                              torch.jit.script(M())

    def test_annotated_with_torch_jit_import(self):
        from torch import jit

        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.x = jit.annotate(Optional[str], None)

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              import os
import sys
import warnings

import torch
from typing import List, Dict, Optional

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

            

Reported by Pylint.

standard import "from typing import List, Dict, Optional" should be placed before "import torch"
Error

Line: 6 Column: 1

              import warnings

import torch
from typing import List, Dict, Optional

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase

            

Reported by Pylint.

Import "from torch.testing._internal.jit_utils import JitTestCase" should be placed at the top of the module
Error

Line: 11 Column: 1

              # Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase

if __name__ == '__main__':
    raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
                       "\tpython test/test_jit.py TESTNAME\n\n"
                       "instead.")

            

Reported by Pylint.

Missing class docstring
Error

Line: 18 Column: 1

                                     "\tpython test/test_jit.py TESTNAME\n\n"
                       "instead.")

class TestScriptModuleInstanceAttributeTypeAnnotation(JitTestCase):

    # NB: There are no tests for `Tuple` or `NamedTuple` here. In fact,
    # reassigning a non-empty Tuple to an attribute previously typed
    # as containing an empty Tuple SHOULD fail. See note in `_check.py`


            

Reported by Pylint.

Missing function or method docstring
Error

Line: 24 Column: 5

                  # reassigning a non-empty Tuple to an attribute previously typed
    # as containing an empty Tuple SHOULD fail. See note in `_check.py`

    def test_annotated_falsy_base_type(self):
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.x: int = 0


            

Reported by Pylint.

Class name "M" doesn't conform to PascalCase naming style
Error

Line: 25 Column: 9

                  # as containing an empty Tuple SHOULD fail. See note in `_check.py`

    def test_annotated_falsy_base_type(self):
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.x: int = 0

            def forward(self, x: int):

            

Reported by Pylint.

Missing class docstring
Error

Line: 25 Column: 9

                  # as containing an empty Tuple SHOULD fail. See note in `_check.py`

    def test_annotated_falsy_base_type(self):
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.x: int = 0

            def forward(self, x: int):

            

Reported by Pylint.

torch/onnx/symbolic_opset10.py
129 issues
Module 'torch' has no 'int64' member
Error

Line: 46 Column: 63

                    # Integer division does truncation rounding
        div = g.op("Div", self, other)
        # Division is negative if: self < 0 != other < 0
        zero = g.op("Constant", value_t=torch.tensor(0, dtype=torch.int64))
        negative = g.op("Xor",
                        g.op("Less", self, zero),
                        g.op("Less", other, zero))

        # For negative numbers with self % other != 0, subtract 1 to round down instead of up

            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 46 Column: 41

                    # Integer division does truncation rounding
        div = g.op("Div", self, other)
        # Division is negative if: self < 0 != other < 0
        zero = g.op("Constant", value_t=torch.tensor(0, dtype=torch.int64))
        negative = g.op("Xor",
                        g.op("Less", self, zero),
                        g.op("Less", other, zero))

        # For negative numbers with self % other != 0, subtract 1 to round down instead of up

            

Reported by Pylint.

Module 'torch' has no 'int64' member
Error

Line: 56 Column: 62

                      fixup_mask = g.op("And", negative,
                          g.op("Not", g.op("Equal", mod, zero)))

        one = g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64))
        fixup = g.op("Sub", div, one)
        return g.op("Where", fixup_mask, fixup, div)


@parse_args("v", "i", "i", "none")

            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 56 Column: 40

                      fixup_mask = g.op("And", negative,
                          g.op("Not", g.op("Equal", mod, zero)))

        one = g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64))
        fixup = g.op("Sub", div, one)
        return g.op("Where", fixup_mask, fixup, div)


@parse_args("v", "i", "i", "none")

            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 180 Column: 45

                      starts = sym_help._unsqueeze_helper(g, starts, [0])
        ends = sym_help._unsqueeze_helper(g, ends, [0])
        if isinstance(axes, int):
            axes = g.op("Constant", value_t=torch.tensor(axes))
        axes = sym_help._unsqueeze_helper(g, axes, [0])
    else:
        assert len(starts) == len(ends)
        assert len(starts) == len(axes)
        assert steps is None or len(starts) == len(steps)

            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 189 Column: 41

                      if len(starts) == 1 and starts[0] == 0 and ends[0] == 9223372036854775807\
           and (steps is None or (len(steps) == 1 and steps[0] == 1)):
            return input
        axes = g.op("Constant", value_t=torch.tensor(axes))
        starts = g.op("Constant", value_t=torch.tensor(starts))
        ends = g.op("Constant", value_t=torch.tensor(ends))
    if steps is None:
        return g.op("Slice", input, starts, ends, axes)
    steps = g.op("Constant", value_t=torch.tensor(steps))

            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 190 Column: 43

                         and (steps is None or (len(steps) == 1 and steps[0] == 1)):
            return input
        axes = g.op("Constant", value_t=torch.tensor(axes))
        starts = g.op("Constant", value_t=torch.tensor(starts))
        ends = g.op("Constant", value_t=torch.tensor(ends))
    if steps is None:
        return g.op("Slice", input, starts, ends, axes)
    steps = g.op("Constant", value_t=torch.tensor(steps))
    return g.op("Slice", input, starts, ends, axes, steps)

            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 191 Column: 41

                          return input
        axes = g.op("Constant", value_t=torch.tensor(axes))
        starts = g.op("Constant", value_t=torch.tensor(starts))
        ends = g.op("Constant", value_t=torch.tensor(ends))
    if steps is None:
        return g.op("Slice", input, starts, ends, axes)
    steps = g.op("Constant", value_t=torch.tensor(steps))
    return g.op("Slice", input, starts, ends, axes, steps)


            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 194 Column: 38

                      ends = g.op("Constant", value_t=torch.tensor(ends))
    if steps is None:
        return g.op("Slice", input, starts, ends, axes)
    steps = g.op("Constant", value_t=torch.tensor(steps))
    return g.op("Slice", input, starts, ends, axes, steps)


def slice(g, self, *args):
    if len(args) == 4:

            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 218 Column: 46

                     (not isinstance(dim, int) and dim.node().kind() != 'onnx::Constant'):
        dynamic_slice = True
        if is_start_none:
            start = g.op("Constant", value_t=torch.tensor(0))
        if is_end_none:
            end = g.op("Constant", value_t=torch.tensor(9223372036854775807))
    else:
        start = [0 if is_start_none else sym_help._parse_arg(start, 'i')]
        end = [9223372036854775807 if is_end_none else sym_help._parse_arg(end, 'i')]

            

Reported by Pylint.

torch/testing/_core.py
129 issues
Module 'torch' has no 'dtype' member
Error

Line: 44 Column: 24

              # Helper function that returns True when the dtype is an integral dtype,
# False otherwise.
# TODO: implement numpy-like issubdtype
def is_integral(dtype: torch.dtype) -> bool:
    # Skip complex/quantized types
    dtypes = [x for x in get_all_dtypes() if x not in get_all_complex_dtypes()]
    return dtype in dtypes and not dtype.is_floating_point

def is_quantized(dtype: torch.dtype) -> bool:

            

Reported by Pylint.

Module 'torch' has no 'dtype' member
Error

Line: 49 Column: 25

                  dtypes = [x for x in get_all_dtypes() if x not in get_all_complex_dtypes()]
    return dtype in dtypes and not dtype.is_floating_point

def is_quantized(dtype: torch.dtype) -> bool:
    return dtype in (torch.quint8, torch.qint8, torch.qint32, torch.quint4x2)

# Helper function that maps a flattened index back into the given shape
# TODO: consider adding torch.unravel_index
def _unravel_index(flat_index, shape):

            

Reported by Pylint.

Module 'torch' has no 'qint32' member
Error

Line: 50 Column: 49

                  return dtype in dtypes and not dtype.is_floating_point

def is_quantized(dtype: torch.dtype) -> bool:
    return dtype in (torch.quint8, torch.qint8, torch.qint32, torch.quint4x2)

# Helper function that maps a flattened index back into the given shape
# TODO: consider adding torch.unravel_index
def _unravel_index(flat_index, shape):
    flat_index = operator.index(flat_index)

            

Reported by Pylint.

Module 'torch' has no 'quint8' member
Error

Line: 50 Column: 22

                  return dtype in dtypes and not dtype.is_floating_point

def is_quantized(dtype: torch.dtype) -> bool:
    return dtype in (torch.quint8, torch.qint8, torch.qint32, torch.quint4x2)

# Helper function that maps a flattened index back into the given shape
# TODO: consider adding torch.unravel_index
def _unravel_index(flat_index, shape):
    flat_index = operator.index(flat_index)

            

Reported by Pylint.

Module 'torch' has no 'quint4x2' member
Error

Line: 50 Column: 63

                  return dtype in dtypes and not dtype.is_floating_point

def is_quantized(dtype: torch.dtype) -> bool:
    return dtype in (torch.quint8, torch.qint8, torch.qint32, torch.quint4x2)

# Helper function that maps a flattened index back into the given shape
# TODO: consider adding torch.unravel_index
def _unravel_index(flat_index, shape):
    flat_index = operator.index(flat_index)

            

Reported by Pylint.

Module 'torch' has no 'qint8' member
Error

Line: 50 Column: 36

                  return dtype in dtypes and not dtype.is_floating_point

def is_quantized(dtype: torch.dtype) -> bool:
    return dtype in (torch.quint8, torch.qint8, torch.qint32, torch.quint4x2)

# Helper function that maps a flattened index back into the given shape
# TODO: consider adding torch.unravel_index
def _unravel_index(flat_index, shape):
    flat_index = operator.index(flat_index)

            

Reported by Pylint.

Module 'torch' has no 'Size' member
Error

Line: 59 Column: 17

                  res = []

    # Short-circuits on zero dim tensors
    if shape == torch.Size([]):
        return 0

    for size in shape[::-1]:
        res.append(flat_index % size)
        flat_index = flat_index // size

            

Reported by Pylint.

Module 'torch' has no 'bool' member
Error

Line: 108 Column: 23

                  # when rtol is zero and atol is less than one
    if (
        (is_integral(a.dtype) and rtol == 0 and atol < 1)
        or a.dtype is torch.bool
        or is_quantized(a.dtype)
    ):
        if (a == b).all().item():
            return (True, None)


            

Reported by Pylint.

Module 'torch' has no 'long' member
Error

Line: 118 Column: 23

                      # NOTE: converts to long to correctly represent differences
        # (especially between uint8 tensors)
        identity_mask = a != b
        a_flat = a.to(torch.long).flatten()
        b_flat = b.to(torch.long).flatten()
        count_non_identical = torch.sum(identity_mask, dtype=torch.long)
        diff = torch.abs(a_flat - b_flat)
        greatest_diff_index = torch.argmax(diff)
        debug_msg = ("Found {0} different element(s) (out of {1}), with the greatest "

            

Reported by Pylint.

Module 'torch' has no 'long' member
Error

Line: 119 Column: 23

                      # (especially between uint8 tensors)
        identity_mask = a != b
        a_flat = a.to(torch.long).flatten()
        b_flat = b.to(torch.long).flatten()
        count_non_identical = torch.sum(identity_mask, dtype=torch.long)
        diff = torch.abs(a_flat - b_flat)
        greatest_diff_index = torch.argmax(diff)
        debug_msg = ("Found {0} different element(s) (out of {1}), with the greatest "
                     "difference of {2} ({3} vs. {4}) occurring at index "

            

Reported by Pylint.

torch/nn/quantizable/modules/activation.py
128 issues
Module 'torch' has no '_choose_qparams_per_tensor' member
Error

Line: 235 Column: 22

                      #       and the bias_v (which are cat'ed with k/v being first).
        if converted.bias_k is not None:
            bias_k = converted._parameters.pop('bias_k')
            sc, zp = torch._choose_qparams_per_tensor(bias_k,
                                                      reduce_range=False)
            bias_k = torch.quantize_per_tensor(bias_k, sc, zp, torch.quint8)
            setattr(converted, 'bias_k', bias_k)  # noqa: B010

        if converted.bias_v is not None:

            

Reported by Pylint.

Module 'torch' has no 'quint8' member
Error

Line: 237 Column: 64

                          bias_k = converted._parameters.pop('bias_k')
            sc, zp = torch._choose_qparams_per_tensor(bias_k,
                                                      reduce_range=False)
            bias_k = torch.quantize_per_tensor(bias_k, sc, zp, torch.quint8)
            setattr(converted, 'bias_k', bias_k)  # noqa: B010

        if converted.bias_v is not None:
            bias_v = converted._parameters.pop('bias_v')
            sc, zp = torch._choose_qparams_per_tensor(bias_k,

            

Reported by Pylint.

Module 'torch' has no 'quantize_per_tensor' member
Error

Line: 237 Column: 22

                          bias_k = converted._parameters.pop('bias_k')
            sc, zp = torch._choose_qparams_per_tensor(bias_k,
                                                      reduce_range=False)
            bias_k = torch.quantize_per_tensor(bias_k, sc, zp, torch.quint8)
            setattr(converted, 'bias_k', bias_k)  # noqa: B010

        if converted.bias_v is not None:
            bias_v = converted._parameters.pop('bias_v')
            sc, zp = torch._choose_qparams_per_tensor(bias_k,

            

Reported by Pylint.

Module 'torch' has no '_choose_qparams_per_tensor' member
Error

Line: 242 Column: 22

              
        if converted.bias_v is not None:
            bias_v = converted._parameters.pop('bias_v')
            sc, zp = torch._choose_qparams_per_tensor(bias_k,
                                                      reduce_range=False)
            bias_v = torch.quantize_per_tensor(bias_v, sc, zp, torch.quint8)
            setattr(converted, 'bias_v', bias_v)  # noqa: B010

        return converted

            

Reported by Pylint.

Module 'torch' has no 'quantize_per_tensor' member
Error

Line: 244 Column: 22

                          bias_v = converted._parameters.pop('bias_v')
            sc, zp = torch._choose_qparams_per_tensor(bias_k,
                                                      reduce_range=False)
            bias_v = torch.quantize_per_tensor(bias_v, sc, zp, torch.quint8)
            setattr(converted, 'bias_v', bias_v)  # noqa: B010

        return converted

    def forward(self,

            

Reported by Pylint.

Module 'torch' has no 'quint8' member
Error

Line: 244 Column: 64

                          bias_v = converted._parameters.pop('bias_v')
            sc, zp = torch._choose_qparams_per_tensor(bias_k,
                                                      reduce_range=False)
            bias_v = torch.quantize_per_tensor(bias_v, sc, zp, torch.quint8)
            setattr(converted, 'bias_v', bias_v)  # noqa: B010

        return converted

    def forward(self,

            

Reported by Pylint.

Module 'torch' has no 'float64' member
Error

Line: 336 Column: 75

                      q = self.q_scaling_product.mul_scalar(q, scaling)

        if attn_mask is not None:
            assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \
                attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \
                'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype)
            if attn_mask.dtype == torch.uint8:
                warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
                attn_mask = attn_mask.to(torch.bool)

            

Reported by Pylint.

Module 'torch' has no 'float32' member
Error

Line: 336 Column: 39

                      q = self.q_scaling_product.mul_scalar(q, scaling)

        if attn_mask is not None:
            assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \
                attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \
                'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype)
            if attn_mask.dtype == torch.uint8:
                warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
                attn_mask = attn_mask.to(torch.bool)

            

Reported by Pylint.

Module 'torch' has no 'bool' member
Error

Line: 337 Column: 106

              
        if attn_mask is not None:
            assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \
                attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \
                'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype)
            if attn_mask.dtype == torch.uint8:
                warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
                attn_mask = attn_mask.to(torch.bool)


            

Reported by Pylint.

Module 'torch' has no 'uint8' member
Error

Line: 337 Column: 72

              
        if attn_mask is not None:
            assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \
                attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \
                'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype)
            if attn_mask.dtype == torch.uint8:
                warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
                attn_mask = attn_mask.to(torch.bool)


            

Reported by Pylint.

test/test_nnapi.py
128 issues
Unable to import 'torch'
Error

Line: 4 Column: 1

              #!/usr/bin/env python3
import os
import ctypes
import torch
from typing import Tuple
from torch.backends._nnapi.prepare import convert_model_to_nnapi
from torch.testing._internal.common_utils import TestCase, run_tests



            

Reported by Pylint.

Unable to import 'torch.backends._nnapi.prepare'
Error

Line: 6 Column: 1

              import ctypes
import torch
from typing import Tuple
from torch.backends._nnapi.prepare import convert_model_to_nnapi
from torch.testing._internal.common_utils import TestCase, run_tests


def qpt(t, scale, zero_point, dtype=torch.quint8):
    t = torch.tensor(t)

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 7 Column: 1

              import torch
from typing import Tuple
from torch.backends._nnapi.prepare import convert_model_to_nnapi
from torch.testing._internal.common_utils import TestCase, run_tests


def qpt(t, scale, zero_point, dtype=torch.quint8):
    t = torch.tensor(t)
    return torch.quantize_per_tensor(t, scale, zero_point, dtype)

            

Reported by Pylint.

Unused argument 'zero_point'
Error

Line: 80 Column: 58

                                  # to get a nice message.
                    self.assertEqual(eager_output, nnapi_output, atol=0, rtol=0)

    def float_and_quant_and_nhwc(self, inp_float, scale, zero_point):
        torch.manual_seed(29)
        inp_quant = qpt(inp_float, 0.03, 128)
        return [
            ("float", inp_float),
            ("float-nhwc", nhwc(inp_float)),

            

Reported by Pylint.

Unused argument 'scale'
Error

Line: 80 Column: 51

                                  # to get a nice message.
                    self.assertEqual(eager_output, nnapi_output, atol=0, rtol=0)

    def float_and_quant_and_nhwc(self, inp_float, scale, zero_point):
        torch.manual_seed(29)
        inp_quant = qpt(inp_float, 0.03, 128)
        return [
            ("float", inp_float),
            ("float-nhwc", nhwc(inp_float)),

            

Reported by Pylint.

Cell variable op defined in loop
Error

Line: 278 Column: 28

                          with self.subTest(op):
                class UnaryModule(torch.nn.Module):
                    def forward(self, arg):
                        if op == "relu":
                            return torch.nn.functional.relu(arg)
                        if op == "sigmoid":
                            return torch.sigmoid(arg)
                        raise Exception("Bad op")
                self.check(UnaryModule(), torch.tensor([-1.0, 1.0]))

            

Reported by Pylint.

Cell variable op defined in loop
Error

Line: 280 Column: 28

                                  def forward(self, arg):
                        if op == "relu":
                            return torch.nn.functional.relu(arg)
                        if op == "sigmoid":
                            return torch.sigmoid(arg)
                        raise Exception("Bad op")
                self.check(UnaryModule(), torch.tensor([-1.0, 1.0]))

    def test_pointwise_binary(self):

            

Reported by Pylint.

Cell variable op defined in loop
Error

Line: 290 Column: 28

                          with self.subTest(op):
                class BinaryModule(torch.nn.Module):
                    def forward(self, lhs, rhs):
                        if op == "add":
                            return lhs + rhs
                        if op == "sub":
                            return lhs - rhs
                        if op == "mul":
                            return lhs * rhs

            

Reported by Pylint.

Cell variable op defined in loop
Error

Line: 292 Column: 28

                                  def forward(self, lhs, rhs):
                        if op == "add":
                            return lhs + rhs
                        if op == "sub":
                            return lhs - rhs
                        if op == "mul":
                            return lhs * rhs
                        if op == "div":
                            return lhs / rhs

            

Reported by Pylint.

Cell variable op defined in loop
Error

Line: 294 Column: 28

                                          return lhs + rhs
                        if op == "sub":
                            return lhs - rhs
                        if op == "mul":
                            return lhs * rhs
                        if op == "div":
                            return lhs / rhs
                        raise Exception("Bad op")


            

Reported by Pylint.

test/onnx/test_pytorch_onnx_caffe2_quantized.py
127 issues
Unable to import 'torch.onnx'
Error

Line: 3 Column: 1

              import numpy as np
import unittest
import torch.onnx
import torch.nn as nn
import torch.nn.quantized as nnq
import io

import onnx
import caffe2.python.onnx.backend as c2

            

Reported by Pylint.

Unable to import 'torch.nn'
Error

Line: 4 Column: 1

              import numpy as np
import unittest
import torch.onnx
import torch.nn as nn
import torch.nn.quantized as nnq
import io

import onnx
import caffe2.python.onnx.backend as c2

            

Reported by Pylint.

Unable to import 'torch.nn.quantized'
Error

Line: 5 Column: 1

              import unittest
import torch.onnx
import torch.nn as nn
import torch.nn.quantized as nnq
import io

import onnx
import caffe2.python.onnx.backend as c2


            

Reported by Pylint.

Unable to import 'onnx'
Error

Line: 8 Column: 1

              import torch.nn.quantized as nnq
import io

import onnx
import caffe2.python.onnx.backend as c2

class TestQuantizedOps(unittest.TestCase):



            

Reported by Pylint.

Unable to import 'caffe2.python.onnx.backend'
Error

Line: 9 Column: 1

              import io

import onnx
import caffe2.python.onnx.backend as c2

class TestQuantizedOps(unittest.TestCase):


    def generic_test(self, model, sample_inputs, input_names=None, decimal=3, relaxed_check=False):

            

Reported by Pylint.

Using deprecated method assert_()
Error

Line: 46 Column: 13

              
            # This check had to be changed to account for changes in
            # qnnpack's requant logic.
            np.testing.assert_(max_diff <= 1, "Maximum absolute difference must be less than 1")
        else:
            np.testing.assert_almost_equal(output.detach().numpy(), caffe_res, decimal=decimal)


    def generic_unary_test(self, op):

            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 86 Column: 37

                  def test_quantized_relu(self):
        self.generic_unary_test(torch.nn.ReLU())

    def export_to_onnx(self, model, input, input_names):
        outputs = model(input)

        traced = torch.jit.trace(model, input)
        buf = io.BytesIO()
        torch.jit.save(traced, buf)

            

Reported by Pylint.

Using deprecated method assert_()
Error

Line: 134 Column: 9

                      # Permute pytorch output to NHWC
        # This check had to be changed to account for changes in
        # qnnpack's requant logic.
        np.testing.assert_(max_diff <= 1, "Maximum absolute difference must be less than 1")

    def test_qconv_model(self):
        class ConvModel(torch.nn.Module):
            def __init__(self):
                super(ConvModel, self).__init__()

            

Reported by Pylint.

Using deprecated method assert_()
Error

Line: 167 Column: 9

                      # Permute pytorch output to NHWC
        # This check had to be changed to account for changes in
        # qnnpack's requant logic.
        np.testing.assert_(max_diff <= 1, "Maximum absolute difference must be less than 1")

    def test_upsample(self):
        class QUpsampleModule(torch.nn.Module):
            def __init__(self):
                super(QUpsampleModule, self).__init__()

            

Reported by Pylint.

Unused variable 'i'
Error

Line: 300 Column: 21

                              self.conv1 = nn.Conv2d(3, 3, 1)
                self.relu1 = nn.ReLU(inplace=False)
                layers = []
                for i in range(3):
                    layers.append(ConvBNReLUModule())
                self.features = nn.Sequential(*layers)
                head = [nn.Linear(300, 10), nn.ReLU(inplace=False)]
                self.classifier = nn.Sequential(*head)
                self.seq = nn.Sequential()

            

Reported by Pylint.

test/test_shape_ops.py
127 issues
Unable to import 'torch'
Error

Line: 1 Column: 1

              import torch
import numpy as np

from itertools import product, combinations, permutations, chain
from functools import partial
import random
import warnings

from torch._six import nan

            

Reported by Pylint.

Unable to import 'torch._six'
Error

Line: 9 Column: 1

              import random
import warnings

from torch._six import nan
from torch.testing._internal.common_utils import (
    TestCase, run_tests, make_tensor, torch_to_numpy_dtype_dict)
from torch.testing._internal.common_device_type import (
    instantiate_device_type_tests, onlyCPU, onlyCUDA, dtypes, onlyOnCPUAndCUDA,
    dtypesIfCPU, dtypesIfCUDA, largeTensorTest)

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 10 Column: 1

              import warnings

from torch._six import nan
from torch.testing._internal.common_utils import (
    TestCase, run_tests, make_tensor, torch_to_numpy_dtype_dict)
from torch.testing._internal.common_device_type import (
    instantiate_device_type_tests, onlyCPU, onlyCUDA, dtypes, onlyOnCPUAndCUDA,
    dtypesIfCPU, dtypesIfCUDA, largeTensorTest)


            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_device_type'
Error

Line: 12 Column: 1

              from torch._six import nan
from torch.testing._internal.common_utils import (
    TestCase, run_tests, make_tensor, torch_to_numpy_dtype_dict)
from torch.testing._internal.common_device_type import (
    instantiate_device_type_tests, onlyCPU, onlyCUDA, dtypes, onlyOnCPUAndCUDA,
    dtypesIfCPU, dtypesIfCUDA, largeTensorTest)

# TODO: replace with make_tensor
def _generate_input(shape, dtype, device, with_extremal):

            

Reported by Pylint.

TODO: replace with make_tensor
Error

Line: 16 Column: 3

                  instantiate_device_type_tests, onlyCPU, onlyCUDA, dtypes, onlyOnCPUAndCUDA,
    dtypesIfCPU, dtypesIfCUDA, largeTensorTest)

# TODO: replace with make_tensor
def _generate_input(shape, dtype, device, with_extremal):
    if shape == ():
        x = torch.tensor((), dtype=dtype, device=device)
    else:
        if dtype.is_floating_point or dtype.is_complex:

            

Reported by Pylint.

TODO: update to work on CUDA, too
Error

Line: 48 Column: 3

              
class TestShapeOps(TestCase):

    # TODO: update to work on CUDA, too
    @onlyCPU
    def test_unbind(self, device):
        x = torch.rand(2, 3, 4, 5)
        for dim in range(4):
            res = torch.unbind(x, dim)

            

Reported by Pylint.

Unused argument 'device'
Error

Line: 50 Column: 27

              
    # TODO: update to work on CUDA, too
    @onlyCPU
    def test_unbind(self, device):
        x = torch.rand(2, 3, 4, 5)
        for dim in range(4):
            res = torch.unbind(x, dim)
            res2 = x.unbind(dim)
            self.assertEqual(x.size(dim), len(res))

            

Reported by Pylint.

TODO: update to work on CUDA, too?
Error

Line: 61 Column: 3

                              self.assertEqual(x.select(dim, i), res[i])
                self.assertEqual(x.select(dim, i), res2[i])

    # TODO: update to work on CUDA, too?
    @onlyCPU
    def test_tolist(self, device):
        list0D = []
        tensor0D = torch.tensor(list0D)
        self.assertEqual(tensor0D.tolist(), list0D)

            

Reported by Pylint.

Unused argument 'device'
Error

Line: 63 Column: 27

              
    # TODO: update to work on CUDA, too?
    @onlyCPU
    def test_tolist(self, device):
        list0D = []
        tensor0D = torch.tensor(list0D)
        self.assertEqual(tensor0D.tolist(), list0D)

        table1D = [1., 2., 3.]

            

Reported by Pylint.

Unused argument 'idx'
Error

Line: 140 Column: 55

                                  if nd == 0:
                        continue

                    def make_index_negative(sequence, idx):
                        sequence = list(sequence)
                        sequence[random_idx] = sequence[random_idx] - nd
                        return tuple(src_sequence)

                    for src_sequence in permutations(range(nd), r=random.randint(1, nd)):

            

Reported by Pylint.

torch/fx/graph.py
125 issues
Attempted relative import beyond top-level package
Error

Line: 1 Column: 1

              from .node import Node, Argument, Target, map_arg, _type_repr, _get_qualified_name
import torch.utils._pytree as pytree
from . import _pytree as fx_pytree

from typing import TYPE_CHECKING, Callable, Any, List, Dict, NamedTuple, Optional, Tuple, Set, FrozenSet, Type
from dataclasses import dataclass
from contextlib import contextmanager
import copy
import torch

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 3 Column: 1

              from .node import Node, Argument, Target, map_arg, _type_repr, _get_qualified_name
import torch.utils._pytree as pytree
from . import _pytree as fx_pytree

from typing import TYPE_CHECKING, Callable, Any, List, Dict, NamedTuple, Optional, Tuple, Set, FrozenSet, Type
from dataclasses import dataclass
from contextlib import contextmanager
import copy
import torch

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 18 Column: 5

              

if TYPE_CHECKING:
    from .graph_module import GraphModule  # noqa: F401
    from ._symbolic_trace import Tracer   # noqa: F401


# Mapping of builtins to their `typing` equivalent.
_origin_type_map = {

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 19 Column: 5

              
if TYPE_CHECKING:
    from .graph_module import GraphModule  # noqa: F401
    from ._symbolic_trace import Tracer   # noqa: F401


# Mapping of builtins to their `typing` equivalent.
_origin_type_map = {
    list: List,

            

Reported by Pylint.

Module 'torch' has no 'device' member
Error

Line: 54 Column: 64

              _register_custom_builtin('nan', 'from math import nan', math.nan)
_register_custom_builtin('NoneType', 'NoneType = type(None)', type(None))
_register_custom_builtin('torch', 'import torch', torch)
_register_custom_builtin('device', 'from torch import device', torch.device)
_register_custom_builtin('fx_pytree', 'import torch.fx._pytree as fx_pytree', fx_pytree)
_register_custom_builtin('pytree', 'import torch.utils._pytree as pytree', pytree)


def _is_magic(x: str) -> bool:

            

Reported by Pylint.

Module 'torch' has no 'device' member
Error

Line: 826 Column: 47

              
            Returns: the global name that should be used to reference 'obj' in generated source.
            """
            if _is_from_torch(obj) and obj != torch.device:  # to support registering torch.device
                # HACK: workaround for how torch custom ops are registered. We
                # can't import them like normal modules so they must retain their
                # fully qualified name.
                return _get_qualified_name(obj)


            

Reported by Pylint.

Redefining built-in 'type'
Error

Line: 212 Column: 24

                  def __enter__(self):
        pass

    def __exit__(self, type, value, tb):
        self.graph._insert = self.orig_insert

class _node_list:
    def __init__(self, graph: 'Graph', direction: str = '_next'):
        assert direction in ['_next', '_prev']

            

Reported by Pylint.

Unused variable 'args_spec'
Error

Line: 414 Column: 20

                      return n

    def flatten_inps(self, *args):
        flat_args, args_spec = pytree.tree_flatten(args)
        return flat_args

    def unflatten_outs(self, out):
        if self._pytree_info is None:
            return out

            

Reported by Pylint.

Access to a protected member _remove_from_list of a client class
Error

Line: 438 Column: 9

                          raise RuntimeError(f'Tried to erase Node {to_erase} but it still had {len(to_erase.users)} '
                               f'users in the graph: {to_erase.users}!')

        to_erase._remove_from_list()
        to_erase._erased = True  # iterators may retain handles to erased nodes
        self._len -= 1

        # Null out this Node's argument nodes so that the Nodes referred to
        # can update their ``users`` accordingly

            

Reported by Pylint.

Access to a protected member _erased of a client class
Error

Line: 439 Column: 9

                                             f'users in the graph: {to_erase.users}!')

        to_erase._remove_from_list()
        to_erase._erased = True  # iterators may retain handles to erased nodes
        self._len -= 1

        # Null out this Node's argument nodes so that the Nodes referred to
        # can update their ``users`` accordingly
        new_args = map_arg(to_erase.args, lambda n: None)

            

Reported by Pylint.