The following issues were found:
test/test_overrides.py
263 issues
Line: 1
Column: 1
import torch
import numpy as np
import inspect
import functools
import pprint
import pickle
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.overrides import (
Reported by Pylint.
Line: 8
Column: 1
import pprint
import pickle
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.overrides import (
handle_torch_function,
has_torch_function,
get_overridable_functions,
get_testing_overrides,
Reported by Pylint.
Line: 9
Column: 1
import pickle
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.overrides import (
handle_torch_function,
has_torch_function,
get_overridable_functions,
get_testing_overrides,
is_tensor_method_or_property
Reported by Pylint.
Line: 555
Column: 5
def generate_tensor_like_override_tests(cls):
from torch.testing._internal.generated.annotated_fn_args import annotated_args
def test_generator(func, override):
# If func corresponds to a torch.Tensor method or property.
if is_tensor_method_or_property(func):
# Generate an instance by using SubTensor,
Reported by Pylint.
Line: 798
Column: 9
class TestGradCheckOverride(TestCase):
"Test that wrappers work with gradcheck."
def test_gradcheck(self):
from torch.testing._internal.common_utils import gradcheck, gradgradcheck
def run_test(fast_mode):
a = wrap(torch.tensor(5.0, dtype=torch.double))
b = wrap(torch.tensor(6.0, dtype=torch.double))
Reported by Pylint.
Line: 880
Column: 9
class TestBroadcastAllOverride(TestCase):
""" test for gh-37141 """
def test_broadcast_all(self):
from torch.distributions.utils import broadcast_all
a = torch.tensor([1.2, 3.4, 5.6])
a_w = Wrapper(a)
b = torch.tensor(5.0)
b_w = Wrapper(b)
c = torch.tensor([5.0, 5.0, 5.0])
Reported by Pylint.
Line: 131
Column: 40
def tensor(self):
return self._i * torch.eye(self._N)
def __torch_function__(self, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
if func not in self.handled_functions:
return NotImplemented
return self.handled_functions[func](*args, **kwargs)
Reported by Pylint.
Line: 149
Column: 28
@implements_diagonal(torch.mean)
def mean(mat):
return float(mat._i) / mat._N
@implements_diagonal(torch.mm)
def diagonal_mm(mat1, mat2):
return 0
Reported by Pylint.
Line: 149
Column: 18
@implements_diagonal(torch.mean)
def mean(mat):
return float(mat._i) / mat._N
@implements_diagonal(torch.mm)
def diagonal_mm(mat1, mat2):
return 0
Reported by Pylint.
Line: 152
Column: 23
return float(mat._i) / mat._N
@implements_diagonal(torch.mm)
def diagonal_mm(mat1, mat2):
return 0
@implements_diagonal(torch.div)
def diagonal_div(input, other, out=None):
return -1
Reported by Pylint.
test/quantization/core/test_quantized_module.py
257 issues
Line: 1
Column: 1
import torch
import torch.nn as nn
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.quantized as nniq
import torch.nn.intrinsic.quantized._reference as nniqr
import torch.nn.quantized as nnq
import torch.nn.quantized._reference as nnqr
import torch.nn.quantized.dynamic as nnqd
import torch.nn.functional as F
Reported by Pylint.
Line: 2
Column: 1
import torch
import torch.nn as nn
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.quantized as nniq
import torch.nn.intrinsic.quantized._reference as nniqr
import torch.nn.quantized as nnq
import torch.nn.quantized._reference as nnqr
import torch.nn.quantized.dynamic as nnqd
import torch.nn.functional as F
Reported by Pylint.
Line: 3
Column: 1
import torch
import torch.nn as nn
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.quantized as nniq
import torch.nn.intrinsic.quantized._reference as nniqr
import torch.nn.quantized as nnq
import torch.nn.quantized._reference as nnqr
import torch.nn.quantized.dynamic as nnqd
import torch.nn.functional as F
Reported by Pylint.
Line: 4
Column: 1
import torch
import torch.nn as nn
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.quantized as nniq
import torch.nn.intrinsic.quantized._reference as nniqr
import torch.nn.quantized as nnq
import torch.nn.quantized._reference as nnqr
import torch.nn.quantized.dynamic as nnqd
import torch.nn.functional as F
Reported by Pylint.
Line: 5
Column: 1
import torch.nn as nn
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.quantized as nniq
import torch.nn.intrinsic.quantized._reference as nniqr
import torch.nn.quantized as nnq
import torch.nn.quantized._reference as nnqr
import torch.nn.quantized.dynamic as nnqd
import torch.nn.functional as F
import torch.quantization
Reported by Pylint.
Line: 6
Column: 1
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.quantized as nniq
import torch.nn.intrinsic.quantized._reference as nniqr
import torch.nn.quantized as nnq
import torch.nn.quantized._reference as nnqr
import torch.nn.quantized.dynamic as nnqd
import torch.nn.functional as F
import torch.quantization
Reported by Pylint.
Line: 7
Column: 1
import torch.nn.intrinsic.quantized as nniq
import torch.nn.intrinsic.quantized._reference as nniqr
import torch.nn.quantized as nnq
import torch.nn.quantized._reference as nnqr
import torch.nn.quantized.dynamic as nnqd
import torch.nn.functional as F
import torch.quantization
from torch.quantization import (
Reported by Pylint.
Line: 8
Column: 1
import torch.nn.intrinsic.quantized._reference as nniqr
import torch.nn.quantized as nnq
import torch.nn.quantized._reference as nnqr
import torch.nn.quantized.dynamic as nnqd
import torch.nn.functional as F
import torch.quantization
from torch.quantization import (
get_default_static_quant_module_mappings,
Reported by Pylint.
Line: 9
Column: 1
import torch.nn.quantized as nnq
import torch.nn.quantized._reference as nnqr
import torch.nn.quantized.dynamic as nnqd
import torch.nn.functional as F
import torch.quantization
from torch.quantization import (
get_default_static_quant_module_mappings,
default_float_qparams_observer,
Reported by Pylint.
Line: 10
Column: 1
import torch.nn.quantized._reference as nnqr
import torch.nn.quantized.dynamic as nnqd
import torch.nn.functional as F
import torch.quantization
from torch.quantization import (
get_default_static_quant_module_mappings,
default_float_qparams_observer,
PerChannelMinMaxObserver,
Reported by Pylint.
torch/functional.py
256 issues
Line: 7
Column: 1
import torch
import torch.nn.functional as F
from ._lowrank import svd_lowrank, pca_lowrank
from .overrides import (
has_torch_function, has_torch_function_unary, has_torch_function_variadic,
handle_torch_function)
from ._jit_internal import boolean_dispatch, List
from ._jit_internal import _overload as overload
Reported by Pylint.
Line: 8
Column: 1
import torch
import torch.nn.functional as F
from ._lowrank import svd_lowrank, pca_lowrank
from .overrides import (
has_torch_function, has_torch_function_unary, has_torch_function_variadic,
handle_torch_function)
from ._jit_internal import boolean_dispatch, List
from ._jit_internal import _overload as overload
Reported by Pylint.
Line: 11
Column: 1
from .overrides import (
has_torch_function, has_torch_function_unary, has_torch_function_variadic,
handle_torch_function)
from ._jit_internal import boolean_dispatch, List
from ._jit_internal import _overload as overload
Tensor = torch.Tensor
from torch import _VF
Reported by Pylint.
Line: 12
Column: 1
has_torch_function, has_torch_function_unary, has_torch_function_variadic,
handle_torch_function)
from ._jit_internal import boolean_dispatch, List
from ._jit_internal import _overload as overload
Tensor = torch.Tensor
from torch import _VF
__all__ = [
Reported by Pylint.
Line: 72
Column: 12
# This wrapper exists to support variadic args.
if has_torch_function(tensors):
return handle_torch_function(broadcast_tensors, tensors, *tensors)
return _VF.broadcast_tensors(tensors) # type: ignore[attr-defined]
def broadcast_shapes(*shapes):
r"""broadcast_shapes(*shapes) -> Size
Reported by Pylint.
Line: 103
Column: 18
# This wrapper exists to support variadic args.
# TODO Move this to C++ once the jit has better support for torch.Size.
with torch.no_grad():
scalar = torch.zeros((), device="cpu")
tensors = [scalar.expand(shape) for shape in shapes]
tensors = broadcast_tensors(*tensors)
return tensors[0].shape
Reported by Pylint.
Line: 327
Column: 12
# in the original implementation this line is omitted
return einsum(equation, *_operands)
return _VF.einsum(equation, operands) # type: ignore[attr-defined]
# This wrapper exists to support variadic args.
if TYPE_CHECKING:
# The JIT doesn't understand Union, so only add type annotation for mypy
Reported by Pylint.
Line: 426
Column: 12
if len(tensors) == 1 and isinstance(tensors[0], (list, tuple)):
# the old interface of passing the operands as one list argument
tensors = tensors[0] # type: ignore[assignment]
return _VF.meshgrid(tensors) # type: ignore[attr-defined]
def stft(input: Tensor, n_fft: int, hop_length: Optional[int] = None,
win_length: Optional[int] = None, window: Optional[Tensor] = None,
center: bool = True, pad_mode: str = 'reflect', normalized: bool = False,
Reported by Pylint.
Line: 553
Column: 12
pad = int(n_fft // 2)
input = F.pad(input.view(extended_shape), [pad, pad], pad_mode)
input = input.view(input.shape[-signal_dim:])
return _VF.stft(input, n_fft, hop_length, win_length, window, # type: ignore[attr-defined]
normalized, onesided, return_complex)
def istft(input: Tensor, n_fft: int, hop_length: Optional[int] = None,
win_length: Optional[int] = None, window: Optional[Tensor] = None,
center: bool = True, normalized: bool = False,
Reported by Pylint.
Line: 627
Column: 12
window=window, center=center, normalized=normalized, onesided=onesided,
length=length, return_complex=return_complex)
return _VF.istft(input, n_fft, hop_length, win_length, window, center, # type: ignore[attr-defined]
normalized, onesided, length, return_complex)
del torch.unique_dim
Reported by Pylint.
test/test_namedtensor.py
256 issues
Line: 2
Column: 1
import unittest
from torch.testing._internal.common_utils import TestCase, run_tests, TEST_NUMPY
from torch.testing._internal.common_cuda import TEST_CUDA
from collections import namedtuple, OrderedDict
import itertools
import functools
import torch
from torch import Tensor
import torch.nn.functional as F
Reported by Pylint.
Line: 3
Column: 1
import unittest
from torch.testing._internal.common_utils import TestCase, run_tests, TEST_NUMPY
from torch.testing._internal.common_cuda import TEST_CUDA
from collections import namedtuple, OrderedDict
import itertools
import functools
import torch
from torch import Tensor
import torch.nn.functional as F
Reported by Pylint.
Line: 7
Column: 1
from collections import namedtuple, OrderedDict
import itertools
import functools
import torch
from torch import Tensor
import torch.nn.functional as F
from multiprocessing.reduction import ForkingPickler
import pickle
import io
Reported by Pylint.
Line: 8
Column: 1
import itertools
import functools
import torch
from torch import Tensor
import torch.nn.functional as F
from multiprocessing.reduction import ForkingPickler
import pickle
import io
import sys
Reported by Pylint.
Line: 9
Column: 1
import functools
import torch
from torch import Tensor
import torch.nn.functional as F
from multiprocessing.reduction import ForkingPickler
import pickle
import io
import sys
import warnings
Reported by Pylint.
Line: 18
Column: 5
def pass_name_to_python_arg_parser(name):
x = torch.empty(2, names=(name,))
def flatten(lst):
return [item for sublist in lst for item in sublist]
Reported by Pylint.
Line: 69
Column: 3
class TestNamedTensor(TestCase):
def test_aaa_must_run_first_check_experimental_warning(self):
# TODO(rzou): It would be nice for this to be a "real" python warning.
# Right now this error message only prints once and doesn't respect
# warnings.simplefilter behavior (where python users can control whether
# or not to display warnings once, all the time, or never).
with warnings.catch_warnings(record=True) as warns:
x = torch.randn(3, 3, names=('N', 'C'))
Reported by Pylint.
Line: 74
Column: 13
# warnings.simplefilter behavior (where python users can control whether
# or not to display warnings once, all the time, or never).
with warnings.catch_warnings(record=True) as warns:
x = torch.randn(3, 3, names=('N', 'C'))
self.assertEqual(len(warns), 1)
self.assertTrue(str(warns[0].message).startswith(
'Named tensors and all their associated APIs are an experimental feature'))
def test_trivial(self):
Reported by Pylint.
Line: 84
Column: 9
def _test_name_inference(self, op, args=(), expected_names=(), device='cpu',
maybe_raises_regex=None):
casted_args = [arg.to(device) if isinstance(arg, torch.Tensor) else arg
for arg in args]
if maybe_raises_regex is not None:
with self.assertRaisesRegex(RuntimeError, maybe_raises_regex):
result = op(*args)
return
Reported by Pylint.
Line: 95
Column: 3
msg='Name inference for {} on device {} failed'.format(
op.__name__, device))
# TODO(rzou): Some form of this check should be added to self.assertEqual.
# Right now I don't know what it should look like.
def assertTensorDataAndNamesEqual(self, x, y):
self.assertEqual(x.names, y.names)
unnamed_x = x.rename(None)
unnamed_y = y.rename(None)
Reported by Pylint.
caffe2/python/core_test.py
255 issues
Line: 520
Column: 14
a, b = net.AddExternalInput("a", "b")
net.Mul([a, b], "c"); cf = currentframe(); line = cf.f_lineno
func = cf.f_code.co_name
ws = workspace.C.Workspace()
with self.assertRaises(Exception):
ws.run(net)
with self.assertRaises(Exception):
ws.create_net(net)
self.op_name_check(net, cf, line, func)
Reported by Pylint.
Line: 532
Column: 14
a = net.AddExternalInput("a")
net.Split(a, ["b", "c"], axis=0); cf = currentframe(); line = cf.f_lineno
func = cf.f_code.co_name
ws = workspace.C.Workspace()
ws.create_blob(str(a)).feed(np.array([1, 2, 3], dtype=np.float32))
ws.create_net(net)
with self.assertRaises(Exception):
ws.run(net)
self.op_name_check(net, cf, line, func)
Reported by Pylint.
Line: 160
Column: 17
all_blobs |= set(op.input) | set(op.output)
self.assertEqual(all_blobs, inputs | outputs | internals)
# create net to make sure its valid
for input in inputs:
workspace.FeedBlob(input, np.array([]))
workspace.CreateNet(net)
n2, (d22, ) = n.ClonePartial('f1', {a1: 'a11', a2: 'a22'}, [d])
net_assert(
Reported by Pylint.
Line: 198
Column: 9
self.assertEqual(str(e22), 'f7/e')
self.assertEqual(str(f22), 'f7/f')
params._CheckLookupTables()
n._CheckLookupTables()
def test_mask_clone_update_external_list(self):
n = core.Net('original')
a1 = n.AddExternalInput('a1')
Reported by Pylint.
Line: 199
Column: 9
self.assertEqual(str(f22), 'f7/f')
params._CheckLookupTables()
n._CheckLookupTables()
def test_mask_clone_update_external_list(self):
n = core.Net('original')
a1 = n.AddExternalInput('a1')
a2 = n.AddExternalInput('a2')
Reported by Pylint.
Line: 249
Column: 17
else_net=else_net.Proto(),
)
copied = n.Clone("copied", blob_remap={"inputA": "inputX"})
if_op = copied._net.op[0]
self.assertEqual(if_op.arg[0].n.op[0].input, ["inputX"])
self.assertEqual(if_op.arg[1].n.op[0].input, ["inputB"])
class TestExternalInputs(test_util.TestCase):
Reported by Pylint.
Line: 337
Column: 5
for op in operatorDefList2:
op.debug_info = ""
self.assertEqual(operatorDefList1, operatorDefList2)
"""
Test that operators are named with different names, and that automatically
named blob names don't clash intra or inter networks.
"""
def test_next_blob(self):
def create_net():
Reported by Pylint.
Line: 386
Column: 9
add_ops()
# Force reset of lookup tables
a.Proto().name
with core.NameScope('n2'):
add_ops()
all_outputs = []
Reported by Pylint.
Line: 403
Column: 9
if i != j:
self.assertNotEqual(str(o1), str(o2))
a._CheckLookupTables()
b._CheckLookupTables()
class TestAppendNet(test_util.TestCase):
Reported by Pylint.
Line: 404
Column: 9
self.assertNotEqual(str(o1), str(o2))
a._CheckLookupTables()
b._CheckLookupTables()
class TestAppendNet(test_util.TestCase):
def test_external_inputs_merged_correctly(self):
Reported by Pylint.
torch/_lobpcg.py
253 issues
Line: 10
Column: 1
import torch
from torch import Tensor
from . import _linalg_utils as _utils
from .overrides import has_torch_function, handle_torch_function
__all__ = ['lobpcg']
Reported by Pylint.
Line: 11
Column: 1
import torch
from torch import Tensor
from . import _linalg_utils as _utils
from .overrides import has_torch_function, handle_torch_function
__all__ = ['lobpcg']
def _symeig_backward_complete_eigenspace(D_grad, U_grad, A, D, U):
Reported by Pylint.
Line: 24
Column: 11
# A.grad = U (D.grad + (U^T U.grad * F)) U^T
Ut = U.transpose(-1, -2).contiguous()
res = torch.matmul(
U,
torch.matmul(
torch.diag_embed(D_grad) + torch.matmul(Ut, U_grad) * F,
Ut
)
Reported by Pylint.
Line: 26
Column: 9
Ut = U.transpose(-1, -2).contiguous()
res = torch.matmul(
U,
torch.matmul(
torch.diag_embed(D_grad) + torch.matmul(Ut, U_grad) * F,
Ut
)
)
Reported by Pylint.
Line: 27
Column: 13
res = torch.matmul(
U,
torch.matmul(
torch.diag_embed(D_grad) + torch.matmul(Ut, U_grad) * F,
Ut
)
)
return res
Reported by Pylint.
Line: 27
Column: 40
res = torch.matmul(
U,
torch.matmul(
torch.diag_embed(D_grad) + torch.matmul(Ut, U_grad) * F,
Ut
)
)
return res
Reported by Pylint.
Line: 118
Column: 22
return res
if zero_power is None:
zero_power = torch.eye(x.size(-1), x.size(-1), dtype=x.dtype, device=x.device) \
.view(*([1] * len(list(x.shape[:-2]))), x.size(-1), x.size(-1))
return _polynomial_value(poly, x, zero_power, transition)
def _vector_polynomial_value(poly, x, zero_power=None):
Reported by Pylint.
Line: 131
Column: 15
# vector-aware Horner's rule iteration
def transition(curr_poly_val, x, poly_coeff):
res = torch.addcmul(poly_coeff.unsqueeze(-1), x, curr_poly_val)
return res
if zero_power is None:
zero_power = x.new_ones(1).expand(x.shape)
Reported by Pylint.
Line: 151
Column: 11
# by the columns of U.
#
# fix generator for determinism
gen = torch.Generator(A.device)
# orthogonal complement to the span(U)
U_ortho = proj_U_ortho.matmul(
torch.randn(
(*A.shape[:-1], A.size(-1) - D.size(-1)),
Reported by Pylint.
Line: 155
Column: 9
# orthogonal complement to the span(U)
U_ortho = proj_U_ortho.matmul(
torch.randn(
(*A.shape[:-1], A.size(-1) - D.size(-1)),
dtype=A.dtype,
device=A.device,
generator=gen
)
Reported by Pylint.
test/test_indexing.py
252 issues
Line: 1
Column: 1
import torch
from torch import tensor
import unittest
import warnings
import random
from functools import reduce
import numpy as np
Reported by Pylint.
Line: 2
Column: 1
import torch
from torch import tensor
import unittest
import warnings
import random
from functools import reduce
import numpy as np
Reported by Pylint.
Line: 11
Column: 1
import numpy as np
from torch.testing._internal.common_utils import TestCase, run_tests, make_tensor
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCUDA, dtypes, dtypesIfCPU, dtypesIfCUDA,
onlyOnCPUAndCUDA)
Reported by Pylint.
Line: 12
Column: 1
import numpy as np
from torch.testing._internal.common_utils import TestCase, run_tests, make_tensor
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCUDA, dtypes, dtypesIfCPU, dtypesIfCUDA,
onlyOnCPUAndCUDA)
class TestIndexing(TestCase):
Reported by Pylint.
Line: 62
Column: 3
reference = consec((5, 5, 5)).to(device)
idx = torch.LongTensor([2, 4]).to(device)
self.assertEqual(reference[idx], torch.stack([reference[2], reference[4]]))
# TODO: enable once indexing is implemented like in numpy
# self.assertEqual(reference[2, idx], torch.stack([reference[2, 2], reference[2, 4]]))
# self.assertEqual(reference[3, idx, 1], torch.stack([reference[3, 2], reference[3, 4]])[:, 1])
# None indexing
self.assertEqual(reference[2, None], reference[2].unsqueeze(0))
Reported by Pylint.
Line: 91
Column: 9
torch.stack([reference[:, 2, 1], reference[:, 2, 3], reference[:, 2, 5]], 1))
lst = [list(range(i, i + 10)) for i in range(0, 100, 10)]
tensor = torch.DoubleTensor(lst).to(device)
for _i in range(100):
idx1_start = random.randrange(10)
idx1_end = idx1_start + random.randrange(1, 10 - idx1_start + 1)
idx1_step = random.randrange(1, 8)
idx1 = slice(idx1_start, idx1_end, idx1_step)
Reported by Pylint.
Line: 408
Column: 13
# verify too many indices fails
with self.assertRaises(IndexError):
reference[ri([1]), ri([0, 2]), ri([3])]
# test invalid index fails
reference = torch.empty(10, dtype=dtype, device=device)
# can't test cuda because it is a device assert
if not reference.is_cuda:
Reported by Pylint.
Line: 416
Column: 21
if not reference.is_cuda:
for err_idx in (10, -11):
with self.assertRaisesRegex(IndexError, r'out of'):
reference[err_idx]
with self.assertRaisesRegex(IndexError, r'out of'):
reference[torch.LongTensor([err_idx]).to(device)]
with self.assertRaisesRegex(IndexError, r'out of'):
reference[[err_idx]]
Reported by Pylint.
Line: 418
Column: 21
with self.assertRaisesRegex(IndexError, r'out of'):
reference[err_idx]
with self.assertRaisesRegex(IndexError, r'out of'):
reference[torch.LongTensor([err_idx]).to(device)]
with self.assertRaisesRegex(IndexError, r'out of'):
reference[[err_idx]]
def tensor_indices_to_np(tensor, indices):
# convert the Torch Tensor to a numpy array
Reported by Pylint.
Line: 420
Column: 21
with self.assertRaisesRegex(IndexError, r'out of'):
reference[torch.LongTensor([err_idx]).to(device)]
with self.assertRaisesRegex(IndexError, r'out of'):
reference[[err_idx]]
def tensor_indices_to_np(tensor, indices):
# convert the Torch Tensor to a numpy array
tensor = tensor.to(device='cpu')
npt = tensor.numpy()
Reported by Pylint.
test/jit/test_typing.py
247 issues
Line: 4
Column: 1
import os
import sys
import torch
from torch.testing import FileCheck
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import IS_WINDOWS
from collections import namedtuple
from typing import List, Tuple, Optional, Dict
Reported by Pylint.
Line: 5
Column: 1
import sys
import torch
from torch.testing import FileCheck
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import IS_WINDOWS
from collections import namedtuple
from typing import List, Tuple, Optional, Dict
Reported by Pylint.
Line: 6
Column: 1
import torch
from torch.testing import FileCheck
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import IS_WINDOWS
from collections import namedtuple
from typing import List, Tuple, Optional, Dict
# Make the helper files in test/ importable
Reported by Pylint.
Line: 7
Column: 1
import torch
from torch.testing import FileCheck
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import IS_WINDOWS
from collections import namedtuple
from typing import List, Tuple, Optional, Dict
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
Reported by Pylint.
Line: 171
Column: 20
l1 = [1, 2, "foo", 3]
l2 = ["foo", "bar", "baz", "qux"]
d: Dict[int, str] = {k : v for k, v in zip(l1, l2)}
return l
with self.assertRaisesRegex(RuntimeError, "Dict type annotation"
r" `Dict\[int, str\]` did not match"
" the type of an actual key type"):
torch.jit.script(fn)
Reported by Pylint.
Line: 183
Column: 20
l1 = ["foo", "bar", "baz", "qux"]
l2 = [1, 2, "foo", 3]
d: Dict[str, int] = {k : v for k, v in zip(l1, l2)}
return l
with self.assertRaisesRegex(RuntimeError, "Dict type annotation"
r" `Dict\[str, int\]` did not match"
" the type of an actual value "
"type"):
Reported by Pylint.
Line: 447
Column: 17
"comprehension, found "
"Tuple[int, str]"):
@torch.jit.script
def fn():
d: Tuple[int, str] = {i : chr(i + 65) for i in range(4)}
return d
def test_dict_comprehension_scope(self):
def comprehension_can_access_outer_scope_variables():
Reported by Pylint.
Line: 170
Column: 13
def fn():
l1 = [1, 2, "foo", 3]
l2 = ["foo", "bar", "baz", "qux"]
d: Dict[int, str] = {k : v for k, v in zip(l1, l2)}
return l
with self.assertRaisesRegex(RuntimeError, "Dict type annotation"
r" `Dict\[int, str\]` did not match"
" the type of an actual key type"):
Reported by Pylint.
Line: 182
Column: 13
def fn():
l1 = ["foo", "bar", "baz", "qux"]
l2 = [1, 2, "foo", 3]
d: Dict[str, int] = {k : v for k, v in zip(l1, l2)}
return l
with self.assertRaisesRegex(RuntimeError, "Dict type annotation"
r" `Dict\[str, int\]` did not match"
" the type of an actual value "
Reported by Pylint.
Line: 193
Column: 30
def test_dict_invalid_annotations(self):
# Check for invalid value type annotation
def wrong_value_type(dictionary: Dict[str, torch.jit.ScriptModule]):
return
with self.assertRaisesRegex(ValueError, "Unknown type annotation"):
torch.jit.script(wrong_value_type)
# Check for invalid key type annotation
Reported by Pylint.
torch/quantization/observer.py
245 issues
Line: 10
Column: 1
import torch
import torch.nn as nn
from .utils import check_min_max_valid, calculate_qmin_qmax
class _PartialWrapper(object):
def __init__(self, p):
self.p = p
Reported by Pylint.
Line: 161
Column: 15
def __init__(
self,
dtype=torch.quint8,
qscheme=torch.per_tensor_affine,
reduce_range=False,
quant_min=None,
quant_max=None,
factory_kwargs=None,
Reported by Pylint.
Line: 162
Column: 17
def __init__(
self,
dtype=torch.quint8,
qscheme=torch.per_tensor_affine,
reduce_range=False,
quant_min=None,
quant_max=None,
factory_kwargs=None,
) -> None:
Reported by Pylint.
Line: 178
Column: 20
)
self.reduce_range = reduce_range
self.register_buffer(
"eps", torch.tensor([torch.finfo(torch.float32).eps], **factory_kwargs)
)
assert self.qscheme in (
torch.per_tensor_affine,
torch.per_tensor_symmetric,
torch.per_channel_affine,
Reported by Pylint.
Line: 178
Column: 34
)
self.reduce_range = reduce_range
self.register_buffer(
"eps", torch.tensor([torch.finfo(torch.float32).eps], **factory_kwargs)
)
assert self.qscheme in (
torch.per_tensor_affine,
torch.per_tensor_symmetric,
torch.per_channel_affine,
Reported by Pylint.
Line: 178
Column: 46
)
self.reduce_range = reduce_range
self.register_buffer(
"eps", torch.tensor([torch.finfo(torch.float32).eps], **factory_kwargs)
)
assert self.qscheme in (
torch.per_tensor_affine,
torch.per_tensor_symmetric,
torch.per_channel_affine,
Reported by Pylint.
Line: 181
Column: 13
"eps", torch.tensor([torch.finfo(torch.float32).eps], **factory_kwargs)
)
assert self.qscheme in (
torch.per_tensor_affine,
torch.per_tensor_symmetric,
torch.per_channel_affine,
torch.per_channel_symmetric,
torch.per_channel_affine_float_qparams,
), "Default Observer only works for per_tensor_affine, \
Reported by Pylint.
Line: 182
Column: 13
)
assert self.qscheme in (
torch.per_tensor_affine,
torch.per_tensor_symmetric,
torch.per_channel_affine,
torch.per_channel_symmetric,
torch.per_channel_affine_float_qparams,
), "Default Observer only works for per_tensor_affine, \
per_tensor_symmetric, per_channel_affine, \
Reported by Pylint.
Line: 183
Column: 13
assert self.qscheme in (
torch.per_tensor_affine,
torch.per_tensor_symmetric,
torch.per_channel_affine,
torch.per_channel_symmetric,
torch.per_channel_affine_float_qparams,
), "Default Observer only works for per_tensor_affine, \
per_tensor_symmetric, per_channel_affine, \
per_channel_symmetric and per_channel_float_qparams quantization scheme"
Reported by Pylint.
Line: 184
Column: 13
torch.per_tensor_affine,
torch.per_tensor_symmetric,
torch.per_channel_affine,
torch.per_channel_symmetric,
torch.per_channel_affine_float_qparams,
), "Default Observer only works for per_tensor_affine, \
per_tensor_symmetric, per_channel_affine, \
per_channel_symmetric and per_channel_float_qparams quantization scheme"
assert self.dtype in (
Reported by Pylint.
test/mobile/test_lite_script_module.py
245 issues
Line: 1
Column: 1
import torch
import torch.utils.bundled_inputs
import io
from typing import Dict, List, NamedTuple
from collections import namedtuple
import inspect
from torch.jit.mobile import _load_for_lite_interpreter, _export_operator_list
from torch.testing._internal.common_utils import TestCase, run_tests
Reported by Pylint.
Line: 2
Column: 1
import torch
import torch.utils.bundled_inputs
import io
from typing import Dict, List, NamedTuple
from collections import namedtuple
import inspect
from torch.jit.mobile import _load_for_lite_interpreter, _export_operator_list
from torch.testing._internal.common_utils import TestCase, run_tests
Reported by Pylint.
Line: 8
Column: 1
from collections import namedtuple
import inspect
from torch.jit.mobile import _load_for_lite_interpreter, _export_operator_list
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.common_quantization import (
AnnotatedSingleLayerLinearModel,
TwoLayerLinearModel,
AnnotatedNestedModel
Reported by Pylint.
Line: 9
Column: 1
import inspect
from torch.jit.mobile import _load_for_lite_interpreter, _export_operator_list
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.common_quantization import (
AnnotatedSingleLayerLinearModel,
TwoLayerLinearModel,
AnnotatedNestedModel
)
Reported by Pylint.
Line: 10
Column: 1
from torch.jit.mobile import _load_for_lite_interpreter, _export_operator_list
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.common_quantization import (
AnnotatedSingleLayerLinearModel,
TwoLayerLinearModel,
AnnotatedNestedModel
)
from torch.testing._internal.common_quantization import QuantizationLiteTestCase
Reported by Pylint.
Line: 15
Column: 1
TwoLayerLinearModel,
AnnotatedNestedModel
)
from torch.testing._internal.common_quantization import QuantizationLiteTestCase
class TestLiteScriptModule(TestCase):
def getScriptExportImportCopy(self, m, save_mobile_debug_info=True, also_test_file=False):
m_scripted = torch.jit.script(m)
Reported by Pylint.
Line: 28
Column: 14
mobile_module = _load_for_lite_interpreter(buffer)
return mobile_module
with TemporaryFileName() as fname:
m_scripted._save_for_lite_interpreter(fname, _save_mobile_debug_info=save_mobile_debug_info)
mobile_module = _load_for_lite_interpreter(fname)
return mobile_module
def test_load_mobile_module(self):
Reported by Pylint.
Line: 23
Column: 33
m_scripted = torch.jit.script(m)
if not also_test_file:
buffer = io.BytesIO(m_scripted._save_to_buffer_for_lite_interpreter(_save_mobile_debug_info=save_mobile_debug_info))
buffer.seek(0)
mobile_module = _load_for_lite_interpreter(buffer)
return mobile_module
with TemporaryFileName() as fname:
Reported by Pylint.
Line: 29
Column: 13
return mobile_module
with TemporaryFileName() as fname:
m_scripted._save_for_lite_interpreter(fname, _save_mobile_debug_info=save_mobile_debug_info)
mobile_module = _load_for_lite_interpreter(fname)
return mobile_module
def test_load_mobile_module(self):
class MyTestModule(torch.nn.Module):
Reported by Pylint.
Line: 35
Column: 13
def test_load_mobile_module(self):
class MyTestModule(torch.nn.Module):
def __init__(self):
super(MyTestModule, self).__init__()
def forward(self, x):
return x + 10
Reported by Pylint.