The following issues were found:
test/jit/test_peephole.py
327 issues
Line: 1
Column: 1
import torch
from torch.testing._internal.jit_utils import JitTestCase, RUN_CUDA, _inline_everything
from torch import nn
from torch.testing import FileCheck
from typing import List
import unittest
if __name__ == '__main__':
Reported by Pylint.
Line: 2
Column: 1
import torch
from torch.testing._internal.jit_utils import JitTestCase, RUN_CUDA, _inline_everything
from torch import nn
from torch.testing import FileCheck
from typing import List
import unittest
if __name__ == '__main__':
Reported by Pylint.
Line: 3
Column: 1
import torch
from torch.testing._internal.jit_utils import JitTestCase, RUN_CUDA, _inline_everything
from torch import nn
from torch.testing import FileCheck
from typing import List
import unittest
if __name__ == '__main__':
Reported by Pylint.
Line: 4
Column: 1
import torch
from torch.testing._internal.jit_utils import JitTestCase, RUN_CUDA, _inline_everything
from torch import nn
from torch.testing import FileCheck
from typing import List
import unittest
if __name__ == '__main__':
Reported by Pylint.
Line: 88
Column: 9
FileCheck().check("value=3").check_next("return").run(foo.graph)
@torch.jit.script
def foo(x, y, z):
li = [x, y, z]
for i in range(len(x)):
li.append(x)
return len([x, y, z])
Reported by Pylint.
Line: 98
Column: 9
FileCheck().check_not("aten::len").run(foo.graph)
@torch.jit.script
def foo(x, y, z):
li = [x, y, z]
return li[1], li[-2]
FileCheck().check("aten::__getitem__").run(foo.graph)
self.run_pass('peephole', foo.graph)
Reported by Pylint.
Line: 107
Column: 9
FileCheck().check_not("aten::__getitem__").run(foo.graph)
@torch.jit.script
def foo(x, y, z):
li = [x, y, z]
return li[-7]
self.run_pass('peephole', foo.graph)
FileCheck().check("aten::__getitem__").run(foo.graph)
Reported by Pylint.
Line: 115
Column: 9
FileCheck().check("aten::__getitem__").run(foo.graph)
@torch.jit.script
def foo(x, y, z):
li = [x, y, z]
for i in range(len(x)):
li.append(x)
return li[-2]
Reported by Pylint.
Line: 264
Column: 9
foo(2, 4)
@torch.jit.script
def foo(x: List[int], y: List[int]):
if len(x) == 4 and len(y) == 5:
pass
else:
raise Exception("hi")
Reported by Pylint.
Line: 278
Column: 9
foo(2, 4)
@torch.jit.script
def foo(x: List[int], y: List[int], z: List[int]):
if len(x) != 4:
raise Exception("..")
else:
if len(y) != 8:
raise Exception("...")
Reported by Pylint.
test/test_mkldnn.py
324 issues
Line: 14
Column: 1
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
Reported by Pylint.
Line: 15
Column: 1
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS
Reported by Pylint.
Line: 16
Column: 1
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS
Reported by Pylint.
Line: 17
Column: 1
import torch
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS
# batched grad doesn't support mkldnn
Reported by Pylint.
Line: 18
Column: 1
import torch.nn.functional as F
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS
# batched grad doesn't support mkldnn
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
Reported by Pylint.
Line: 19
Column: 1
import torch.jit
import torch.backends.mkldnn
from torch.utils import mkldnn as mkldnn_utils
from torch.testing._internal.common_utils import TestCase, \
run_tests, TemporaryFileName, gradcheck, gradgradcheck, IS_WINDOWS
# batched grad doesn't support mkldnn
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
gradgradcheck = functools.partial(gradgradcheck, check_batched_grad=False)
Reported by Pylint.
Line: 42
Column: 22
types = [torch.float, torch.bfloat16]
# Comment the line below to find out the CI machines having MKL-DNN build disabled
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
class TestMkldnn(TestCase):
def test_conversion(self):
for cpu_tensor in [torch.randn((1, 2, 3, 4),
dtype=torch.float, device=torch.device('cpu')),
torch.randn((1, 2, 3, 4, 5),
Reported by Pylint.
Line: 73
Column: 48
self.assertEqual(mkldnn_tensor.element_size(), cpu_tensor.element_size() / 2)
self.assertRaisesRegex(RuntimeError,
"Cannot access data pointer of Tensor that doesn't have storage",
lambda: mkldnn_tensor.data_ptr() != 0)
# bfloat cpu tensor to mkldnn float tensor or bfloat tensor.
cpu_tensor_bf16 = cpu_tensor.bfloat16()
for dtype1 in types:
mkldnn_tensor = cpu_tensor_bf16.to_mkldnn(dtype1)
Reported by Pylint.
Line: 123
Column: 53
# unsupported types and unsupported types with gpu
for dtype in [torch.double, torch.half, torch.uint8, torch.int8,
torch.short, torch.int, torch.long]:
with self.assertRaises(RuntimeError) as context:
torch.randn(1, 2, 3, 4, dtype=dtype, device=torch.device('cpu')).to_mkldnn()
if torch.cuda.is_available():
with self.assertRaises(RuntimeError) as context:
torch.randn(1, 2, 3, 4, dtype=dtype, device=torch.device('cuda')).to_mkldnn()
# supported type with gpu
Reported by Pylint.
Line: 135
Column: 91
# some factory functions
for creator in [torch.ones, torch.randn, torch.rand]:
with self.assertRaises(RuntimeError) as context:
creator(1, 2, 3, 4, dtype=torch.float, device=torch.device('cpu'), layout=torch._mkldnn)
def test_autograd_to_mkldnn(self):
# MKLDNN only supports float32
root = torch.randn(4, 5, dtype=torch.float32, requires_grad=True)
Reported by Pylint.
test/quantization/core/test_workflow_ops.py
321 issues
Line: 1
Column: 1
import torch
import math
from typing import Tuple
from torch.quantization import (
FakeQuantize,
MovingAverageMinMaxObserver,
default_observer,
default_affine_fixed_qparams_fake_quant,
)
Reported by Pylint.
Line: 4
Column: 1
import torch
import math
from typing import Tuple
from torch.quantization import (
FakeQuantize,
MovingAverageMinMaxObserver,
default_observer,
default_affine_fixed_qparams_fake_quant,
)
Reported by Pylint.
Line: 11
Column: 1
default_affine_fixed_qparams_fake_quant,
)
from torch.quantization._learnable_fake_quantize import _LearnableFakeQuantize
from torch.testing._internal.common_quantized import (
_fake_quantize_per_channel_affine_reference,
_fake_quantize_per_channel_affine_grad_reference,
to_tensor,
)
Reported by Pylint.
Line: 12
Column: 1
)
from torch.quantization._learnable_fake_quantize import _LearnableFakeQuantize
from torch.testing._internal.common_quantized import (
_fake_quantize_per_channel_affine_reference,
_fake_quantize_per_channel_affine_grad_reference,
to_tensor,
)
import torch.nn as nn
Reported by Pylint.
Line: 17
Column: 1
_fake_quantize_per_channel_affine_grad_reference,
to_tensor,
)
import torch.nn as nn
# Standard library
import io
import itertools
import unittest
Reported by Pylint.
Line: 26
Column: 1
import numpy as np
# Testing utils
from hypothesis import given, settings
from hypothesis import strategies as st
import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import TestCase
Reported by Pylint.
Line: 27
Column: 1
# Testing utils
from hypothesis import given, settings
from hypothesis import strategies as st
import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import TestCase
Reported by Pylint.
Line: 28
Column: 1
# Testing utils
from hypothesis import given, settings
from hypothesis import strategies as st
import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import TestCase
# Reference method for fake quantize
Reported by Pylint.
Line: 30
Column: 1
from hypothesis import strategies as st
import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import TestCase
# Reference method for fake quantize
# Note: because scale/zero_point are left as float in the actual kernel, this mimics how fake_quant works for float16/64
def _fake_quantize_per_tensor_affine_reference(X, scale, zero_point, quant_min, quant_max):
Reported by Pylint.
Line: 31
Column: 1
import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import TestCase
# Reference method for fake quantize
# Note: because scale/zero_point are left as float in the actual kernel, this mimics how fake_quant works for float16/64
def _fake_quantize_per_tensor_affine_reference(X, scale, zero_point, quant_min, quant_max):
dtype = X.dtype
Reported by Pylint.
test/jit/test_type_sharing.py
317 issues
Line: 5
Column: 1
import sys
import io
import torch
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
Reported by Pylint.
Line: 10
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import suppress_warnings
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
Reported by Pylint.
Line: 11
Column: 1
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import suppress_warnings
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
Reported by Pylint.
Line: 24
Column: 41
m1 = torch.jit.script(m1)
if not isinstance(m2, torch.jit.ScriptModule):
m2 = torch.jit.script(m2)
self.assertEqual(m1._c._type(), m2._c._type())
def assertDifferentType(self, m1, m2):
if not isinstance(m1, torch.jit.ScriptModule):
m1 = torch.jit.script(m1)
if not isinstance(m2, torch.jit.ScriptModule):
Reported by Pylint.
Line: 24
Column: 26
m1 = torch.jit.script(m1)
if not isinstance(m2, torch.jit.ScriptModule):
m2 = torch.jit.script(m2)
self.assertEqual(m1._c._type(), m2._c._type())
def assertDifferentType(self, m1, m2):
if not isinstance(m1, torch.jit.ScriptModule):
m1 = torch.jit.script(m1)
if not isinstance(m2, torch.jit.ScriptModule):
Reported by Pylint.
Line: 24
Column: 41
m1 = torch.jit.script(m1)
if not isinstance(m2, torch.jit.ScriptModule):
m2 = torch.jit.script(m2)
self.assertEqual(m1._c._type(), m2._c._type())
def assertDifferentType(self, m1, m2):
if not isinstance(m1, torch.jit.ScriptModule):
m1 = torch.jit.script(m1)
if not isinstance(m2, torch.jit.ScriptModule):
Reported by Pylint.
Line: 24
Column: 26
m1 = torch.jit.script(m1)
if not isinstance(m2, torch.jit.ScriptModule):
m2 = torch.jit.script(m2)
self.assertEqual(m1._c._type(), m2._c._type())
def assertDifferentType(self, m1, m2):
if not isinstance(m1, torch.jit.ScriptModule):
m1 = torch.jit.script(m1)
if not isinstance(m2, torch.jit.ScriptModule):
Reported by Pylint.
Line: 31
Column: 44
m1 = torch.jit.script(m1)
if not isinstance(m2, torch.jit.ScriptModule):
m2 = torch.jit.script(m2)
self.assertNotEqual(m1._c._type(), m2._c._type())
def test_basic(self):
class M(torch.nn.Module):
def __init__(self, a, b, c):
super(M, self).__init__()
Reported by Pylint.
Line: 31
Column: 29
m1 = torch.jit.script(m1)
if not isinstance(m2, torch.jit.ScriptModule):
m2 = torch.jit.script(m2)
self.assertNotEqual(m1._c._type(), m2._c._type())
def test_basic(self):
class M(torch.nn.Module):
def __init__(self, a, b, c):
super(M, self).__init__()
Reported by Pylint.
Line: 31
Column: 29
m1 = torch.jit.script(m1)
if not isinstance(m2, torch.jit.ScriptModule):
m2 = torch.jit.script(m2)
self.assertNotEqual(m1._c._type(), m2._c._type())
def test_basic(self):
class M(torch.nn.Module):
def __init__(self, a, b, c):
super(M, self).__init__()
Reported by Pylint.
test/jit/test_module_containers.py
304 issues
Line: 6
Column: 1
from typing import Any, List, Tuple
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.testing._internal.jit_utils import JitTestCase
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
Reported by Pylint.
Line: 7
Column: 1
from typing import Any, List, Tuple
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.testing._internal.jit_utils import JitTestCase
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
Reported by Pylint.
Line: 8
Column: 1
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.testing._internal.jit_utils import JitTestCase
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
Reported by Pylint.
Line: 22
Column: 13
class TestModuleContainers(JitTestCase):
def test_sequential_intermediary_types(self):
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
def forward(self, x):
return x + 3
Reported by Pylint.
Line: 29
Column: 13
return x + 3
class B(torch.nn.Module):
def __init__(self):
super(B, self).__init__()
def forward(self, x):
return {"1": x}
Reported by Pylint.
Line: 91
Column: 13
return x, names
class M2(M):
def __init__(self):
super(M2, self).__init__()
def forward(self, x, skip_name):
# type: (Tensor, str)
names = torch.jit.annotate(List[str], [])
Reported by Pylint.
Line: 99
Column: 17
names = torch.jit.annotate(List[str], [])
values = []
x2 = x
iter = 0
for name in self.moduledict:
names.append(name)
for i, (name, mod) in enumerate(self.moduledict.items()):
iter += i
Reported by Pylint.
Line: 119
Column: 21
iter += i
names.append(key)
for mod, mod in zip(self.moduledict.values(), self.moduledict.values()):
iter += i
x2 = mod(mod(x2))
return x, x2, names, iter
Reported by Pylint.
Line: 234
Column: 13
self.checkModule(MForward(), (torch.tensor(1),))
class M2(M):
def __init__(self):
super(M2, self).__init__()
def forward(self, v):
return self.mods[-11].forward(v)
Reported by Pylint.
Line: 244
Column: 13
torch.jit.script(M2())
class M3(M):
def __init__(self):
super(M3, self).__init__()
def forward(self, v):
i = 3
return self.mods[i].forward(v)
Reported by Pylint.
test/test_datapipe.py
301 issues
Line: 36
Column: 1
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data.backward_compatibility
import torch.utils.data.datapipes as dp
import torch.utils.data.graph
import torch.utils.data.sharding
Reported by Pylint.
Line: 37
Column: 1
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data.backward_compatibility
import torch.utils.data.datapipes as dp
import torch.utils.data.graph
import torch.utils.data.sharding
from torch.testing._internal.common_utils import TestCase, run_tests
Reported by Pylint.
Line: 38
Column: 1
import torch
import torch.nn as nn
import torch.utils.data.backward_compatibility
import torch.utils.data.datapipes as dp
import torch.utils.data.graph
import torch.utils.data.sharding
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.utils.data import (
Reported by Pylint.
Line: 39
Column: 1
import torch
import torch.nn as nn
import torch.utils.data.backward_compatibility
import torch.utils.data.datapipes as dp
import torch.utils.data.graph
import torch.utils.data.sharding
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.utils.data import (
DataLoader,
Reported by Pylint.
Line: 40
Column: 1
import torch.nn as nn
import torch.utils.data.backward_compatibility
import torch.utils.data.datapipes as dp
import torch.utils.data.graph
import torch.utils.data.sharding
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.utils.data import (
DataLoader,
DataChunk,
Reported by Pylint.
Line: 41
Column: 1
import torch.utils.data.backward_compatibility
import torch.utils.data.datapipes as dp
import torch.utils.data.graph
import torch.utils.data.sharding
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.utils.data import (
DataLoader,
DataChunk,
IterDataPipe,
Reported by Pylint.
Line: 42
Column: 1
import torch.utils.data.datapipes as dp
import torch.utils.data.graph
import torch.utils.data.sharding
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.utils.data import (
DataLoader,
DataChunk,
IterDataPipe,
MapDataPipe,
Reported by Pylint.
Line: 43
Column: 1
import torch.utils.data.graph
import torch.utils.data.sharding
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.utils.data import (
DataLoader,
DataChunk,
IterDataPipe,
MapDataPipe,
RandomSampler,
Reported by Pylint.
Line: 53
Column: 1
runtime_validation,
runtime_validation_disabled,
)
from torch.utils.data.datapipes.utils.decoder import (
basichandlers as decoder_basichandlers,
)
try:
import torchvision.transforms
Reported by Pylint.
Line: 197
Column: 9
def test_loadfilesfromdisk_iterable_datapipe(self):
# test import datapipe class directly
from torch.utils.data.datapipes.iter import (
ListDirFiles,
LoadFilesFromDisk,
)
temp_dir = self.temp_dir.name
Reported by Pylint.
caffe2/python/layers_test.py
299 issues
Line: 6
Column: 1
import hypothesis.strategies as st
import numpy as np
import numpy.testing as npt
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 9
Column: 1
import hypothesis.strategies as st
import numpy as np
import numpy.testing as npt
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import (
layer_model_instantiator,
Reported by Pylint.
Line: 273
Column: 9
core.BlobReference("fc_with_bootstrap/bootstrap_iteration_1/preds") == fc_with_bootstrap[3].field_blobs()[0]
)
train_init_net, train_net = self.get_training_nets()
predict_net = layer_model_instantiator.generate_predict_net(self.model)
train_proto = train_net.Proto()
eval_proto = predict_net.Proto()
Reported by Pylint.
Line: 345
Column: 9
fc_out
)
train_init_net, train_net = self.get_training_nets()
def testFCTransposed(self):
input_dim = 10
output_dim = 30
max_length = 20
Reported by Pylint.
Line: 345
Column: 25
fc_out
)
train_init_net, train_net = self.get_training_nets()
def testFCTransposed(self):
input_dim = 10
output_dim = 30
max_length = 20
Reported by Pylint.
Line: 366
Column: 25
fc_transposed_out
)
train_init_net, train_net = self.get_training_nets()
def testFCTransposedWithMaxFCSize(self):
input_dim = 10
output_dim = 30
max_length = 20
Reported by Pylint.
Line: 366
Column: 9
fc_transposed_out
)
train_init_net, train_net = self.get_training_nets()
def testFCTransposedWithMaxFCSize(self):
input_dim = 10
output_dim = 30
max_length = 20
Reported by Pylint.
Line: 388
Column: 9
fc_transposed_out
)
train_init_net, train_net = self.get_training_nets()
def testSparseLookupSumPoolingWithEviction(self):
# Create test embedding table of 1 row
record = schema.NewRecord(self.model.net, schema.Struct(
('sparse', schema.Struct(
Reported by Pylint.
Line: 388
Column: 25
fc_transposed_out
)
train_init_net, train_net = self.get_training_nets()
def testSparseLookupSumPoolingWithEviction(self):
# Create test embedding table of 1 row
record = schema.NewRecord(self.model.net, schema.Struct(
('sparse', schema.Struct(
Reported by Pylint.
Line: 401
Column: 31
embedding_dim = 8
lengths_blob = record.sparse.sparse_feature_0.lengths.get()
values_blob = record.sparse.sparse_feature_0.items.get()
evicted_values_blob = record.sparse.sparse_feature_0._evicted_values.get()
lengths = np.array([1]).astype(np.int32)
values = np.array([0]).astype(np.int64)
# Need to reset row 0
evicted_values = np.array([0]).astype(np.int64)
workspace.FeedBlob(lengths_blob, lengths)
Reported by Pylint.
tools/codegen/model.py
298 issues
Line: 117
Column: 5
PrivateUse2_PreAutograd = AutogradPrivateUse2
PrivateUse3_PreAutograd = AutogradPrivateUse3
def __str__(self) -> str:
return self.name
def lower(self) -> str:
return str(self).lower()
Reported by Pylint.
Line: 309
Column: 103
assert isinstance(structured, bool), f'not a bool: {structured}'
structured_delegate_s = e.pop('structured_delegate', None)
assert structured_delegate_s is None or isinstance(structured_delegate_s, str), f'not a str: {structured_delegate}'
structured_delegate: Optional[OperatorName] = None
if structured_delegate_s is not None:
structured_delegate = OperatorName.parse(structured_delegate_s)
structured_inherits = e.pop('structured_inherits', None)
Reported by Pylint.
Line: 832
Column: 37
some variants have return names but some not
"""
def strip_ret_annotation(r: Return) -> Return:
return Return(
name=None,
type=r.type,
annotation=None,
)
Reported by Pylint.
Line: 10
Column: 3
# A little trick from https://github.com/python/mypy/issues/6366
# for getting mypy to do exhaustiveness checking
# TODO: put this somewhere else, maybe
def assert_never(x: NoReturn) -> NoReturn:
raise AssertionError("Unhandled type: {}".format(type(x).__name__))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
Reported by Pylint.
Line: 191
Column: 3
# What python module to put the function in
python_module: Optional[str]
# TODO: figure out what this does
category_override: Optional[str]
# If no variants are specified in native_functions.yaml, this is
# assumed to be {'function'}.
variants: Set[Variant]
Reported by Pylint.
Line: 407
Column: 3
def validate_unstructured(self) -> None:
# TODO: probably better to accumulate these errors and report them all
# at once
assert not self.structured, "This function is structured, but there was " \
"no valid functional variant of it."
assert self.structured_delegate, "This function delegates to another structured out function, " \
"but no valid function was found (the delegate may not exist, or it has the wrong type)"
Reported by Pylint.
Line: 649
Column: 3
if self.external:
return f'{str(self.dispatch_key)}NativeFunctions'
else:
# TODO: This discrepancy isn't required; we could also generated
# a class for in-tree kernels. It'll just require carefully
# updating every kernel definition + callsite of every in-tree aten kernel.
return None
Reported by Pylint.
Line: 715
Column: 3
arguments: 'Arguments'
# TODO: Need to handle collisions with argument names at some point
returns: Tuple['Return', ...]
def schema_order_arguments(self) -> Iterator['Argument']:
return itertools.chain(
self.arguments.flat_positional,
Reported by Pylint.
Line: 763
Column: 3
assert len(self.arguments.out) == len(self.returns), \
"Must return as many arguments as there are out arguments"
if self.name.name.inplace:
# TODO: fixme
if not is_foreach_op(str(self.name)):
assert len(self.returns) == 1
def is_out_fn(self) -> bool:
# Note [is_out_fn]
Reported by Pylint.
Line: 915
Column: 13
try:
return BaseType(BaseTy[t])
except KeyError:
raise RuntimeError(f"unrecognized type {t}")
def __str__(self) -> str:
raise NotImplementedError
# WARNING: These concepts are not very well-defined. For example,
Reported by Pylint.
torch/testing/_internal/autocast_test_lists.py
296 issues
Line: 7
Column: 18
class AutocastTestLists(object):
def _rnn_cell_args(self, n, num_chunks, is_lstm, dev, dtype):
input = (torch.randn((n, n), device=dev, dtype=torch.float32),)
hx = ((torch.randn((n, n), device=dev, dtype=torch.float32),
torch.randn((n, n), device=dev, dtype=torch.float32)) if is_lstm else
torch.randn((n, n), device=dev, dtype=torch.float32),)
Reported by Pylint.
Line: 7
Column: 56
class AutocastTestLists(object):
def _rnn_cell_args(self, n, num_chunks, is_lstm, dev, dtype):
input = (torch.randn((n, n), device=dev, dtype=torch.float32),)
hx = ((torch.randn((n, n), device=dev, dtype=torch.float32),
torch.randn((n, n), device=dev, dtype=torch.float32)) if is_lstm else
torch.randn((n, n), device=dev, dtype=torch.float32),)
Reported by Pylint.
Line: 9
Column: 16
def _rnn_cell_args(self, n, num_chunks, is_lstm, dev, dtype):
input = (torch.randn((n, n), device=dev, dtype=torch.float32),)
hx = ((torch.randn((n, n), device=dev, dtype=torch.float32),
torch.randn((n, n), device=dev, dtype=torch.float32)) if is_lstm else
torch.randn((n, n), device=dev, dtype=torch.float32),)
weights = (torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_ih
torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_hh
Reported by Pylint.
Line: 9
Column: 54
def _rnn_cell_args(self, n, num_chunks, is_lstm, dev, dtype):
input = (torch.randn((n, n), device=dev, dtype=torch.float32),)
hx = ((torch.randn((n, n), device=dev, dtype=torch.float32),
torch.randn((n, n), device=dev, dtype=torch.float32)) if is_lstm else
torch.randn((n, n), device=dev, dtype=torch.float32),)
weights = (torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_ih
torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_hh
Reported by Pylint.
Line: 10
Column: 54
input = (torch.randn((n, n), device=dev, dtype=torch.float32),)
hx = ((torch.randn((n, n), device=dev, dtype=torch.float32),
torch.randn((n, n), device=dev, dtype=torch.float32)) if is_lstm else
torch.randn((n, n), device=dev, dtype=torch.float32),)
weights = (torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_ih
torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_hh
torch.randn((num_chunks * n), device=dev, dtype=torch.float32), # bias_ih
Reported by Pylint.
Line: 10
Column: 16
input = (torch.randn((n, n), device=dev, dtype=torch.float32),)
hx = ((torch.randn((n, n), device=dev, dtype=torch.float32),
torch.randn((n, n), device=dev, dtype=torch.float32)) if is_lstm else
torch.randn((n, n), device=dev, dtype=torch.float32),)
weights = (torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_ih
torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_hh
torch.randn((num_chunks * n), device=dev, dtype=torch.float32), # bias_ih
Reported by Pylint.
Line: 11
Column: 15
hx = ((torch.randn((n, n), device=dev, dtype=torch.float32),
torch.randn((n, n), device=dev, dtype=torch.float32)) if is_lstm else
torch.randn((n, n), device=dev, dtype=torch.float32),)
weights = (torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_ih
torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_hh
torch.randn((num_chunks * n), device=dev, dtype=torch.float32), # bias_ih
torch.randn((num_chunks * n), device=dev, dtype=torch.float32)) # bias_hh
Reported by Pylint.
Line: 11
Column: 53
hx = ((torch.randn((n, n), device=dev, dtype=torch.float32),
torch.randn((n, n), device=dev, dtype=torch.float32)) if is_lstm else
torch.randn((n, n), device=dev, dtype=torch.float32),)
weights = (torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_ih
torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_hh
torch.randn((num_chunks * n), device=dev, dtype=torch.float32), # bias_ih
torch.randn((num_chunks * n), device=dev, dtype=torch.float32)) # bias_hh
Reported by Pylint.
Line: 13
Column: 20
torch.randn((n, n), device=dev, dtype=torch.float32)) if is_lstm else
torch.randn((n, n), device=dev, dtype=torch.float32),)
weights = (torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_ih
torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_hh
torch.randn((num_chunks * n), device=dev, dtype=torch.float32), # bias_ih
torch.randn((num_chunks * n), device=dev, dtype=torch.float32)) # bias_hh
# returns args as a tuple
Reported by Pylint.
Line: 13
Column: 71
torch.randn((n, n), device=dev, dtype=torch.float32)) if is_lstm else
torch.randn((n, n), device=dev, dtype=torch.float32),)
weights = (torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_ih
torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_hh
torch.randn((num_chunks * n), device=dev, dtype=torch.float32), # bias_ih
torch.randn((num_chunks * n), device=dev, dtype=torch.float32)) # bias_hh
# returns args as a tuple
Reported by Pylint.
test/test_xnnpack_integration.py
291 issues
Line: 3
Column: 1
import unittest
import torch
import torch.backends.xnnpack
from torch.nn import functional as F
from torch.utils.mobile_optimizer import optimize_for_mobile
from torch.testing import FileCheck
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import TestCase, run_tests, slowTest
Reported by Pylint.
Line: 4
Column: 1
import unittest
import torch
import torch.backends.xnnpack
from torch.nn import functional as F
from torch.utils.mobile_optimizer import optimize_for_mobile
from torch.testing import FileCheck
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import TestCase, run_tests, slowTest
Reported by Pylint.
Line: 5
Column: 1
import torch
import torch.backends.xnnpack
from torch.nn import functional as F
from torch.utils.mobile_optimizer import optimize_for_mobile
from torch.testing import FileCheck
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import TestCase, run_tests, slowTest
from hypothesis import given, assume
Reported by Pylint.
Line: 6
Column: 1
import torch
import torch.backends.xnnpack
from torch.nn import functional as F
from torch.utils.mobile_optimizer import optimize_for_mobile
from torch.testing import FileCheck
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import TestCase, run_tests, slowTest
from hypothesis import given, assume
from hypothesis import strategies as st
Reported by Pylint.
Line: 7
Column: 1
import torch.backends.xnnpack
from torch.nn import functional as F
from torch.utils.mobile_optimizer import optimize_for_mobile
from torch.testing import FileCheck
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import TestCase, run_tests, slowTest
from hypothesis import given, assume
from hypothesis import strategies as st
import io
Reported by Pylint.
Line: 8
Column: 1
from torch.nn import functional as F
from torch.utils.mobile_optimizer import optimize_for_mobile
from torch.testing import FileCheck
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import TestCase, run_tests, slowTest
from hypothesis import given, assume
from hypothesis import strategies as st
import io
import itertools
Reported by Pylint.
Line: 9
Column: 1
from torch.utils.mobile_optimizer import optimize_for_mobile
from torch.testing import FileCheck
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import TestCase, run_tests, slowTest
from hypothesis import given, assume
from hypothesis import strategies as st
import io
import itertools
Reported by Pylint.
Line: 10
Column: 1
from torch.testing import FileCheck
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import TestCase, run_tests, slowTest
from hypothesis import given, assume
from hypothesis import strategies as st
import io
import itertools
from torch.testing._internal.common_utils import TEST_WITH_TSAN
Reported by Pylint.
Line: 11
Column: 1
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import TestCase, run_tests, slowTest
from hypothesis import given, assume
from hypothesis import strategies as st
import io
import itertools
from torch.testing._internal.common_utils import TEST_WITH_TSAN
Reported by Pylint.
Line: 15
Column: 1
import io
import itertools
from torch.testing._internal.common_utils import TEST_WITH_TSAN
@unittest.skipUnless(torch.backends.xnnpack.enabled,
" XNNPACK must be enabled for these tests."
" Please build with USE_XNNPACK=1.")
@unittest.skipIf(TEST_WITH_TSAN, "TSAN fails with XNNPACK. Does not seem to have a good reason for failures.")
Reported by Pylint.