The following issues were found:
test/jit/test_class_type.py
636 issues
Line: 6
Column: 1
import sys
import unittest
import torch
import torch.nn as nn
from torch.testing import FileCheck
from typing import Any
# Make the helper files in test/ importable
Reported by Pylint.
Line: 7
Column: 1
import unittest
import torch
import torch.nn as nn
from torch.testing import FileCheck
from typing import Any
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
Reported by Pylint.
Line: 8
Column: 1
import torch
import torch.nn as nn
from torch.testing import FileCheck
from typing import Any
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
Reported by Pylint.
Line: 14
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, make_global
import torch.testing._internal.jit_utils
from torch.testing._internal.common_utils import IS_SANDCASTLE
from typing import List, Tuple, Iterable, Optional, Dict
if __name__ == '__main__':
Reported by Pylint.
Line: 15
Column: 1
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, make_global
import torch.testing._internal.jit_utils
from torch.testing._internal.common_utils import IS_SANDCASTLE
from typing import List, Tuple, Iterable, Optional, Dict
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
Reported by Pylint.
Line: 16
Column: 1
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, make_global
import torch.testing._internal.jit_utils
from torch.testing._internal.common_utils import IS_SANDCASTLE
from typing import List, Tuple, Iterable, Optional, Dict
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
Reported by Pylint.
Line: 134
Column: 28
self.foo = x
def get_non_initialized(self):
return self.asdf # asdf isn't an attr
def test_set_attr_non_initialized(self):
with self.assertRaisesRegexWithHighlight(RuntimeError, "Tried to set nonexistent attribute", "self.bar = y"):
@torch.jit.script
class FooTest(object):
Reported by Pylint.
Line: 422
Column: 13
with self.assertRaisesRegexWithHighlight(RuntimeError, "bool\' for argument \'reverse", ""):
@torch.jit.script
def test():
li = [Foo(1)]
li.sort(li)
return li
test()
Reported by Pylint.
Line: 426
Column: 13
li = [Foo(1)]
li.sort(li)
return li
test()
with self.assertRaisesRegexWithHighlight(RuntimeError, "must define a __lt__", ""):
@torch.jit.script
class NoMethod(object):
def __init__(self):
Reported by Pylint.
Line: 435
Column: 13
pass
@torch.jit.script
def test():
li = [NoMethod(), NoMethod()]
li.sort()
return li
test()
Reported by Pylint.
torch/_torch_docs.py
545 issues
Line: 127
Column: 12
See :doc:`/notes/randomness` for more information."""
}
add_docstr(torch.abs, r"""
abs(input, *, out=None) -> Tensor
Computes the absolute value of each element in :attr:`input`.
.. math::
Reported by Pylint.
Line: 147
Column: 12
tensor([ 1, 2, 3])
""".format(**common_args))
add_docstr(torch.absolute,
r"""
absolute(input, *, out=None) -> Tensor
Alias for :func:`torch.abs`
""".format(**common_args))
Reported by Pylint.
Line: 154
Column: 12
Alias for :func:`torch.abs`
""".format(**common_args))
add_docstr(torch.acos, r"""
acos(input, *, out=None) -> Tensor
Computes the inverse cosine of each element in :attr:`input`.
.. math::
Reported by Pylint.
Line: 177
Column: 12
tensor([ 1.2294, 2.2004, 1.3690, 1.7298])
""".format(**common_args))
add_docstr(torch.arccos, r"""
arccos(input, *, out=None) -> Tensor
Alias for :func:`torch.acos`.
""")
Reported by Pylint.
Line: 183
Column: 12
Alias for :func:`torch.acos`.
""")
add_docstr(torch.acosh, r"""
acosh(input, *, out=None) -> Tensor
Returns a new tensor with the inverse hyperbolic cosine of the elements of :attr:`input`.
Note:
Reported by Pylint.
Line: 210
Column: 12
tensor([ 0.7791, 1.3120, 1.2979, 1.1341 ])
""".format(**common_args))
add_docstr(torch.arccosh, r"""
arccosh(input, *, out=None) -> Tensor
Alias for :func:`torch.acosh`.
""".format(**common_args))
Reported by Pylint.
Line: 216
Column: 12
Alias for :func:`torch.acosh`.
""".format(**common_args))
add_docstr(torch.add, r"""
add(input, other, *, out=None) -> Tensor
Adds the scalar :attr:`other` to each element of the input :attr:`input`
and returns a new resulting tensor.
Reported by Pylint.
Line: 285
Column: 12
[ -8.9902, -8.3667, -7.3925, -7.6147]])
""".format(**common_args))
add_docstr(torch.addbmm,
r"""
addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
Performs a batch matrix-matrix product of matrices stored
in :attr:`batch1` and :attr:`batch2`,
Reported by Pylint.
Line: 335
Column: 12
[ -3.8202, 4.3691, 1.0943, -1.1109, 5.4730]])
""".format(**common_args, **tf32_notes))
add_docstr(torch.addcdiv, r"""
addcdiv(input, tensor1, tensor2, *, value=1, out=None) -> Tensor
Performs the element-wise division of :attr:`tensor1` by :attr:`tensor2`,
multiply the result by the scalar :attr:`value` and add it to :attr:`input`.
Reported by Pylint.
Line: 380
Column: 12
[-0.5369, -0.9829, 0.0430]])
""".format(**common_args))
add_docstr(torch.addcmul,
r"""
addcmul(input, tensor1, tensor2, *, value=1, out=None) -> Tensor
Performs the element-wise multiplication of :attr:`tensor1`
by :attr:`tensor2`, multiply the result by the scalar :attr:`value`
Reported by Pylint.
torch/testing/_internal/common_quantization.py
542 issues
Line: 115
Column: 28
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = torch.max(output, 1)
total += target.size(0)
correct += (predicted == target).sum().item()
return train_loss, correct, total
class AverageMeter(object):
Reported by Pylint.
Line: 195
Column: 37
prepared.to(rank)
model_with_ddp = prepared
optimizer = torch.optim.SGD(model_with_ddp.parameters(), lr=0.0001)
train_one_epoch(model_with_ddp, criterion, optimizer, dataset, rank, 1)
ddp_cleanup()
def convert_dynamic(module):
convert(module, get_default_dynamic_quant_module_mappings(), inplace=True)
Reported by Pylint.
Line: 195
Column: 59
prepared.to(rank)
model_with_ddp = prepared
optimizer = torch.optim.SGD(model_with_ddp.parameters(), lr=0.0001)
train_one_epoch(model_with_ddp, criterion, optimizer, dataset, rank, 1)
ddp_cleanup()
def convert_dynamic(module):
convert(module, get_default_dynamic_quant_module_mappings(), inplace=True)
Reported by Pylint.
Line: 214
Column: 14
out_channels = out_channels_per_group * groups
(X_value_min, X_value_max) = (0, 4)
X_init = torch.randint(
X_value_min, X_value_max,
(batch_size, in_channels,) + input_feature_map_size)
X = X_scale * (X_init - X_zero_point).float()
X_q = torch.quantize_per_tensor(
X, scale=X_scale, zero_point=X_zero_point, dtype=torch.quint8)
Reported by Pylint.
Line: 218
Column: 11
X_value_min, X_value_max,
(batch_size, in_channels,) + input_feature_map_size)
X = X_scale * (X_init - X_zero_point).float()
X_q = torch.quantize_per_tensor(
X, scale=X_scale, zero_point=X_zero_point, dtype=torch.quint8)
W_scale = W_scale * out_channels
W_zero_point = W_zero_point * out_channels
# Resize W_scale and W_zero_points arrays equal to out_channels
Reported by Pylint.
Line: 219
Column: 58
(batch_size, in_channels,) + input_feature_map_size)
X = X_scale * (X_init - X_zero_point).float()
X_q = torch.quantize_per_tensor(
X, scale=X_scale, zero_point=X_zero_point, dtype=torch.quint8)
W_scale = W_scale * out_channels
W_zero_point = W_zero_point * out_channels
# Resize W_scale and W_zero_points arrays equal to out_channels
W_scale = W_scale[:out_channels]
Reported by Pylint.
Line: 235
Column: 14
(W_value_min, W_value_max) = (-5, 5)
# The operator expects them in the format
# (out_channels, in_channels/groups,) + kernel_size
W_init = torch.randint(
W_value_min, W_value_max,
(out_channels, in_channels_per_group,) + kernel_size)
b_init = torch.randint(0, 10, (out_channels,))
if use_channelwise:
Reported by Pylint.
Line: 238
Column: 14
W_init = torch.randint(
W_value_min, W_value_max,
(out_channels, in_channels_per_group,) + kernel_size)
b_init = torch.randint(0, 10, (out_channels,))
if use_channelwise:
W_shape = (-1, 1) + (1,) * len(kernel_size)
W_scales_tensor = torch.tensor(W_scale, dtype=torch.float)
W_zero_points_tensor = torch.tensor(W_zero_point, dtype=torch.float)
Reported by Pylint.
Line: 242
Column: 27
if use_channelwise:
W_shape = (-1, 1) + (1,) * len(kernel_size)
W_scales_tensor = torch.tensor(W_scale, dtype=torch.float)
W_zero_points_tensor = torch.tensor(W_zero_point, dtype=torch.float)
W = W_scales_tensor.reshape(*W_shape) * (
W_init.float() - W_zero_points_tensor.reshape(*W_shape)).float()
b = X_scale * W_scales_tensor * b_init.float()
W_q = torch.quantize_per_channel(
Reported by Pylint.
Line: 242
Column: 55
if use_channelwise:
W_shape = (-1, 1) + (1,) * len(kernel_size)
W_scales_tensor = torch.tensor(W_scale, dtype=torch.float)
W_zero_points_tensor = torch.tensor(W_zero_point, dtype=torch.float)
W = W_scales_tensor.reshape(*W_shape) * (
W_init.float() - W_zero_points_tensor.reshape(*W_shape)).float()
b = X_scale * W_scales_tensor * b_init.float()
W_q = torch.quantize_per_channel(
Reported by Pylint.
test/test_reductions.py
533 issues
Line: 1
Column: 1
import torch
import numpy as np
import math
from typing import Dict, List
import random
from functools import partial
from itertools import product, combinations, permutations
import warnings
Reported by Pylint.
Line: 11
Column: 1
from itertools import product, combinations, permutations
import warnings
from torch._six import inf, nan
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfNoSciPy, slowTest, torch_to_numpy_dtype_dict,
IS_WINDOWS, make_tensor)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCPU, dtypes, dtypesIfCUDA, dtypesIfCPU,
Reported by Pylint.
Line: 12
Column: 1
import warnings
from torch._six import inf, nan
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfNoSciPy, slowTest, torch_to_numpy_dtype_dict,
IS_WINDOWS, make_tensor)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCPU, dtypes, dtypesIfCUDA, dtypesIfCPU,
onlyOnCPUAndCUDA, onlyCUDA, largeTensorTest, precisionOverride)
Reported by Pylint.
Line: 15
Column: 1
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfNoSciPy, slowTest, torch_to_numpy_dtype_dict,
IS_WINDOWS, make_tensor)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCPU, dtypes, dtypesIfCUDA, dtypesIfCPU,
onlyOnCPUAndCUDA, onlyCUDA, largeTensorTest, precisionOverride)
# TODO: replace with make_tensor
def _generate_input(shape, dtype, device, with_extremal):
Reported by Pylint.
Line: 105
Column: 9
@skipIfNoSciPy
def test_logsumexp(self, device):
from scipy.special import logsumexp
a = torch.randn(5, 4, device=device)
a[0, 0] = inf
a[1, :] = -inf
actual = a.logsumexp(1)
expected = logsumexp(a.cpu().numpy(), 1)
Reported by Pylint.
Line: 361
Column: 9
@onlyCPU
@skipIfNoSciPy
def test_logsumexp_dim(self, device):
from scipy.special import logsumexp
self._test_dim_ops(
lambda t, d: t.logsumexp(d),
lambda n, d: logsumexp(n, d),
use_integral=False)
Reported by Pylint.
Line: 2642
Column: 9
# returned data using allclose() or isinf() which does not exists in the former tests.
@skipIfNoSciPy
def test_tensor_reduce_ops_empty(self, device):
from scipy.special import logsumexp
shape = (2, 0, 4)
master_input = torch.randn(shape, device=device)
np_input = np.empty(shape)
test_functions = [
('prod', torch.prod, 1., np.prod),
Reported by Pylint.
Line: 19
Column: 3
instantiate_device_type_tests, onlyCPU, dtypes, dtypesIfCUDA, dtypesIfCPU,
onlyOnCPUAndCUDA, onlyCUDA, largeTensorTest, precisionOverride)
# TODO: replace with make_tensor
def _generate_input(shape, dtype, device, with_extremal):
if shape == ():
x = torch.tensor((), dtype=dtype, device=device)
else:
if dtype.is_floating_point or dtype.is_complex:
Reported by Pylint.
Line: 49
Column: 3
return x
# TODO: replace with make_tensor
def _rand_shape(dim, min_size, max_size):
shape = []
for i in range(dim):
shape.append(random.randint(min_size, max_size))
return tuple(shape)
Reported by Pylint.
Line: 52
Column: 9
# TODO: replace with make_tensor
def _rand_shape(dim, min_size, max_size):
shape = []
for i in range(dim):
shape.append(random.randint(min_size, max_size))
return tuple(shape)
class TestReductions(TestCase):
Reported by Pylint.
caffe2/python/core.py
524 issues
Line: 30
Column: 47
import os
# Mac os specific message
if (sys.platform == 'darwin' and 'leveldb' in C.registered_dbs()):
print('If you are using homebrew leveldb on a Mac OS, you might see an '
'error warning you that malloc_zone_unregister() failed. This is '
'not a caffe2 issue but is due to the homebrew leveldb having an '
'incompatible memory allocator. It does not affect usage.')
Reported by Pylint.
Line: 95
Column: 5
def GlobalInit(args):
TriggerLazyImport()
_GLOBAL_INIT_ARGS.extend(args[1:])
C.global_init(args)
def GetGlobalInitArgs():
return _GLOBAL_INIT_ARGS[:]
Reported by Pylint.
Line: 108
Column: 12
def IsOperatorWithEngine(op_type, engine):
TriggerLazyImport()
return C.op_registry_key(op_type, engine) in _REGISTERED_OPERATORS
def IsGPUDeviceType(device_type):
return device_type in {caffe2_pb2.CUDA, caffe2_pb2.HIP}
Reported by Pylint.
Line: 181
Column: 19
def InferOpBlobDevices(op):
device_info = C.infer_op_input_output_device(op.SerializeToString())
input_info = []
output_info = []
for dev_str in device_info[0]:
device_option = caffe2_pb2.DeviceOption()
device_option.ParseFromString(dev_str)
Reported by Pylint.
Line: 301
Column: 26
raise AttributeError(
'Method ' + op_type + ' is not a registered operator.' +
' Did you mean: [' +
",".join(workspace.C.nearby_opnames(op_type)) + ']'
)
return lambda *args, **kwargs: self._CreateAndAddToNet(
op_type, *args, **kwargs)
def __dir__(self):
Reported by Pylint.
Line: 439
Column: 13
if isinstance(grad_f, tuple):
grad_f = grad_f[0](*grad_f[1], **grad_f[2])
token = C.register_python_op(f, pass_workspace, '')
if grad_f:
C.register_python_gradient_op(token, grad_f)
return token
Reported by Pylint.
Line: 441
Column: 9
token = C.register_python_op(f, pass_workspace, '')
if grad_f:
C.register_python_gradient_op(token, grad_f)
return token
def CreatePythonOperator(
f, inputs,
Reported by Pylint.
Line: 643
Column: 44
assert(g1 == g2)
assert dev_1 == dev_2, (
"Unequal devices for sparse generators: "
"{} and {}".format(dev1, dev2)
)
assert(op1_i is None or op2_i is None)
assert(op1_v is None or op2_v is None)
assert(idx1_i == 0 or idx2_i == 0)
assert(idx1_v == 0 or idx2_v == 0)
Reported by Pylint.
Line: 643
Column: 50
assert(g1 == g2)
assert dev_1 == dev_2, (
"Unequal devices for sparse generators: "
"{} and {}".format(dev1, dev2)
)
assert(op1_i is None or op2_i is None)
assert(op1_v is None or op2_v is None)
assert(idx1_i == 0 or idx2_i == 0)
assert(idx1_v == 0 or idx2_v == 0)
Reported by Pylint.
Line: 1128
Column: 21
# TODO(tulloch) - Propagate GradientWrapper up through the stack.
def from_untyped(grad):
if grad is None:
w = C.GradientWrapper()
assert w.is_empty()
return w
try:
(indices, values) = grad
w = C.GradientWrapper()
Reported by Pylint.
test/onnx/test_operators.py
516 issues
Line: 2
Column: 1
from test_pytorch_common import TestCase, run_tests, flatten, skipIfNoLapack
import torch
import torch.onnx
from torch.autograd import Variable, Function
from torch.nn import Module, functional
import torch.nn as nn
Reported by Pylint.
Line: 2
Column: 1
from test_pytorch_common import TestCase, run_tests, flatten, skipIfNoLapack
import torch
import torch.onnx
from torch.autograd import Variable, Function
from torch.nn import Module, functional
import torch.nn as nn
Reported by Pylint.
Line: 2
Column: 1
from test_pytorch_common import TestCase, run_tests, flatten, skipIfNoLapack
import torch
import torch.onnx
from torch.autograd import Variable, Function
from torch.nn import Module, functional
import torch.nn as nn
Reported by Pylint.
Line: 4
Column: 1
from test_pytorch_common import TestCase, run_tests, flatten, skipIfNoLapack
import torch
import torch.onnx
from torch.autograd import Variable, Function
from torch.nn import Module, functional
import torch.nn as nn
Reported by Pylint.
Line: 5
Column: 1
from test_pytorch_common import TestCase, run_tests, flatten, skipIfNoLapack
import torch
import torch.onnx
from torch.autograd import Variable, Function
from torch.nn import Module, functional
import torch.nn as nn
import itertools
Reported by Pylint.
Line: 6
Column: 1
import torch
import torch.onnx
from torch.autograd import Variable, Function
from torch.nn import Module, functional
import torch.nn as nn
import itertools
import io
Reported by Pylint.
Line: 7
Column: 1
import torch
import torch.onnx
from torch.autograd import Variable, Function
from torch.nn import Module, functional
import torch.nn as nn
import itertools
import io
import inspect
Reported by Pylint.
Line: 8
Column: 1
import torch.onnx
from torch.autograd import Variable, Function
from torch.nn import Module, functional
import torch.nn as nn
import itertools
import io
import inspect
import glob
Reported by Pylint.
Line: 16
Column: 1
import glob
import os
import shutil
import torch.testing._internal.common_utils as common
'''Usage: python test/onnx/test_operators.py [--no-onnx] [--produce-onnx-test-data]
--no-onnx: no onnx python dependence
--produce-onnx-test-data: generate onnx test data
--accept: accept onnx updates and overwrite models
Reported by Pylint.
Line: 68
Column: 13
self.assertExpected(onnx_model_pbtxt, subname)
if _onnx_dep:
onnx_model_pb = export_to_pb(m, args, **kwargs)
import onnx
import onnx.checker
import onnx.numpy_helper
import test_onnx_common
model_def = onnx.ModelProto.FromString(onnx_model_pb)
onnx.checker.check_model(model_def)
Reported by Pylint.
torch/testing/_internal/distributed/rpc/jit/rpc_test.py
503 issues
Line: 34
Column: 28
def rpc_return_rref(dst):
return rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1))
@torch.jit.script
def rref_local_value(rref: RRef[Tensor]) -> Tensor:
return rref.local_value()
Reported by Pylint.
Line: 34
Column: 45
def rpc_return_rref(dst):
return rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1))
@torch.jit.script
def rref_local_value(rref: RRef[Tensor]) -> Tensor:
return rref.local_value()
Reported by Pylint.
Line: 62
Column: 40
class RRefAPITest:
@dist_init
def test_rref_is_owner(self):
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
rref_var = rpc_return_rref(dst_worker_name)
@torch.jit.script
def rref_tensor_is_owner(rref_var: RRef[Tensor]) -> bool:
return rref_var.is_owner()
Reported by Pylint.
Line: 62
Column: 57
class RRefAPITest:
@dist_init
def test_rref_is_owner(self):
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
rref_var = rpc_return_rref(dst_worker_name)
@torch.jit.script
def rref_tensor_is_owner(rref_var: RRef[Tensor]) -> bool:
return rref_var.is_owner()
Reported by Pylint.
Line: 70
Column: 9
return rref_var.is_owner()
res = rref_tensor_is_owner(rref_var)
self.assertEqual(res, False)
@dist_init
def test_rref_local_value(self):
if self.rank != 0:
return
Reported by Pylint.
Line: 74
Column: 12
@dist_init
def test_rref_local_value(self):
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
rref = rpc_return_rref(dst_worker_name)
Reported by Pylint.
Line: 77
Column: 40
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
rref = rpc_return_rref(dst_worker_name)
with self.assertRaisesRegex(
RuntimeError, r"Can't call RRef.local_value\(\) on a non-owner RRef"
):
Reported by Pylint.
Line: 77
Column: 57
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
rref = rpc_return_rref(dst_worker_name)
with self.assertRaisesRegex(
RuntimeError, r"Can't call RRef.local_value\(\) on a non-owner RRef"
):
Reported by Pylint.
Line: 80
Column: 14
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
rref = rpc_return_rref(dst_worker_name)
with self.assertRaisesRegex(
RuntimeError, r"Can't call RRef.local_value\(\) on a non-owner RRef"
):
rref_local_value(rref)
ret = ret = rpc.rpc_sync(dst_worker_name, rref_local_value, (rref,))
Reported by Pylint.
Line: 86
Column: 9
rref_local_value(rref)
ret = ret = rpc.rpc_sync(dst_worker_name, rref_local_value, (rref,))
self.assertEqual(ret, torch.add(torch.ones(2, 2), 1))
@dist_init
def test_local_rref_local_value(self):
if self.rank != 0:
return
Reported by Pylint.
test/quantization/fx/test_numeric_suite_fx.py
495 issues
Line: 6
Column: 1
import operator
import unittest
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.quantization import default_dynamic_qconfig
import torch.nn.quantized as nnq
toq = torch.ops.quantized
Reported by Pylint.
Line: 7
Column: 1
import unittest
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.quantization import default_dynamic_qconfig
import torch.nn.quantized as nnq
toq = torch.ops.quantized
from torch.quantization.quantize_fx import (
Reported by Pylint.
Line: 8
Column: 1
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.quantization import default_dynamic_qconfig
import torch.nn.quantized as nnq
toq = torch.ops.quantized
from torch.quantization.quantize_fx import (
convert_fx,
Reported by Pylint.
Line: 9
Column: 1
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.quantization import default_dynamic_qconfig
import torch.nn.quantized as nnq
toq = torch.ops.quantized
from torch.quantization.quantize_fx import (
convert_fx,
prepare_fx,
Reported by Pylint.
Line: 10
Column: 1
import torch.nn as nn
import torch.nn.functional as F
from torch.quantization import default_dynamic_qconfig
import torch.nn.quantized as nnq
toq = torch.ops.quantized
from torch.quantization.quantize_fx import (
convert_fx,
prepare_fx,
prepare_qat_fx,
Reported by Pylint.
Line: 12
Column: 1
from torch.quantization import default_dynamic_qconfig
import torch.nn.quantized as nnq
toq = torch.ops.quantized
from torch.quantization.quantize_fx import (
convert_fx,
prepare_fx,
prepare_qat_fx,
)
from torch.testing._internal.common_quantization import (
Reported by Pylint.
Line: 17
Column: 1
prepare_fx,
prepare_qat_fx,
)
from torch.testing._internal.common_quantization import (
ConvBnModel,
ConvBnReLUModel,
ConvModel,
QuantizationTestCase,
skipIfNoFBGEMM,
Reported by Pylint.
Line: 29
Column: 1
SparseNNModel,
skip_if_no_torchvision,
)
from torch.quantization.quantization_mappings import (
get_default_static_quant_module_mappings,
get_default_dynamic_quant_module_mappings,
get_default_float_to_quantized_operator_mappings,
)
from torch.testing._internal.common_quantization import NodeSpec as ns
Reported by Pylint.
Line: 34
Column: 1
get_default_dynamic_quant_module_mappings,
get_default_float_to_quantized_operator_mappings,
)
from torch.testing._internal.common_quantization import NodeSpec as ns
from torch.quantization.fx.pattern_utils import get_default_quant_patterns
import torch.quantization.fx.quantization_patterns as qp
from torch.quantization.ns.pattern_utils import (
get_type_a_related_to_b,
)
Reported by Pylint.
Line: 35
Column: 1
get_default_float_to_quantized_operator_mappings,
)
from torch.testing._internal.common_quantization import NodeSpec as ns
from torch.quantization.fx.pattern_utils import get_default_quant_patterns
import torch.quantization.fx.quantization_patterns as qp
from torch.quantization.ns.pattern_utils import (
get_type_a_related_to_b,
)
from torch.quantization.ns.graph_matcher import (
Reported by Pylint.
test/onnx/test_utility_funs.py
493 issues
Line: 1
Column: 1
from test_pytorch_common import TestCase, run_tests
import torch
import torch.onnx
from torch.onnx import utils, OperatorExportTypes, TrainingMode
from torch.onnx.symbolic_helper import _set_opset_version, _set_operator_export_type, _set_onnx_shape_inference
import torch.utils.cpp_extension
from test_pytorch_common import skipIfUnsupportedMinOpsetVersion, skipIfUnsupportedOpsetVersion
import caffe2.python.onnx.backend as backend
Reported by Pylint.
Line: 1
Column: 1
from test_pytorch_common import TestCase, run_tests
import torch
import torch.onnx
from torch.onnx import utils, OperatorExportTypes, TrainingMode
from torch.onnx.symbolic_helper import _set_opset_version, _set_operator_export_type, _set_onnx_shape_inference
import torch.utils.cpp_extension
from test_pytorch_common import skipIfUnsupportedMinOpsetVersion, skipIfUnsupportedOpsetVersion
import caffe2.python.onnx.backend as backend
Reported by Pylint.
Line: 3
Column: 1
from test_pytorch_common import TestCase, run_tests
import torch
import torch.onnx
from torch.onnx import utils, OperatorExportTypes, TrainingMode
from torch.onnx.symbolic_helper import _set_opset_version, _set_operator_export_type, _set_onnx_shape_inference
import torch.utils.cpp_extension
from test_pytorch_common import skipIfUnsupportedMinOpsetVersion, skipIfUnsupportedOpsetVersion
import caffe2.python.onnx.backend as backend
Reported by Pylint.
Line: 4
Column: 1
from test_pytorch_common import TestCase, run_tests
import torch
import torch.onnx
from torch.onnx import utils, OperatorExportTypes, TrainingMode
from torch.onnx.symbolic_helper import _set_opset_version, _set_operator_export_type, _set_onnx_shape_inference
import torch.utils.cpp_extension
from test_pytorch_common import skipIfUnsupportedMinOpsetVersion, skipIfUnsupportedOpsetVersion
import caffe2.python.onnx.backend as backend
Reported by Pylint.
Line: 5
Column: 1
import torch
import torch.onnx
from torch.onnx import utils, OperatorExportTypes, TrainingMode
from torch.onnx.symbolic_helper import _set_opset_version, _set_operator_export_type, _set_onnx_shape_inference
import torch.utils.cpp_extension
from test_pytorch_common import skipIfUnsupportedMinOpsetVersion, skipIfUnsupportedOpsetVersion
import caffe2.python.onnx.backend as backend
from verify import verify
Reported by Pylint.
Line: 6
Column: 1
import torch
import torch.onnx
from torch.onnx import utils, OperatorExportTypes, TrainingMode
from torch.onnx.symbolic_helper import _set_opset_version, _set_operator_export_type, _set_onnx_shape_inference
import torch.utils.cpp_extension
from test_pytorch_common import skipIfUnsupportedMinOpsetVersion, skipIfUnsupportedOpsetVersion
import caffe2.python.onnx.backend as backend
from verify import verify
Reported by Pylint.
Line: 7
Column: 1
import torch.onnx
from torch.onnx import utils, OperatorExportTypes, TrainingMode
from torch.onnx.symbolic_helper import _set_opset_version, _set_operator_export_type, _set_onnx_shape_inference
import torch.utils.cpp_extension
from test_pytorch_common import skipIfUnsupportedMinOpsetVersion, skipIfUnsupportedOpsetVersion
import caffe2.python.onnx.backend as backend
from verify import verify
import torchvision
Reported by Pylint.
Line: 9
Column: 1
from torch.onnx.symbolic_helper import _set_opset_version, _set_operator_export_type, _set_onnx_shape_inference
import torch.utils.cpp_extension
from test_pytorch_common import skipIfUnsupportedMinOpsetVersion, skipIfUnsupportedOpsetVersion
import caffe2.python.onnx.backend as backend
from verify import verify
import torchvision
import onnx
Reported by Pylint.
Line: 12
Column: 1
import caffe2.python.onnx.backend as backend
from verify import verify
import torchvision
import onnx
import io
import copy
Reported by Pylint.
Line: 14
Column: 1
import torchvision
import onnx
import io
import copy
import unittest
Reported by Pylint.
test/test_dataloader.py
482 issues
Line: 7
Column: 1
import os
import ctypes
import faulthandler
import torch
import gc
import time
import signal
import unittest
import itertools
Reported by Pylint.
Line: 15
Column: 1
import itertools
import warnings
import tempfile
from torch import multiprocessing as mp
from torch.utils.data import _utils, Dataset, IterableDataset, TensorDataset, DataLoader, ConcatDataset, ChainDataset, Subset
from torch.utils.data._utils import MP_STATUS_CHECK_INTERVAL
from torch.utils.data.dataset import random_split
from torch._utils import ExceptionWrapper
from torch.testing._internal.common_utils import (TestCase, run_tests, TEST_NUMPY, IS_WINDOWS,
Reported by Pylint.
Line: 16
Column: 1
import warnings
import tempfile
from torch import multiprocessing as mp
from torch.utils.data import _utils, Dataset, IterableDataset, TensorDataset, DataLoader, ConcatDataset, ChainDataset, Subset
from torch.utils.data._utils import MP_STATUS_CHECK_INTERVAL
from torch.utils.data.dataset import random_split
from torch._utils import ExceptionWrapper
from torch.testing._internal.common_utils import (TestCase, run_tests, TEST_NUMPY, IS_WINDOWS,
IS_IN_CI, NO_MULTIPROCESSING_SPAWN, skipIfRocm, slowTest,
Reported by Pylint.
Line: 17
Column: 1
import tempfile
from torch import multiprocessing as mp
from torch.utils.data import _utils, Dataset, IterableDataset, TensorDataset, DataLoader, ConcatDataset, ChainDataset, Subset
from torch.utils.data._utils import MP_STATUS_CHECK_INTERVAL
from torch.utils.data.dataset import random_split
from torch._utils import ExceptionWrapper
from torch.testing._internal.common_utils import (TestCase, run_tests, TEST_NUMPY, IS_WINDOWS,
IS_IN_CI, NO_MULTIPROCESSING_SPAWN, skipIfRocm, slowTest,
load_tests, TEST_WITH_TSAN, IS_SANDCASTLE)
Reported by Pylint.
Line: 18
Column: 1
from torch import multiprocessing as mp
from torch.utils.data import _utils, Dataset, IterableDataset, TensorDataset, DataLoader, ConcatDataset, ChainDataset, Subset
from torch.utils.data._utils import MP_STATUS_CHECK_INTERVAL
from torch.utils.data.dataset import random_split
from torch._utils import ExceptionWrapper
from torch.testing._internal.common_utils import (TestCase, run_tests, TEST_NUMPY, IS_WINDOWS,
IS_IN_CI, NO_MULTIPROCESSING_SPAWN, skipIfRocm, slowTest,
load_tests, TEST_WITH_TSAN, IS_SANDCASTLE)
Reported by Pylint.
Line: 19
Column: 1
from torch.utils.data import _utils, Dataset, IterableDataset, TensorDataset, DataLoader, ConcatDataset, ChainDataset, Subset
from torch.utils.data._utils import MP_STATUS_CHECK_INTERVAL
from torch.utils.data.dataset import random_split
from torch._utils import ExceptionWrapper
from torch.testing._internal.common_utils import (TestCase, run_tests, TEST_NUMPY, IS_WINDOWS,
IS_IN_CI, NO_MULTIPROCESSING_SPAWN, skipIfRocm, slowTest,
load_tests, TEST_WITH_TSAN, IS_SANDCASTLE)
try:
Reported by Pylint.
Line: 20
Column: 1
from torch.utils.data._utils import MP_STATUS_CHECK_INTERVAL
from torch.utils.data.dataset import random_split
from torch._utils import ExceptionWrapper
from torch.testing._internal.common_utils import (TestCase, run_tests, TEST_NUMPY, IS_WINDOWS,
IS_IN_CI, NO_MULTIPROCESSING_SPAWN, skipIfRocm, slowTest,
load_tests, TEST_WITH_TSAN, IS_SANDCASTLE)
try:
import psutil
Reported by Pylint.
Line: 1400
Column: 9
def test_random_sampler(self):
from collections import Counter
from torch.utils.data import RandomSampler
def sample_stat(sampler, num_samples):
counts = Counter(sampler)
count_repeated = sum(val > 1 for val in counts.values())
return (count_repeated, min(counts.keys()), max(counts.keys()), sum(counts.values()))
Reported by Pylint.
Line: 1434
Column: 9
RandomSampler(self.dataset, replacement=0)
def test_random_sampler_len_with_replacement(self):
from torch.utils.data import RandomSampler
# add 5 extra samples
num_samples = len(self.dataset) + 5
sampler = RandomSampler(self.dataset,
replacement=True,
num_samples=num_samples)
Reported by Pylint.
Line: 1461
Column: 9
count_num_samples_in_data_loader)
def test_distributed_sampler_invalid_rank(self):
from torch.utils.data.distributed import DistributedSampler
dataset = torch.IntTensor(range(10))
with self.assertRaisesRegex(ValueError, "Invalid rank"):
sampler = DistributedSampler(dataset, 3, 3)
with self.assertRaisesRegex(ValueError, "Invalid rank"):
Reported by Pylint.