The following issues were found:
test/test_tensorexpr.py
875 issues
Line: 2
Column: 1
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import unittest
from torch.testing._internal.common_utils import suppress_warnings, num_profiled_runs, run_tests
from torch.testing._internal.jit_utils import JitTestCase
Reported by Pylint.
Line: 3
Column: 1
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import unittest
from torch.testing._internal.common_utils import suppress_warnings, num_profiled_runs, run_tests
from torch.testing._internal.jit_utils import JitTestCase
Reported by Pylint.
Line: 4
Column: 1
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import unittest
from torch.testing._internal.common_utils import suppress_warnings, num_profiled_runs, run_tests
from torch.testing._internal.jit_utils import JitTestCase
Reported by Pylint.
Line: 7
Column: 1
from torch import nn
import unittest
from torch.testing._internal.common_utils import suppress_warnings, num_profiled_runs, run_tests
from torch.testing._internal.jit_utils import JitTestCase
class BaseTestClass(JitTestCase):
Reported by Pylint.
Line: 9
Column: 1
from torch.testing._internal.common_utils import suppress_warnings, num_profiled_runs, run_tests
from torch.testing._internal.jit_utils import JitTestCase
class BaseTestClass(JitTestCase):
def setUp(self):
self.old_profiling_executor = torch._C._jit_set_profiling_executor(True)
Reported by Pylint.
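The report omits the message text, but the column-1 findings on this import block are consistent with Pylint's import-order checks (e.g. C0411, wrong-import-order): the standard-library `import unittest` sits below third-party imports. A hedged sketch of the conforming order:

# Hedged sketch, assuming the column-1 findings are import-order checks:
# standard-library imports go before third-party ones.
import unittest

import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.testing._internal.common_utils import suppress_warnings, num_profiled_runs, run_tests
from torch.testing._internal.jit_utils import JitTestCase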
Line: 14
Column: 39
class BaseTestClass(JitTestCase):
def setUp(self):
self.old_profiling_executor = torch._C._jit_set_profiling_executor(True)
self.old_profiling_mode = torch._C._jit_set_profiling_mode(True)
self.old_cpu_fuser_state = torch._C._jit_can_fuse_on_cpu()
self.old_gpu_fuser_state = torch._C._jit_can_fuse_on_gpu()
torch._C._jit_override_can_fuse_on_cpu(True)
Reported by Pylint.
Line: 15
Column: 35
class BaseTestClass(JitTestCase):
def setUp(self):
self.old_profiling_executor = torch._C._jit_set_profiling_executor(True)
self.old_profiling_mode = torch._C._jit_set_profiling_mode(True)
self.old_cpu_fuser_state = torch._C._jit_can_fuse_on_cpu()
self.old_gpu_fuser_state = torch._C._jit_can_fuse_on_gpu()
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(True)
Reported by Pylint.
Line: 17
Column: 36
self.old_profiling_executor = torch._C._jit_set_profiling_executor(True)
self.old_profiling_mode = torch._C._jit_set_profiling_mode(True)
self.old_cpu_fuser_state = torch._C._jit_can_fuse_on_cpu()
self.old_gpu_fuser_state = torch._C._jit_can_fuse_on_gpu()
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(True)
self.texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()
torch._C._jit_set_texpr_fuser_enabled(True)
Reported by Pylint.
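The mid-line findings (columns 35-39) all start at underscore-prefixed `torch._C` helpers, which is the shape of Pylint's protected-access check (W0212). A hedged sketch, assuming that message and assuming the suite restores the flags in tearDown:

class BaseTestClass(JitTestCase):
    def setUp(self):
        # The internal JIT knobs have no public API, so the access is deliberate.
        self.old_cpu_fuser_state = torch._C._jit_can_fuse_on_cpu()  # pylint: disable=protected-access
        torch._C._jit_override_can_fuse_on_cpu(True)  # pylint: disable=protected-access

    def tearDown(self):
        # Restore the saved state so other suites see the default fuser config.
        torch._C._jit_override_can_fuse_on_cpu(self.old_cpu_fuser_state)  # pylint: disable=protected-access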
torch/testing/_internal/distributed/rpc/dist_autograd_test.py
870 issues
Line: 40
Column: 24
known_context_ids = set()
requires_grad_tensor = torch.ones(3, 3, requires_grad=True)
# Send rpc done info and context_id to
# dst_rank = (self.rank + rank_distance) % self.world_size
# we don't need a lock here since the GIL is held while executing remote
# python UDFs, so access is serialized across several workers.
Reported by Pylint.
Line: 61
Column: 12
def _torch_ones(sizes, requires_grad=False):
return torch.ones(sizes, requires_grad=requires_grad)
# This method must be called on the rref owner, and verifies that the grad of
# rref tensor equals the given grad.
def _compare_owner_value(context_id, rref, grad):
grads = dist_autograd.get_gradients(context_id)
Reported by Pylint.
Line: 67
Column: 12
# rref tensor equals the given grad.
def _compare_owner_value(context_id, rref, grad):
grads = dist_autograd.get_gradients(context_id)
return torch.equal(grads[rref.local_value()], grad)
def create_tensor():
return torch.ones((3, 3), requires_grad=True)
Reported by Pylint.
Line: 71
Column: 12
def create_tensor():
return torch.ones((3, 3), requires_grad=True)
@torch.jit.script
def create_torchscript_tensor() -> torch.Tensor:
return torch.ones((3, 3)).requires_grad_()
Reported by Pylint.
Line: 76
Column: 12
@torch.jit.script
def create_torchscript_tensor() -> torch.Tensor:
return torch.ones((3, 3)).requires_grad_()
def my_py_add(t1, t2):
return torch.add(t1, t2)
Reported by Pylint.
Line: 80
Column: 12
def my_py_add(t1, t2):
return torch.add(t1, t2)
def my_scalar_add(a, b):
return a + b
Reported by Pylint.
Line: 88
Column: 11
def my_rref_add(rref_t1, t2):
ret = torch.add(rref_t1.local_value(), t2)
return ret
@torch.jit.script
def my_script_add(t1, t2):
Reported by Pylint.
Line: 94
Column: 12
@torch.jit.script
def my_script_add(t1, t2):
return torch.add(t1, t2)
@torch.jit.script
def my_script_ref_add(ref_t1: RRef[torch.Tensor], t2: torch.Tensor) -> torch.Tensor:
t1 = ref_t1.to_here()
Reported by Pylint.
Line: 100
Column: 12
@torch.jit.script
def my_script_ref_add(ref_t1: RRef[torch.Tensor], t2: torch.Tensor) -> torch.Tensor:
t1 = ref_t1.to_here()
return torch.add(t1, t2)
def my_nested_rref_add(dst, rref_t1, t2):
return rpc.rpc_sync(dst, my_rref_add, args=(rref_t1, t2))
Reported by Pylint.
Line: 213
Column: 33
def _next_rank(self):
if hasattr(self, "dst_rank"):
self.dst_rank = (self.dst_rank + 1) % self.world_size
if self.dst_rank == self.rank:
return self._next_rank()
else:
self.dst_rank = (self.rank + 1) % self.world_size
return self.dst_rank
Reported by Pylint.
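The Line 213 entry points into `_next_rank`, whose recursive retry can only ever recurse once: stepping by one from `self.rank` cannot land on `self.rank` again while `world_size > 1`. A hedged equivalent without the recursion:

def _next_rank(self):
    # Round-robin over peers, skipping our own rank in a single extra step.
    if hasattr(self, "dst_rank"):
        self.dst_rank = (self.dst_rank + 1) % self.world_size
        if self.dst_rank == self.rank:
            self.dst_rank = (self.dst_rank + 1) % self.world_size
    else:
        self.dst_rank = (self.rank + 1) % self.world_size
    return self.dst_rank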
torch/nn/functional.py
867 issues
Line: 11
Column: 1
from torch._C import _infer_size, _add_docstr
from torch._torch_docs import reproducibility_notes, tf32_notes
from .._jit_internal import boolean_dispatch, _overload, BroadcastingList1, BroadcastingList2, BroadcastingList3
from ..overrides import (
has_torch_function, has_torch_function_unary, has_torch_function_variadic,
handle_torch_function)
from . import _reduction as _Reduction
from . import grad # noqa: F401
Reported by Pylint.
Line: 12
Column: 1
from torch._torch_docs import reproducibility_notes, tf32_notes
from .._jit_internal import boolean_dispatch, _overload, BroadcastingList1, BroadcastingList2, BroadcastingList3
from ..overrides import (
has_torch_function, has_torch_function_unary, has_torch_function_variadic,
handle_torch_function)
from . import _reduction as _Reduction
from . import grad # noqa: F401
from .modules import utils
Reported by Pylint.
Line: 15
Column: 1
from ..overrides import (
has_torch_function, has_torch_function_unary, has_torch_function_variadic,
handle_torch_function)
from . import _reduction as _Reduction
from . import grad # noqa: F401
from .modules import utils
from .modules.utils import _single, _pair, _triple, _list_with_default
Reported by Pylint.
Line: 16
Column: 1
has_torch_function, has_torch_function_unary, has_torch_function_variadic,
handle_torch_function)
from . import _reduction as _Reduction
from . import grad # noqa: F401
from .modules import utils
from .modules.utils import _single, _pair, _triple, _list_with_default
Tensor = torch.Tensor
Reported by Pylint.
Line: 17
Column: 1
handle_torch_function)
from . import _reduction as _Reduction
from . import grad # noqa: F401
from .modules import utils
from .modules.utils import _single, _pair, _triple, _list_with_default
Tensor = torch.Tensor
Reported by Pylint.
Line: 18
Column: 1
from . import _reduction as _Reduction
from . import grad # noqa: F401
from .modules import utils
from .modules.utils import _single, _pair, _triple, _list_with_default
Tensor = torch.Tensor
conv1d = _add_docstr(
Reported by Pylint.
Line: 24
Column: 5
Tensor = torch.Tensor
conv1d = _add_docstr(
torch.conv1d,
r"""
conv1d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor
Applies a 1D convolution over an input signal composed of several input
planes.
Reported by Pylint.
Line: 72
Column: 5
)
conv2d = _add_docstr(
torch.conv2d,
r"""
conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor
Applies a 2D convolution over an input image composed of several input
planes.
Reported by Pylint.
Line: 122
Column: 5
) # noqa: E501
conv3d = _add_docstr(
torch.conv3d,
r"""
conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor
Applies a 3D convolution over an input image composed of several input
planes.
Reported by Pylint.
Line: 171
Column: 5
) # noqa: E501
conv_transpose1d = _add_docstr(
torch.conv_transpose1d,
r"""
conv_transpose1d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor
Applies a 1D transposed convolution operator over an input signal
composed of several input planes, sometimes also called "deconvolution".
Reported by Pylint.
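These functional.py entries all land on module-level `_add_docstr` assignments. Whatever the exact message, the quoted pattern is a docstring being attached to a C-implemented op, which cannot carry one natively; a minimal restatement:

import torch
from torch._C import _add_docstr

# _add_docstr returns the op with the docstring attached, so the
# module-level name `conv1d` is a documented alias of torch.conv1d.
conv1d = _add_docstr(
    torch.conv1d,
    r"""
conv1d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor

Applies a 1D convolution over an input signal composed of several input
planes.
""",
)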
torch/testing/_internal/common_utils.py
827 issues
Line: 47
Column: 1
import numpy as np
from torch.testing import floating_types_and, integral_types, complex_types, get_all_dtypes
import expecttest
from .._core import \
(_compare_tensors_internal, _compare_scalars_internal, _compare_return_type)
import torch
import torch.cuda
Reported by Pylint.
Line: 48
Column: 1
from torch.testing import floating_types_and, integral_types, complex_types, get_all_dtypes
import expecttest
from .._core import \
(_compare_tensors_internal, _compare_scalars_internal, _compare_return_type)
import torch
import torch.cuda
from torch._utils_internal import get_writable_path
Reported by Pylint.
Line: 323
Column: 9
assert not failed, "Some test shards have failed"
elif TEST_SAVE_XML is not None:
# import here so that non-CI doesn't need xmlrunner installed
import xmlrunner # type: ignore[import]
test_filename = sanitize_test_filename(inspect.getfile(sys._getframe(1)))
test_report_path = TEST_SAVE_XML + LOG_SUFFIX
test_report_path = os.path.join(test_report_path, test_filename)
os.makedirs(test_report_path, exist_ok=True)
verbose = '--verbose' in argv or '-v' in argv
Reported by Pylint.
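Line 323 points at a function-local `import xmlrunner`, which matches Pylint's import-outside-toplevel (C0415). The lazy import is deliberate (the quoted comment says non-CI runs don't need xmlrunner installed), so the usual remedy is a targeted disable rather than hoisting the import. A hedged sketch; `_run_xml_tests` is a hypothetical stand-in for the enclosing function, which the report does not show:

def _run_xml_tests():
    # Imported lazily on purpose: only CI runs with XML reporting need it.
    import xmlrunner  # type: ignore[import]  # pylint: disable=import-outside-toplevel
    ...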
Line: 446
Column: 21
# Dict of NumPy dtype -> torch dtype (when the correspondence exists)
numpy_to_torch_dtype_dict = {
np.bool_ : torch.bool,
np.uint8 : torch.uint8,
np.int8 : torch.int8,
np.int16 : torch.int16,
np.int32 : torch.int32,
np.int64 : torch.int64,
Reported by Pylint.
Line: 447
Column: 21
# Dict of NumPy dtype -> torch dtype (when the correspondence exists)
numpy_to_torch_dtype_dict = {
np.bool_ : torch.bool,
np.uint8 : torch.uint8,
np.int8 : torch.int8,
np.int16 : torch.int16,
np.int32 : torch.int32,
np.int64 : torch.int64,
np.float16 : torch.float16,
Reported by Pylint.
Line: 448
Column: 21
numpy_to_torch_dtype_dict = {
np.bool_ : torch.bool,
np.uint8 : torch.uint8,
np.int8 : torch.int8,
np.int16 : torch.int16,
np.int32 : torch.int32,
np.int64 : torch.int64,
np.float16 : torch.float16,
np.float32 : torch.float32,
Reported by Pylint.
Line: 449
Column: 21
np.bool_ : torch.bool,
np.uint8 : torch.uint8,
np.int8 : torch.int8,
np.int16 : torch.int16,
np.int32 : torch.int32,
np.int64 : torch.int64,
np.float16 : torch.float16,
np.float32 : torch.float32,
np.float64 : torch.float64,
Reported by Pylint.
Line: 450
Column: 21
np.uint8 : torch.uint8,
np.int8 : torch.int8,
np.int16 : torch.int16,
np.int32 : torch.int32,
np.int64 : torch.int64,
np.float16 : torch.float16,
np.float32 : torch.float32,
np.float64 : torch.float64,
np.complex64 : torch.complex64,
Reported by Pylint.
Line: 451
Column: 21
np.int8 : torch.int8,
np.int16 : torch.int16,
np.int32 : torch.int32,
np.int64 : torch.int64,
np.float16 : torch.float16,
np.float32 : torch.float32,
np.float64 : torch.float64,
np.complex64 : torch.complex64,
np.complex128 : torch.complex128
Reported by Pylint.
Line: 452
Column: 21
np.int16 : torch.int16,
np.int32 : torch.int32,
np.int64 : torch.int64,
np.float16 : torch.float16,
np.float32 : torch.float32,
np.float64 : torch.float64,
np.complex64 : torch.complex64,
np.complex128 : torch.complex128
}
Reported by Pylint.
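The column-21 entries all fall on the padding before each colon in `numpy_to_torch_dtype_dict`, which is what older Pylint's whitespace check (C0326) flagged. Assuming that is the message, the same mapping without the alignment:

# Dict of NumPy dtype -> torch dtype (when the correspondence exists)
numpy_to_torch_dtype_dict = {
    np.bool_: torch.bool,
    np.uint8: torch.uint8,
    np.int8: torch.int8,
    np.int16: torch.int16,
    np.int32: torch.int32,
    np.int64: torch.int64,
    np.float16: torch.float16,
    np.float32: torch.float32,
    np.float64: torch.float64,
    np.complex64: torch.complex64,
    np.complex128: torch.complex128,
}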
caffe2/python/hypothesis_test.py
774 issues
Line: 6
Column: 1
import time
from functools import partial, reduce
from future.utils import viewitems, viewkeys
from hypothesis import assume, given, settings, HealthCheck
import hypothesis.strategies as st
import unittest
import threading
from caffe2.python import core, workspace, tt_core, dyndep
Reported by Pylint.
Line: 7
Column: 1
from functools import partial, reduce
from future.utils import viewitems, viewkeys
from hypothesis import assume, given, settings, HealthCheck
import hypothesis.strategies as st
import unittest
import threading
from caffe2.python import core, workspace, tt_core, dyndep
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 23
Column: 5
# result, deadlines are not enforced on CUDA runs.
_hypothesis_settings = settings
def settings(**kwargs):
if 'deadline' in kwargs:
kwargs['deadline'] = None
kwargs.setdefault('max_examples', 50)
def wrapped(f):
Reported by Pylint.
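The Line 23 entry flags `def settings(**kwargs)`, which deliberately shadows the `settings` imported from hypothesis (the module aliases the original to `_hypothesis_settings` first), so this is most likely a redefinition warning. A hedged completion of the truncated wrapper; everything after `def wrapped(f):` is assumed, not quoted:

from hypothesis import settings

_hypothesis_settings = settings

def settings(**kwargs):
    # Deadlines are dropped (see the quoted comment about CUDA runs).
    if 'deadline' in kwargs:
        kwargs['deadline'] = None
    kwargs.setdefault('max_examples', 50)
    def wrapped(f):
        # Assumed body: delegate to the real hypothesis.settings decorator.
        return _hypothesis_settings(**kwargs)(f)
    return wrapped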
Line: 134
Column: 13
"GT": lambda x1, x2: [x1 > x2],
"GE": lambda x1, x2: [x1 >= x2]}
for name, ref in viewitems(ops):
_test_binary(name, ref, gcs=hu.gcs_cpu_only)(self)
_test_binary_broadcast(name, ref, gcs=hu.gcs_cpu_only)(self)
@given(inputs=hu.tensors(n=2), in_place=st.booleans(), **hu.gcs)
@settings(deadline=10000)
def test_sum(self, inputs, in_place, gc, dc):
Reported by Pylint.
Line: 135
Column: 13
"GE": lambda x1, x2: [x1 >= x2]}
for name, ref in viewitems(ops):
_test_binary(name, ref, gcs=hu.gcs_cpu_only)(self)
_test_binary_broadcast(name, ref, gcs=hu.gcs_cpu_only)(self)
@given(inputs=hu.tensors(n=2), in_place=st.booleans(), **hu.gcs)
@settings(deadline=10000)
def test_sum(self, inputs, in_place, gc, dc):
op = core.CreateOperator("Sum", ["X1", "X2"],
Reported by Pylint.
torch/overrides.py
738 issues
Line: 79
Column: 9
torch.save,
torch.load,
torch.set_printoptions,
torch.fork,
torch.get_default_dtype,
torch.get_num_interop_threads,
torch.get_num_threads,
torch.init_num_threads,
torch.import_ir_module,
Reported by Pylint.
Line: 80
Column: 9
torch.load,
torch.set_printoptions,
torch.fork,
torch.get_default_dtype,
torch.get_num_interop_threads,
torch.get_num_threads,
torch.init_num_threads,
torch.import_ir_module,
torch.import_ir_module_from_buffer,
Reported by Pylint.
Line: 81
Column: 9
torch.set_printoptions,
torch.fork,
torch.get_default_dtype,
torch.get_num_interop_threads,
torch.get_num_threads,
torch.init_num_threads,
torch.import_ir_module,
torch.import_ir_module_from_buffer,
torch.is_anomaly_enabled,
Reported by Pylint.
Line: 82
Column: 9
torch.fork,
torch.get_default_dtype,
torch.get_num_interop_threads,
torch.get_num_threads,
torch.init_num_threads,
torch.import_ir_module,
torch.import_ir_module_from_buffer,
torch.is_anomaly_enabled,
torch.is_grad_enabled,
Reported by Pylint.
Line: 83
Column: 9
torch.get_default_dtype,
torch.get_num_interop_threads,
torch.get_num_threads,
torch.init_num_threads,
torch.import_ir_module,
torch.import_ir_module_from_buffer,
torch.is_anomaly_enabled,
torch.is_grad_enabled,
torch.merge_type_from_type_comment,
Reported by Pylint.
Line: 84
Column: 9
torch.get_num_interop_threads,
torch.get_num_threads,
torch.init_num_threads,
torch.import_ir_module,
torch.import_ir_module_from_buffer,
torch.is_anomaly_enabled,
torch.is_grad_enabled,
torch.merge_type_from_type_comment,
torch.parse_ir,
Reported by Pylint.
Line: 85
Column: 9
torch.get_num_threads,
torch.init_num_threads,
torch.import_ir_module,
torch.import_ir_module_from_buffer,
torch.is_anomaly_enabled,
torch.is_grad_enabled,
torch.merge_type_from_type_comment,
torch.parse_ir,
torch.parse_schema,
Reported by Pylint.
Line: 86
Column: 9
torch.init_num_threads,
torch.import_ir_module,
torch.import_ir_module_from_buffer,
torch.is_anomaly_enabled,
torch.is_grad_enabled,
torch.merge_type_from_type_comment,
torch.parse_ir,
torch.parse_schema,
torch.parse_type_comment,
Reported by Pylint.
Line: 87
Column: 9
torch.import_ir_module,
torch.import_ir_module_from_buffer,
torch.is_anomaly_enabled,
torch.is_grad_enabled,
torch.merge_type_from_type_comment,
torch.parse_ir,
torch.parse_schema,
torch.parse_type_comment,
torch.set_anomaly_enabled,
Reported by Pylint.
Line: 88
Column: 9
torch.import_ir_module_from_buffer,
torch.is_anomaly_enabled,
torch.is_grad_enabled,
torch.merge_type_from_type_comment,
torch.parse_ir,
torch.parse_schema,
torch.parse_type_comment,
torch.set_anomaly_enabled,
torch.set_flush_denormal,
Reported by Pylint.
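Every overrides.py entry starts at a bare `torch.<name>` reference. torch's C-extension members are invisible to static analysis, so if these are no-member (E1101) findings they are false positives, and the conventional fix is a pylintrc whitelist rather than per-line disables. A hedged config sketch:

[TYPECHECK]
# Members matching these regexes are assumed to exist at runtime.
generated-members=torch.*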
test/test_tensor_creation_ops.py
727 issues
Line: 1
Column: 1
import torch
import numpy as np
import sys
import math
import warnings
import unittest
from itertools import product, combinations, combinations_with_replacement, permutations
import random
Reported by Pylint.
Line: 11
Column: 1
from itertools import product, combinations, combinations_with_replacement, permutations
import random
from torch.testing._internal.common_utils import (
TestCase, run_tests, do_test_empty_full, TEST_WITH_ROCM, suppress_warnings,
torch_to_numpy_dtype_dict, slowTest, make_tensor, TEST_SCIPY, IS_MACOS, IS_PPC,
IS_WINDOWS)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA,
Reported by Pylint.
Line: 15
Column: 1
TestCase, run_tests, do_test_empty_full, TEST_WITH_ROCM, suppress_warnings,
torch_to_numpy_dtype_dict, slowTest, make_tensor, TEST_SCIPY, IS_MACOS, IS_PPC,
IS_WINDOWS)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA,
onlyCPU, largeTensorTest, precisionOverride, dtypes,
onlyCUDA, skipCPUIf, dtypesIfCUDA, dtypesIfCPU, skipMeta)
# TODO: refactor tri_tests_args, _compare_trilu_indices, run_additional_tri_tests
Reported by Pylint.
Line: 21
Column: 1
onlyCUDA, skipCPUIf, dtypesIfCUDA, dtypesIfCPU, skipMeta)
# TODO: refactor tri_tests_args, _compare_trilu_indices, run_additional_tri_tests
from torch.testing._internal.common_methods_invocations import (
tri_tests_args, _compare_trilu_indices, run_additional_tri_tests)
# TODO: replace with make_tensor
def _generate_input(shape, dtype, device, with_extremal):
Reported by Pylint.
Line: 436
Column: 9
@unittest.skipIf(not TEST_SCIPY, "Scipy not found")
def test_block_diag_scipy(self, device):
import scipy.linalg
scipy_tensors_list = [
[
1,
[2],
[],
Reported by Pylint.
Line: 2608
Column: 9
@dtypesIfCUDA(torch.float, torch.double, torch.bfloat16, torch.half, torch.long)
@dtypesIfCPU(torch.float, torch.double, torch.long)
def test_signal_window_functions(self, device, dtype):
import scipy.signal as signal
def test(name, kwargs):
torch_method = getattr(torch, name + '_window')
if not dtype.is_floating_point:
with self.assertRaisesRegex(RuntimeError, r'floating point'):
Reported by Pylint.
Line: 20
Column: 3
onlyCPU, largeTensorTest, precisionOverride, dtypes,
onlyCUDA, skipCPUIf, dtypesIfCUDA, dtypesIfCPU, skipMeta)
# TODO: refactor tri_tests_args, _compare_trilu_indices, run_additional_tri_tests
from torch.testing._internal.common_methods_invocations import (
tri_tests_args, _compare_trilu_indices, run_additional_tri_tests)
# TODO: replace with make_tensor
Reported by Pylint.
Line: 25
Column: 3
tri_tests_args, _compare_trilu_indices, run_additional_tri_tests)
# TODO: replace with make_tensor
def _generate_input(shape, dtype, device, with_extremal):
if shape == ():
x = torch.tensor((), dtype=dtype, device=device)
else:
if dtype.is_floating_point or dtype.is_complex:
Reported by Pylint.
Line: 56
Column: 3
return x
# TODO: replace with make_tensor
def _rand_shape(dim, min_size, max_size):
shape = []
for i in range(dim):
shape.append(random.randint(min_size, max_size))
return tuple(shape)
Reported by Pylint.
Line: 59
Column: 9
# TODO: replace with make_tensor
def _rand_shape(dim, min_size, max_size):
shape = []
for i in range(dim):
shape.append(random.randint(min_size, max_size))
return tuple(shape)
# Test suite for tensor creation ops
#
Reported by Pylint.
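Line 59, column 9 lands on the loop variable `i` in `_rand_shape`, which the body never uses, so this reads as unused-variable (W0612). A hedged rewrite that also drops the accumulator:

def _rand_shape(dim, min_size, max_size):
    # `_` makes explicit that only the iteration count matters.
    return tuple(random.randint(min_size, max_size) for _ in range(dim))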
test/test_binary_ufuncs.py
710 issues
Line: 1
Column: 1
import torch
import numpy as np
import itertools
from itertools import product
import math
import random
import unittest
import warnings
Reported by Pylint.
Line: 13
Column: 1
import operator
from functools import partial
from torch._six import inf, nan
from torch.testing._internal.common_utils import (
TestCase, iter_indices, TEST_WITH_ASAN, run_tests,
torch_to_numpy_dtype_dict, make_tensor, TEST_SCIPY, set_default_dtype)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCUDA, onlyCPU, dtypes, dtypesIfCUDA,
Reported by Pylint.
Line: 14
Column: 1
from functools import partial
from torch._six import inf, nan
from torch.testing._internal.common_utils import (
TestCase, iter_indices, TEST_WITH_ASAN, run_tests,
torch_to_numpy_dtype_dict, make_tensor, TEST_SCIPY, set_default_dtype)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCUDA, onlyCPU, dtypes, dtypesIfCUDA,
dtypesIfCPU, deviceCountAtLeast, precisionOverride, onlyOnCPUAndCUDA,
Reported by Pylint.
Line: 17
Column: 1
from torch.testing._internal.common_utils import (
TestCase, iter_indices, TEST_WITH_ASAN, run_tests,
torch_to_numpy_dtype_dict, make_tensor, TEST_SCIPY, set_default_dtype)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCUDA, onlyCPU, dtypes, dtypesIfCUDA,
dtypesIfCPU, deviceCountAtLeast, precisionOverride, onlyOnCPUAndCUDA,
skipCUDAIfRocm, skipIf)
from torch.testing import all_types_and_complex_and, integral_types_and
Reported by Pylint.
Line: 21
Column: 1
instantiate_device_type_tests, onlyCUDA, onlyCPU, dtypes, dtypesIfCUDA,
dtypesIfCPU, deviceCountAtLeast, precisionOverride, onlyOnCPUAndCUDA,
skipCUDAIfRocm, skipIf)
from torch.testing import all_types_and_complex_and, integral_types_and
if TEST_SCIPY:
import scipy.special
import scipy.integrate
Reported by Pylint.
Line: 24
Column: 5
from torch.testing import all_types_and_complex_and, integral_types_and
if TEST_SCIPY:
import scipy.special
import scipy.integrate
# TODO: remove this
def _generate_input(shape, dtype, device, with_extremal):
if shape == ():
Reported by Pylint.
Line: 25
Column: 5
if TEST_SCIPY:
import scipy.special
import scipy.integrate
# TODO: remove this
def _generate_input(shape, dtype, device, with_extremal):
if shape == ():
x = torch.tensor((), dtype=dtype, device=device)
Reported by Pylint.
Line: 982
Column: 57
actual_first_tensor = torch_op(a_t, b)
actual_second_tensor = torch_op(a, b_t)
self.assertEqual(actual_scalar, expected_div)
self.assertEqual(actual_tensor.item(), expected_div)
self.assertEqual(actual_first_tensor, actual_tensor)
self.assertEqual(actual_second_tensor, actual_tensor)
_scalar_helper(operator.truediv, operator.truediv)
Reported by Pylint.
Line: 983
Column: 64
actual_second_tensor = torch_op(a, b_t)
self.assertEqual(actual_scalar, expected_div)
self.assertEqual(actual_tensor.item(), expected_div)
self.assertEqual(actual_first_tensor, actual_tensor)
self.assertEqual(actual_second_tensor, actual_tensor)
_scalar_helper(operator.truediv, operator.truediv)
_scalar_helper(operator.truediv, torch.true_divide)
Reported by Pylint.
Line: 1165
Column: 25
with self.assertRaises(RuntimeError), self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
a_t.clone().floor_divide_(b_t)
with self.assertRaises(RuntimeError), self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
scripted_floor_divide_tensor(a_t.clone(), b_t)
tmp = a_t.clone()
with self.assertRaises(RuntimeError), self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
tmp //= b_t
else:
# Inplace modification is OK when both or neither tensor is
Reported by Pylint.
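The Line 1165 entry points into the stacked context managers around `tmp //= b_t`. Whatever the exact message, the quoted pattern (asserting an exception and a deprecation warning in one `with` statement) is standard unittest; a self-contained sketch using only stdlib assertions:

import unittest
import warnings

class Demo(unittest.TestCase):
    def test_combined(self):
        # Both managers guard the same block: assertWarnsRegex lets the
        # exception pass through, and assertRaises consumes it on exit.
        with self.assertRaises(ZeroDivisionError), \
                self.assertWarnsRegex(UserWarning, "deprecated"):
            warnings.warn("deprecated", UserWarning)
            _ = 1 // 0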
test/jit/test_list_dict.py
692 issues
Line: 9
Column: 1
from textwrap import dedent
from collections import OrderedDict
from torch import Tensor
import torch
import torch.nn as nn
import types
from torch.testing import FileCheck
Reported by Pylint.
Line: 10
Column: 1
from collections import OrderedDict
from torch import Tensor
import torch
import torch.nn as nn
import types
from torch.testing import FileCheck
# Make the helper files in test/ importable
Reported by Pylint.
Line: 11
Column: 1
from torch import Tensor
import torch
import torch.nn as nn
import types
from torch.testing import FileCheck
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
Reported by Pylint.
Line: 13
Column: 1
import torch
import torch.nn as nn
import types
from torch.testing import FileCheck
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
Reported by Pylint.
Line: 18
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
Reported by Pylint.
Line: 155
Column: 13
with self.assertRaisesRegexWithHighlight(RuntimeError, "deletion at a single index", "x[1:3]"):
@torch.jit.script
def fn(x: List[int]) -> List[int]:
del x[1:3]
return x
def test_list_keyword(self):
def foo():
Reported by Pylint.
Line: 2439
Column: 34
return self
def __next__(self):
if self.value == limit:
raise StopIteration()
ret = self.value
self.value += 1
return ret
Reported by Pylint.
Line: 77
Column: 13
def test_list_literal(self):
def reassign():
x = [1]
if 1 == 1:
x = [2, 3]
return
self.checkScript(reassign, (), optimize=False)
Reported by Pylint.
Line: 84
Column: 13
self.checkScript(reassign, (), optimize=False)
def reassign_arity_change():
x = [1]
if 1 == 1:
x = [1, 2, 3]
return
self.checkScript(reassign_arity_change, (), optimize=False)
Reported by Pylint.
Line: 91
Column: 13
self.checkScript(reassign_arity_change, (), optimize=False)
def reassign_from_empty_literal():
x = []
if 1 == 1:
x = [1, 2, 3]
return
with self.assertRaisesRegexWithHighlight(RuntimeError, r"previously has type List\[Tensor\]", "x"):
self.checkScript(reassign_from_empty_literal, (), optimize=False)
Reported by Pylint.
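The entries at lines 77, 84, and 91 all sit inside helpers built around a constant `if 1 == 1:` branch, which TorchScript needs in order to see a conditional reassignment; Pylint plausibly objects to the self-comparison (e.g. R0124). A hedged sketch keeping the intent while silencing the check:

def reassign():
    x = [1]
    # The constant condition is deliberate: TorchScript must type-check
    # both the `[1]` and `[2, 3]` assignments to `x`.
    if 1 == 1:  # pylint: disable=comparison-with-itself
        x = [2, 3]
    return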
test/test_vmap.py
637 issues
Line: 1
Column: 1
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn.functional as F
from torch import Tensor, vmap
import functools
import itertools
import warnings
from torch.testing._internal.common_device_type import instantiate_device_type_tests, \
skipCUDAIfNoMagma
Reported by Pylint.
Line: 2
Column: 1
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn.functional as F
from torch import Tensor, vmap
import functools
import itertools
import warnings
from torch.testing._internal.common_device_type import instantiate_device_type_tests, \
skipCUDAIfNoMagma
Reported by Pylint.
Line: 3
Column: 1
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn.functional as F
from torch import Tensor, vmap
import functools
import itertools
import warnings
from torch.testing._internal.common_device_type import instantiate_device_type_tests, \
skipCUDAIfNoMagma
Reported by Pylint.
Line: 4
Column: 1
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn.functional as F
from torch import Tensor, vmap
import functools
import itertools
import warnings
from torch.testing._internal.common_device_type import instantiate_device_type_tests, \
skipCUDAIfNoMagma
Reported by Pylint.
Line: 8
Column: 1
import functools
import itertools
import warnings
from torch.testing._internal.common_device_type import instantiate_device_type_tests, \
skipCUDAIfNoMagma
import types
FALLBACK_REGEX = r'falling back to slow \(for loop( and stack)?\) implementation'
Reported by Pylint.
Line: 17
Column: 9
class EnableVmapFallbackWarnings:
def __enter__(self):
self.prev_state = torch._C._debug_only_are_vmap_fallback_warnings_enabled()
torch._C._debug_only_display_vmap_fallback_warnings(True)
def __exit__(self, *ignored):
torch._C._debug_only_display_vmap_fallback_warnings(self.prev_state)
Reported by Pylint.
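Line 17, column 9 falls on `self.prev_state`, first assigned in `__enter__`, which matches attribute-defined-outside-init (W0201). A hedged sketch that declares it up front:

import torch

class EnableVmapFallbackWarnings:
    def __init__(self):
        # Declared here so the attribute exists before __enter__ runs.
        self.prev_state = None

    def __enter__(self):
        self.prev_state = torch._C._debug_only_are_vmap_fallback_warnings_enabled()
        torch._C._debug_only_display_vmap_fallback_warnings(True)

    def __exit__(self, *ignored):
        torch._C._debug_only_display_vmap_fallback_warnings(self.prev_state)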
Line: 26
Column: 13
class TestVmapAPI(TestCase):
def test_non_tensor_output_raises(self):
with self.assertRaisesRegex(ValueError, "got type <class 'float'> as the return"):
output = vmap(lambda x: 3.14)(torch.ones(3))
def multiple_outputs(x):
return x, 3
with self.assertRaisesRegex(ValueError, "got type <class 'int'> for return 1"):
Reported by Pylint.
Line: 51
Column: 17
def foo():
return torch.randn(3)
def bar(x):
return torch.randn(3)
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(foo)()
Reported by Pylint.
Line: 234
Column: 23
y = torch.randn(2, 3, 5, 7)
# Inner vmap has non-zero out_dim
result = vmap(lambda y: vmap(lambda x: x, out_dims=1)(y))(y)
self.assertEqual(result.shape, (2, 5, 3, 7))
self.assertEqual(result, y.permute(0, 2, 1, 3))
# all vmaps have non-zero out_dim
result = vmap(lambda y: vmap(lambda x: x, out_dims=1)(y), out_dims=1)(y)
Reported by Pylint.
Line: 239
Column: 23
self.assertEqual(result, y.permute(0, 2, 1, 3))
# all vmaps have non-zero out_dim
result = vmap(lambda y: vmap(lambda x: x, out_dims=1)(y), out_dims=1)(y)
self.assertEqual(result.shape, (5, 2, 3, 7))
self.assertEqual(result, y.permute(2, 0, 1, 3))
# throwing in some negative out_dims
result = vmap(lambda y: vmap(lambda x: x, out_dims=-1)(y), out_dims=-1)(y)
Reported by Pylint.
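The column-23 entries at lines 234 and 239 start at lambdas whose parameter `y` shadows the outer tensor `y`, the shape of redefined-outer-name (W0621). A hedged drop-in rewrite of the quoted line, renaming only the parameter:

# The outer tensor keeps its name; only the lambda parameter changes.
result = vmap(lambda t: vmap(lambda x: x, out_dims=1)(t))(y)
self.assertEqual(result.shape, (2, 5, 3, 7))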