The following issues were found:
test/test_xnnpack_integration.py
291 issues
Line: 3
Column: 1
import unittest
import torch
import torch.backends.xnnpack
from torch.nn import functional as F
from torch.utils.mobile_optimizer import optimize_for_mobile
from torch.testing import FileCheck
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import TestCase, run_tests, slowTest
Reported by Pylint.
Line: 4
Column: 1
import unittest
import torch
import torch.backends.xnnpack
from torch.nn import functional as F
from torch.utils.mobile_optimizer import optimize_for_mobile
from torch.testing import FileCheck
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import TestCase, run_tests, slowTest
Reported by Pylint.
Line: 5
Column: 1
import torch
import torch.backends.xnnpack
from torch.nn import functional as F
from torch.utils.mobile_optimizer import optimize_for_mobile
from torch.testing import FileCheck
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import TestCase, run_tests, slowTest
from hypothesis import given, assume
Reported by Pylint.
Line: 6
Column: 1
import torch
import torch.backends.xnnpack
from torch.nn import functional as F
from torch.utils.mobile_optimizer import optimize_for_mobile
from torch.testing import FileCheck
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import TestCase, run_tests, slowTest
from hypothesis import given, assume
from hypothesis import strategies as st
Reported by Pylint.
Line: 7
Column: 1
import torch.backends.xnnpack
from torch.nn import functional as F
from torch.utils.mobile_optimizer import optimize_for_mobile
from torch.testing import FileCheck
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import TestCase, run_tests, slowTest
from hypothesis import given, assume
from hypothesis import strategies as st
import io
Reported by Pylint.
Line: 8
Column: 1
from torch.nn import functional as F
from torch.utils.mobile_optimizer import optimize_for_mobile
from torch.testing import FileCheck
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import TestCase, run_tests, slowTest
from hypothesis import given, assume
from hypothesis import strategies as st
import io
import itertools
Reported by Pylint.
Line: 9
Column: 1
from torch.utils.mobile_optimizer import optimize_for_mobile
from torch.testing import FileCheck
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import TestCase, run_tests, slowTest
from hypothesis import given, assume
from hypothesis import strategies as st
import io
import itertools
Reported by Pylint.
Line: 10
Column: 1
from torch.testing import FileCheck
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import TestCase, run_tests, slowTest
from hypothesis import given, assume
from hypothesis import strategies as st
import io
import itertools
from torch.testing._internal.common_utils import TEST_WITH_TSAN
Reported by Pylint.
Line: 11
Column: 1
import torch.testing._internal.hypothesis_utils as hu
from torch.testing._internal.common_utils import TestCase, run_tests, slowTest
from hypothesis import given, assume
from hypothesis import strategies as st
import io
import itertools
from torch.testing._internal.common_utils import TEST_WITH_TSAN
Reported by Pylint.
Line: 15
Column: 1
import io
import itertools
from torch.testing._internal.common_utils import TEST_WITH_TSAN
@unittest.skipUnless(torch.backends.xnnpack.enabled,
                     " XNNPACK must be enabled for these tests."
                     " Please build with USE_XNNPACK=1.")
@unittest.skipIf(TEST_WITH_TSAN, "TSAN fails with XNNPACK. Does not seem to have a good reason for failures.")
Reported by Pylint.
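All of the findings above sit on this file's import block. Assuming they are pylint's import-layout checks (the report does not name the message IDs), the conventional remedies are grouping imports stdlib-first and acknowledging any deliberately late import with a targeted disable; a minimal sketch:

# Hypothetical reordering sketch: stdlib first, then third-party.
import io
import itertools
import unittest

import torch
import torch.backends.xnnpack

# Where an import must stay below setup code, silence only that check:
from torch.testing import FileCheck  # pylint: disable=wrong-import-position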
test/test_testing.py
289 issues
Line: 11
Column: 1
import unittest
from typing import Any, Callable, Iterator, List, Tuple
import torch
from torch.testing._internal.common_utils import \
    (IS_FBCODE, IS_SANDCASTLE, IS_WINDOWS, TestCase, make_tensor, run_tests, skipIfRocm, slowTest)
from torch.testing._internal.common_device_type import \
    (PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
Reported by Pylint.
Line: 13
Column: 1
import torch
from torch.testing._internal.common_utils import \
    (IS_FBCODE, IS_SANDCASTLE, IS_WINDOWS, TestCase, make_tensor, run_tests, skipIfRocm, slowTest)
from torch.testing._internal.common_device_type import \
    (PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
     get_device_type_test_bases, instantiate_device_type_tests, onlyCUDA, onlyOnCPUAndCUDA,
     deviceCountAtLeast)
Reported by Pylint.
Line: 15
Column: 1
from torch.testing._internal.common_utils import \
    (IS_FBCODE, IS_SANDCASTLE, IS_WINDOWS, TestCase, make_tensor, run_tests, skipIfRocm, slowTest)
from torch.testing._internal.common_device_type import \
    (PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
     get_device_type_test_bases, instantiate_device_type_tests, onlyCUDA, onlyOnCPUAndCUDA,
     deviceCountAtLeast)
from torch.testing._internal.common_methods_invocations import op_db
import torch.testing._internal.opinfo_helper as opinfo_helper
Reported by Pylint.
Line: 19
Column: 1
    (PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, dtypes,
     get_device_type_test_bases, instantiate_device_type_tests, onlyCUDA, onlyOnCPUAndCUDA,
     deviceCountAtLeast)
from torch.testing._internal.common_methods_invocations import op_db
import torch.testing._internal.opinfo_helper as opinfo_helper
# For testing TestCase methods and torch.testing functions
class TestTesting(TestCase):
    # Ensure that assertEqual handles numpy arrays properly
Reported by Pylint.
Line: 20
Column: 1
     get_device_type_test_bases, instantiate_device_type_tests, onlyCUDA, onlyOnCPUAndCUDA,
     deviceCountAtLeast)
from torch.testing._internal.common_methods_invocations import op_db
import torch.testing._internal.opinfo_helper as opinfo_helper
# For testing TestCase methods and torch.testing functions
class TestTesting(TestCase):
    # Ensure that assertEqual handles numpy arrays properly
    @dtypes(*(torch.testing.get_all_dtypes(include_half=True, include_bfloat16=False,
Reported by Pylint.
Line: 46
Column: 3
    # Tests that when rtol or atol (including self.precision) is set, then
    # the other is zeroed.
    # TODO: this is legacy behavior and should be updated after test
    # precisions are reviewed to be consistent with torch.isclose.
    @onlyOnCPUAndCUDA
    def test__comparetensors_legacy(self, device):
        a = torch.tensor((10000000.,))
        b = torch.tensor((10000002.,))
Reported by Pylint.
Line: 49
Column: 43
    # TODO: this is legacy behavior and should be updated after test
    # precisions are reviewed to be consistent with torch.isclose.
    @onlyOnCPUAndCUDA
    def test__comparetensors_legacy(self, device):
        a = torch.tensor((10000000.,))
        b = torch.tensor((10000002.,))
        x = torch.tensor((1.,))
        y = torch.tensor((1. + 1e-5,))
Reported by Pylint.
Line: 62
Column: 21
        for op in (self._compareTensors, _scalar_helper):
            # Tests default
            result, debug_msg = op(a, b)
            self.assertTrue(result)
            # Tests setting atol
            result, debug_msg = op(a, b, atol=2, rtol=0)
            self.assertTrue(result)
Reported by Pylint.
Line: 82
Column: 46
        self.assertFalse(result)
    @onlyOnCPUAndCUDA
    def test__comparescalars_debug_msg(self, device):
        # float x float
        result, debug_msg = self._compareScalars(4., 7.)
        expected_msg = ("Comparing 4.0 and 7.0 gives a difference of 3.0, "
                        "but the allowed difference with rtol=1.3e-06 and "
                        "atol=1e-05 is only 1.9100000000000003e-05!")
Reported by Pylint.
Line: 84
Column: 9
    @onlyOnCPUAndCUDA
    def test__comparescalars_debug_msg(self, device):
        # float x float
        result, debug_msg = self._compareScalars(4., 7.)
        expected_msg = ("Comparing 4.0 and 7.0 gives a difference of 3.0, "
                        "but the allowed difference with rtol=1.3e-06 and "
                        "atol=1e-05 is only 1.9100000000000003e-05!")
        self.assertEqual(debug_msg, expected_msg)
Reported by Pylint.
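The expected_msg quoted in this excerpt follows the standard tolerance rule |a - b| <= atol + rtol * |b|. A small sketch reproducing those exact numbers with the public torch.isclose (not the internal _compareScalars helper):

import torch

a, b = torch.tensor(4.0), torch.tensor(7.0)
rtol, atol = 1.3e-06, 1e-05
allowed = atol + rtol * abs(b)  # 1.9100000000000003e-05, as quoted above
print(torch.isclose(a, b, rtol=rtol, atol=atol))  # tensor(False): |a - b| = 3.0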
torch/autograd/gradcheck.py
286 issues
Line: 31
Column: 70
    out: List[torch.Tensor] = []
    for t in input_tensors:
        if _is_float_or_complex_tensor(t) and t.requires_grad:
            out.append(t.new_zeros((t.numel(), numel_output), layout=torch.strided))
    return tuple(out)
def _allocate_jacobians_with_outputs(output_tensors: Tuple, numel_input, dtype=None,
                                     device=None) -> Tuple[torch.Tensor, ...]:
Reported by Pylint.
Line: 42
Column: 60
    # width of `t.numel`. Otherwise, for each tensor, returns a 1-d tensor with size
    # (t.numel,).
    out: List[torch.Tensor] = []
    options = {"dtype": dtype, "device": device, "layout": torch.strided}
    for t in output_tensors:
        if _is_float_or_complex_tensor(t):
            out.append(t.new_zeros((numel_input, t.numel()), **options))
    return tuple(out)
Reported by Pylint.
Line: 100
Column: 29
                indices = x_indices[i].tolist() + list(x_idx)
                d_idx = sum(indices[k] * x_stride[k] for k in range(len(x_size)))
                yield x_value, x_idx, d_idx
    elif x_tensor.layout == torch._mkldnn:  # type: ignore[attr-defined]
        for d_idx, x_idx in enumerate(product(*[range(m) for m in x_tensor.size()])):
            # this is really inefficient, but without indexing implemented, there's
            # not really a better way than converting back and forth
            x_tensor_dense = x_tensor.to_dense()
            yield x_tensor_dense, x_idx, d_idx
Reported by Pylint.
Line: 241
Column: 24
                   fast_mode=False) -> torch.Tensor:
    # Prepares the inputs to be passed into the function while including the new
    # modified input.
    if input.layout == torch._mkldnn:  # type: ignore[attr-defined] # no attr _mkldnn
        # Convert back to mkldnn
        if maybe_perturbed_input is not None:
            return maybe_perturbed_input.to_mkldnn()
        else:
            return input
Reported by Pylint.
Line: 247
Column: 26
            return maybe_perturbed_input.to_mkldnn()
        else:
            return input
    elif input.layout == torch.sparse_coo:
        if fast_mode and maybe_perturbed_input is not None:
            # entry is already a "cloned" version of the original tensor
            # thus changes to entry are not reflected in the input
            return maybe_perturbed_input
        else:
Reported by Pylint.
Line: 329
Column: 34
    dual_inputs = []
    for i, inp in enumerate(inputs):
        if is_tensor_like(inp) and inp.requires_grad:
            if inp.layout == torch._mkldnn:  # type: ignore[attr-defined]
                raise ValueError("MKLDNN inputs are not supported for forward AD gradcheck.")
            inp = fwAD.make_dual(inp, torch.zeros_like(inp))
            # If inp is a differentiable view, the dual might not be the tangent given to
            # make_dual, so read it explicitly from the dual tensor
Reported by Pylint.
Line: 332
Column: 43
            if inp.layout == torch._mkldnn:  # type: ignore[attr-defined]
                raise ValueError("MKLDNN inputs are not supported for forward AD gradcheck.")
            inp = fwAD.make_dual(inp, torch.zeros_like(inp))
            # If inp is a differentiable view, the dual might not be the tangent given to
            # make_dual, so read it explicitly from the dual tensor
            fw_grads.append(fwAD.unpack_dual(inp)[1])
            dual_inputs.append(inp)
Reported by Pylint.
Line: 381
Column: 24
    # Prepare the input so that it can be modified in-place and do certain
    # operations that require the tensor to have strides. If fast_mode=False,
    # _iter_tensor would handle the below cases:
    if input.layout == torch._mkldnn:  # type: ignore[attr-defined] # no attr _mkldnn
        # Convert to dense so we can perform operations that require strided tensors
        input_to_perturb = input.to_dense()
    elif input.layout == torch.sparse_coo:
        # Clone because input may require grad, and copy_ calls resize_,
        # which is not allowed for .data
Reported by Pylint.
Line: 384
Column: 26
    if input.layout == torch._mkldnn:  # type: ignore[attr-defined] # no attr _mkldnn
        # Convert to dense so we can perform operations that require strided tensors
        input_to_perturb = input.to_dense()
    elif input.layout == torch.sparse_coo:
        # Clone because input may require grad, and copy_ calls resize_,
        # which is not allowed for .data
        input_to_perturb = input.clone()
    else:
        input_to_perturb = input.data
Reported by Pylint.
Line: 412
Column: 27
def _reshape_tensor_or_tuple(u, shape):
    # We don't need to reshape when input corresponding to u is sparse
    if isinstance(u, tuple):
        if u[0].layout != torch.sparse_coo:
            return (u[0].reshape(shape), u[1].reshape(shape))
    else:
        if u.layout != torch.sparse_coo:
            return u.reshape(shape)
    return u
Reported by Pylint.
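These internal helpers back torch.autograd.gradcheck, which compares analytical Jacobians against finite-difference ones. A minimal usage sketch of the public API (double precision keeps the numerical differences stable):

import torch
from torch.autograd import gradcheck

x = torch.randn(3, 4, dtype=torch.double, requires_grad=True)
# Raises (or returns False with raise_exception=False) on a mismatch.
assert gradcheck(torch.sin, (x,), eps=1e-6, atol=1e-4)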
test/jit/test_async.py
283 issues
Line: 5
Column: 1
import os
import sys
import torch
import torch.nn as nn
from typing import Any, Tuple
# Make the helper files in test/ importable
Reported by Pylint.
Line: 6
Column: 1
import sys
import torch
import torch.nn as nn
from typing import Any, Tuple
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
Reported by Pylint.
Line: 13
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, _inline_everything
from typing import List
from torch import Tensor
class TestAsync(JitTestCase):
    def test_async_python(self):
Reported by Pylint.
Line: 15
Column: 1
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, _inline_everything
from typing import List
from torch import Tensor
class TestAsync(JitTestCase):
    def test_async_python(self):
        @torch.jit.script
        def foo(x):
Reported by Pylint.
Line: 49
Column: 47
        @torch.jit.script
        def bar(x):
            futures = torch.jit.annotate(List[Future[List[Tensor]]], [])
            for _ in range(3):
                future = torch.jit.annotate(
                    Future[List[Tensor]],
                    torch.jit.fork(foo, x)
                )
Reported by Pylint.
Line: 52
Column: 21
            futures = torch.jit.annotate(List[Future[List[Tensor]]], [])
            for _ in range(3):
                future = torch.jit.annotate(
                    Future[List[Tensor]],
                    torch.jit.fork(foo, x)
                )
                futures.append(future)
            output = torch.jit.annotate(List[List[Tensor]], [])
Reported by Pylint.
Line: 448
Column: 32
                for i in range(3):
                    input_list.append(input)
                fut_list: List[Future[torch.Tensor]] = []
                for input_tensor in input_list:
                    fut_list.append(torch.jit._fork(add_one, input_tensor))
                # return list[future[tensor]] here to ensure tracing
                # module calls return the correct types
                return fut_list
Reported by Pylint.
Line: 25
Column: 9
        x = torch.rand(3, 4)
        fut = torch.jit.fork(foo, x)
        y_hat = foo(x)
        y = torch.jit.wait(fut)
        # assert nothing; only to make sure the fake python path works
    def test_async_future_type_python(self):
        def foo(inp):
Reported by Pylint.
Line: 26
Column: 9
        x = torch.rand(3, 4)
        fut = torch.jit.fork(foo, x)
        y_hat = foo(x)
        y = torch.jit.wait(fut)
        # assert nothing; only to make sure the fake python path works
    def test_async_future_type_python(self):
        def foo(inp):
            futures = torch.jit.annotate(List[torch.jit.Future[torch.Tensor]], [])
Reported by Pylint.
Line: 32
Column: 17
    def test_async_future_type_python(self):
        def foo(inp):
            futures = torch.jit.annotate(List[torch.jit.Future[torch.Tensor]], [])
            for i in range(5):
                futures.append(torch.jit.fork(lambda x: x, inp))
            all_outputs = []
            for future in futures:
                all_outputs.append(torch.jit.wait(future))
            return all_outputs
Reported by Pylint.
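The pattern these tests exercise is fork/wait parallelism in TorchScript. A self-contained sketch mirroring the annotate style used in the excerpts above:

import torch
from typing import List

@torch.jit.script
def double(x: torch.Tensor) -> torch.Tensor:
    return x * 2

@torch.jit.script
def fan_out(x: torch.Tensor) -> List[torch.Tensor]:
    # fork launches the call asynchronously; wait collects the result.
    futures = torch.jit.annotate(List[torch.jit.Future[torch.Tensor]], [])
    for _ in range(3):
        futures.append(torch.jit.fork(double, x))
    return [torch.jit.wait(f) for f in futures]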
caffe2/python/operator_test/torch_integration_test.py
279 issues
Line: 7
Column: 1
import unittest
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
import torch
from caffe2.python import core, workspace
from hypothesis import given, settings
from scipy.stats import norm
Reported by Pylint.
Line: 11
Column: 1
import numpy as np
import torch
from caffe2.python import core, workspace
from hypothesis import given, settings
from scipy.stats import norm
def generate_rois(roi_counts, im_dims):
    assert len(roi_counts) == len(im_dims)
Reported by Pylint.
Line: 12
Column: 1
import torch
from caffe2.python import core, workspace
from hypothesis import given, settings
from scipy.stats import norm
def generate_rois(roi_counts, im_dims):
    assert len(roi_counts) == len(im_dims)
    all_rois = []
Reported by Pylint.
Line: 160
Column: 19
            workspace.RunOperatorOnce(ref_op)
            return workspace.FetchBlob("box_out")
        box_out = torch.tensor(bbox_transform_ref())
        a, b = torch.ops._caffe2.BBoxTransform(
            torch.tensor(rois),
            torch.tensor(deltas),
            torch.tensor(im_info),
            [1.0, 1.0, 1.0, 1.0],
Reported by Pylint.
Line: 162
Column: 13
        box_out = torch.tensor(bbox_transform_ref())
        a, b = torch.ops._caffe2.BBoxTransform(
            torch.tensor(rois),
            torch.tensor(deltas),
            torch.tensor(im_info),
            [1.0, 1.0, 1.0, 1.0],
            False,
            rotated,
Reported by Pylint.
Line: 163
Column: 13
        box_out = torch.tensor(bbox_transform_ref())
        a, b = torch.ops._caffe2.BBoxTransform(
            torch.tensor(rois),
            torch.tensor(deltas),
            torch.tensor(im_info),
            [1.0, 1.0, 1.0, 1.0],
            False,
            rotated,
            angle_bound_on,
Reported by Pylint.
Line: 164
Column: 13
        a, b = torch.ops._caffe2.BBoxTransform(
            torch.tensor(rois),
            torch.tensor(deltas),
            torch.tensor(im_info),
            [1.0, 1.0, 1.0, 1.0],
            False,
            rotated,
            angle_bound_on,
            -90,
Reported by Pylint.
Line: 183
Column: 60
        rotated=st.booleans(),
        angle_bound_on=st.booleans(),
        clip_angle_thresh=st.sampled_from([-1.0, 1.0]),
        batch_splits_dtype=st.sampled_from([torch.float32, torch.int32]),
        **hu.gcs_cpu_only
    )
    def test_box_with_nms_limits(
        self,
        roi_counts,
Reported by Pylint.
Line: 183
Column: 45
        rotated=st.booleans(),
        angle_bound_on=st.booleans(),
        clip_angle_thresh=st.sampled_from([-1.0, 1.0]),
        batch_splits_dtype=st.sampled_from([torch.float32, torch.int32]),
        **hu.gcs_cpu_only
    )
    def test_box_with_nms_limits(
        self,
        roi_counts,
Reported by Pylint.
Line: 204
Column: 17
        pred_bbox, batch_splits = [
            t.detach().numpy()
            for t in torch.ops._caffe2.BBoxTransform(
                torch.tensor(rois),
                torch.tensor(deltas),
                torch.tensor(im_info),
                [1.0, 1.0, 1.0, 1.0],
                False,
                rotated,
Reported by Pylint.
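The tests in this file follow a property-based pattern: hypothesis's @given draws operator inputs, a Caffe2 reference run produces the expected output, and the torch.ops._caffe2 binding is checked against it. A toy sketch of the @given mechanics only (the check here is illustrative, not taken from the report):

import numpy as np
import hypothesis.strategies as st
from hypothesis import given

@given(scale=st.sampled_from([0.5, 1.0, 2.0]))
def test_scale_roundtrip(scale):
    # Stand-in for "reference result == operator result".
    x = np.ones((2, 3), dtype=np.float32)
    assert np.allclose((x * scale) / scale, x)

test_scale_roundtrip()  # hypothesis runs the body once per drawn example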
torch/testing/_internal/jit_utils.py
273 issues
Line: 45
Column: 38
RUN_CUDA_MULTI_GPU = RUN_CUDA and torch.cuda.device_count() > 1
RUN_CUDA_HALF = RUN_CUDA
# HIP supports half, no version check necessary
if torch.cuda.is_available() and not torch.version.hip:
    CUDA_VERSION = torch._C._cuda_getCompiledVersion()
    for d in range(torch.cuda.device_count()):
        major = torch.cuda.get_device_capability(d)[0]
        if (major < 6):
            RUN_CUDA_HALF = False
Reported by Pylint.
Line: 507
Column: 26
            # drop allows us to remove some values from ever being used
            # to test unused outputs
            if drop is not None:
                vs = vs[:-drop]
            # we don't want all the grad for all the outputs to be the same
            # so we multiply each by a constant
            return sum(math.log(i + 2) * v.sum() for i, v in enumerate(vs) if v is not None)
        if input_tensors is None:
            input_tensors = reference_tensors
Reported by Pylint.
Line: 597
Column: 33
            for g2, g2_ge in zip(grads2, grads2_ge):
                if g2 is None and g2_ge is None:
                    continue
                self.assertTrue(torch.allclose(g2, g2_ge, atol=8e-4, rtol=8e-4))
        return ge
    def checkModule(self, nn_module, args):
        """
Reported by Pylint.
Line: 4
Column: 1
# Torch
from torch.autograd import Variable
from torch.autograd.function import _nested_map
from torch.jit.annotations import BroadcastingList2, BroadcastingList3 # noqa: F401
from torch.onnx import OperatorExportTypes
import torch
import torch.cuda
import torch.jit
Reported by Pylint.
Line: 4
Column: 1
# Torch
from torch.autograd import Variable
from torch.autograd.function import _nested_map
from torch.jit.annotations import BroadcastingList2, BroadcastingList3 # noqa: F401
from torch.onnx import OperatorExportTypes
import torch
import torch.cuda
import torch.jit
Reported by Pylint.
Line: 21
Column: 1
from torch.testing._internal.common_utils import IS_WINDOWS, \
    freeze_rng_state, enable_profiling_mode_for_profiling_tests, ProfilingMode, TEST_BAILOUTS
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.common_utils import enable_profiling_mode # noqa: F401
# Standard library
from contextlib import contextmanager
from functools import reduce
from io import StringIO
Reported by Pylint.
Line: 46
Column: 20
RUN_CUDA_HALF = RUN_CUDA
# HIP supports half, no version check necessary
if torch.cuda.is_available() and not torch.version.hip:
    CUDA_VERSION = torch._C._cuda_getCompiledVersion()
    for d in range(torch.cuda.device_count()):
        major = torch.cuda.get_device_capability(d)[0]
        if (major < 6):
            RUN_CUDA_HALF = False
Reported by Pylint.
Line: 46
Column: 20
RUN_CUDA_HALF = RUN_CUDA
# HIP supports half, no version check necessary
if torch.cuda.is_available() and not torch.version.hip:
    CUDA_VERSION = torch._C._cuda_getCompiledVersion()
    for d in range(torch.cuda.device_count()):
        major = torch.cuda.get_device_capability(d)[0]
        if (major < 6):
            RUN_CUDA_HALF = False
Reported by Pylint.
Line: 53
Column: 5
            RUN_CUDA_HALF = False
def execWrapper(code, glob, loc):
    exec(code, glob, loc)
def do_input_map(fn, input):
    return _nested_map(lambda t: isinstance(t, torch.Tensor), fn)(input)
def clear_class_registry():
Reported by Pylint.
Line: 53
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b102_exec_used.html
            RUN_CUDA_HALF = False
def execWrapper(code, glob, loc):
    exec(code, glob, loc)
def do_input_map(fn, input):
    return _nested_map(lambda t: isinstance(t, torch.Tensor), fn)(input)
def clear_class_registry():
Reported by Bandit.
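Bandit's B102 check flags any use of exec. For a test utility like execWrapper the call is intentional, so the usual responses are confining the executed code to explicit namespaces (which the helper already does) and marking the line as reviewed; a hedged sketch:

def exec_wrapper(code, glob, loc):
    # compile() first gives clearer tracebacks; explicit glob/loc keep the
    # snippet out of the caller's namespace. "# nosec" is Bandit's
    # acknowledge-and-suppress marker for a reviewed finding.
    exec(compile(code, "<test-snippet>", "exec"), glob, loc)  # nosec B102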
torch/__init__.py
272 issues
Line: 18
Column: 1
import textwrap
import ctypes
import warnings
from .autocast_mode import autocast
if sys.version_info < (3,):
    raise Exception("Python 2 has reached end-of-life and is no longer supported by PyTorch.")
from ._utils import _import_dotted_name
from ._utils_internal import get_file_path, prepare_multiprocessing_environment, \
Reported by Pylint.
Line: 22
Column: 1
if sys.version_info < (3,):
    raise Exception("Python 2 has reached end-of-life and is no longer supported by PyTorch.")
from ._utils import _import_dotted_name
from ._utils_internal import get_file_path, prepare_multiprocessing_environment, \
    USE_RTLD_GLOBAL_WITH_LIBTORCH, USE_GLOBAL_DEPS
# TODO(torch_deploy) figure out how to freeze version.py in fbcode build
if sys.executable == 'torch_deploy':
    __version__ = "torch-deploy-1.8"
Reported by Pylint.
Line: 23
Column: 1
    raise Exception("Python 2 has reached end-of-life and is no longer supported by PyTorch.")
from ._utils import _import_dotted_name
from ._utils_internal import get_file_path, prepare_multiprocessing_environment, \
    USE_RTLD_GLOBAL_WITH_LIBTORCH, USE_GLOBAL_DEPS
# TODO(torch_deploy) figure out how to freeze version.py in fbcode build
if sys.executable == 'torch_deploy':
    __version__ = "torch-deploy-1.8"
else:
Reported by Pylint.
Line: 29
Column: 5
if sys.executable == 'torch_deploy':
__version__ = "torch-deploy-1.8"
else:
from .torch_version import __version__ as __version__
from ._six import string_classes as _string_classes
from typing import Set, Type, TYPE_CHECKING
Reported by Pylint.
Line: 31
Column: 1
else:
    from .torch_version import __version__ as __version__
from ._six import string_classes as _string_classes
from typing import Set, Type, TYPE_CHECKING
__all__ = [
    'typename', 'is_tensor', 'is_storage', 'set_default_tensor_type',
Reported by Pylint.
Line: 38
Column: 41
__all__ = [
    'typename', 'is_tensor', 'is_storage', 'set_default_tensor_type',
    'set_rng_state', 'get_rng_state', 'manual_seed', 'initial_seed', 'seed',
    'save', 'load', 'set_printoptions', 'chunk', 'split', 'stack', 'matmul',
    'no_grad', 'enable_grad', 'rand', 'randn', 'inference_mode',
    'DoubleStorage', 'FloatStorage', 'LongStorage', 'IntStorage',
    'ShortStorage', 'CharStorage', 'ByteStorage', 'BoolStorage',
    'DoubleTensor', 'FloatTensor', 'LongTensor', 'IntTensor',
    'ShortTensor', 'CharTensor', 'ByteTensor', 'BoolTensor', 'Tensor',
Reported by Pylint.
Line: 38
Column: 50
__all__ = [
    'typename', 'is_tensor', 'is_storage', 'set_default_tensor_type',
    'set_rng_state', 'get_rng_state', 'manual_seed', 'initial_seed', 'seed',
    'save', 'load', 'set_printoptions', 'chunk', 'split', 'stack', 'matmul',
    'no_grad', 'enable_grad', 'rand', 'randn', 'inference_mode',
    'DoubleStorage', 'FloatStorage', 'LongStorage', 'IntStorage',
    'ShortStorage', 'CharStorage', 'ByteStorage', 'BoolStorage',
    'DoubleTensor', 'FloatTensor', 'LongTensor', 'IntTensor',
    'ShortTensor', 'CharTensor', 'ByteTensor', 'BoolTensor', 'Tensor',
Reported by Pylint.
Line: 38
Column: 68
__all__ = [
    'typename', 'is_tensor', 'is_storage', 'set_default_tensor_type',
    'set_rng_state', 'get_rng_state', 'manual_seed', 'initial_seed', 'seed',
    'save', 'load', 'set_printoptions', 'chunk', 'split', 'stack', 'matmul',
    'no_grad', 'enable_grad', 'rand', 'randn', 'inference_mode',
    'DoubleStorage', 'FloatStorage', 'LongStorage', 'IntStorage',
    'ShortStorage', 'CharStorage', 'ByteStorage', 'BoolStorage',
    'DoubleTensor', 'FloatTensor', 'LongTensor', 'IntTensor',
    'ShortTensor', 'CharTensor', 'ByteTensor', 'BoolTensor', 'Tensor',
Reported by Pylint.
Line: 38
Column: 59
__all__ = [
    'typename', 'is_tensor', 'is_storage', 'set_default_tensor_type',
    'set_rng_state', 'get_rng_state', 'manual_seed', 'initial_seed', 'seed',
    'save', 'load', 'set_printoptions', 'chunk', 'split', 'stack', 'matmul',
    'no_grad', 'enable_grad', 'rand', 'randn', 'inference_mode',
    'DoubleStorage', 'FloatStorage', 'LongStorage', 'IntStorage',
    'ShortStorage', 'CharStorage', 'ByteStorage', 'BoolStorage',
    'DoubleTensor', 'FloatTensor', 'LongTensor', 'IntTensor',
    'ShortTensor', 'CharTensor', 'ByteTensor', 'BoolTensor', 'Tensor',
Reported by Pylint.
Line: 39
Column: 31
    'typename', 'is_tensor', 'is_storage', 'set_default_tensor_type',
    'set_rng_state', 'get_rng_state', 'manual_seed', 'initial_seed', 'seed',
    'save', 'load', 'set_printoptions', 'chunk', 'split', 'stack', 'matmul',
    'no_grad', 'enable_grad', 'rand', 'randn', 'inference_mode',
    'DoubleStorage', 'FloatStorage', 'LongStorage', 'IntStorage',
    'ShortStorage', 'CharStorage', 'ByteStorage', 'BoolStorage',
    'DoubleTensor', 'FloatTensor', 'LongTensor', 'IntTensor',
    'ShortTensor', 'CharTensor', 'ByteTensor', 'BoolTensor', 'Tensor',
    'lobpcg', 'use_deterministic_algorithms',
Reported by Pylint.
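Many of these findings point at imports that follow executable code, which torch/__init__.py cannot avoid: the Python-version guard must run before the relative imports. Where that ordering is load-bearing, a scoped disable is the conventional fix; a small standalone sketch of the shape (stdlib names only, for illustration):

import sys

if sys.version_info < (3,):
    raise Exception("Python 2 has reached end-of-life and is no longer supported.")

# This import must stay below the guard, so silence only this check here:
import textwrap  # pylint: disable=wrong-import-position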
test/test_unary_ufuncs.py
270 issues
Line: 1
Column: 1
import torch
import numpy as np
import warnings
import math
from itertools import product, chain
from numbers import Number
import random
import unittest
Reported by Pylint.
Line: 11
Column: 1
import random
import unittest
from torch._six import inf, nan
from torch.testing._internal.common_utils import (
    TestCase, run_tests, torch_to_numpy_dtype_dict, numpy_to_torch_dtype_dict,
    suppress_warnings, make_tensor, TEST_SCIPY, slowTest, skipIfNoSciPy, IS_WINDOWS)
from torch.testing._internal.common_methods_invocations import (
    unary_ufuncs, _NOTHING)
Reported by Pylint.
Line: 12
Column: 1
import unittest
from torch._six import inf, nan
from torch.testing._internal.common_utils import (
    TestCase, run_tests, torch_to_numpy_dtype_dict, numpy_to_torch_dtype_dict,
    suppress_warnings, make_tensor, TEST_SCIPY, slowTest, skipIfNoSciPy, IS_WINDOWS)
from torch.testing._internal.common_methods_invocations import (
    unary_ufuncs, _NOTHING)
from torch.testing._internal.common_device_type import (
Reported by Pylint.
Line: 15
Column: 1
from torch.testing._internal.common_utils import (
    TestCase, run_tests, torch_to_numpy_dtype_dict, numpy_to_torch_dtype_dict,
    suppress_warnings, make_tensor, TEST_SCIPY, slowTest, skipIfNoSciPy, IS_WINDOWS)
from torch.testing._internal.common_methods_invocations import (
    unary_ufuncs, _NOTHING)
from torch.testing._internal.common_device_type import (
    instantiate_device_type_tests, ops, dtypes, onlyCPU, onlyOnCPUAndCUDA,
    onlyCUDA, dtypesIfCUDA, precisionOverride, skipCUDAIfRocm, dtypesIfCPU,
    OpDTypes)
Reported by Pylint.
Line: 17
Column: 1
    suppress_warnings, make_tensor, TEST_SCIPY, slowTest, skipIfNoSciPy, IS_WINDOWS)
from torch.testing._internal.common_methods_invocations import (
    unary_ufuncs, _NOTHING)
from torch.testing._internal.common_device_type import (
    instantiate_device_type_tests, ops, dtypes, onlyCPU, onlyOnCPUAndCUDA,
    onlyCUDA, dtypesIfCUDA, precisionOverride, skipCUDAIfRocm, dtypesIfCPU,
    OpDTypes)
from torch.testing import (
    floating_types_and, all_types_and_complex_and, floating_and_complex_types_and)
Reported by Pylint.
Line: 21
Column: 1
    instantiate_device_type_tests, ops, dtypes, onlyCPU, onlyOnCPUAndCUDA,
    onlyCUDA, dtypesIfCUDA, precisionOverride, skipCUDAIfRocm, dtypesIfCPU,
    OpDTypes)
from torch.testing import (
    floating_types_and, all_types_and_complex_and, floating_and_complex_types_and)
if TEST_SCIPY:
    import scipy
Reported by Pylint.
Line: 25
Column: 5
    floating_types_and, all_types_and_complex_and, floating_and_complex_types_and)
if TEST_SCIPY:
    import scipy
# Refer [scipy reference filter]
# Filter operators for which the reference function
# is available in the current environment (for reference_numerics tests).
reference_filtered_ops = list(filter(lambda op: op.ref is not _NOTHING, unary_ufuncs))
Reported by Pylint.
Line: 192
Column: 3
    return generate_tensors_from_vals(vals, device, dtype, domain)
# TODO: port test_unary_out_op_mem_overlap
# TODO: add out= tests (different devices, dtypes, mismatched sizes,
#   correct sizes, 0 size, broadcasted out)
# TODO: add test for inplace variants erroring on broadcasted inputs
class TestUnaryUfuncs(TestCase):
    exact_dtype = True
Reported by Pylint.
Line: 193
Column: 3
# TODO: port test_unary_out_op_mem_overlap
# TODO: add out= tests (different devices, dtypes, mismatched sizes,
#   correct sizes, 0 size, broadcasted out)
# TODO: add test for inplace variants erroring on broadcasted inputs
class TestUnaryUfuncs(TestCase):
    exact_dtype = True
Reported by Pylint.
Line: 195
Column: 3
# TODO: port test_unary_out_op_mem_overlap
# TODO: add out= tests (different devices, dtypes, mismatched sizes,
#   correct sizes, 0 size, broadcasted out)
# TODO: add test for inplace variants erroring on broadcasted inputs
class TestUnaryUfuncs(TestCase):
    exact_dtype = True
    # Tests bool tensor negation raises the correct error
    def test_neg_error_message(self, device):
Reported by Pylint.
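The imports flagged above feed PyTorch's device-generic test machinery: a template class is written once and instantiate_device_type_tests clones it per backend, passing device into each test. A miniature sketch of that pattern (internal test APIs, as shown in this file's own imports):

import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.common_device_type import instantiate_device_type_tests

class TestNegExample(TestCase):
    def test_neg(self, device):
        t = torch.tensor([1.0, -2.0], device=device)
        self.assertEqual(torch.neg(t), torch.tensor([-1.0, 2.0], device=device))

# Generates TestNegExampleCPU, TestNegExampleCUDA, ... into this module.
instantiate_device_type_tests(TestNegExample, globals())

if __name__ == "__main__":
    run_tests()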
caffe2/python/operator_test/rnn_cell_test.py
270 issues
Line: 16
Column: 1
import caffe2.python.hypothesis_test_util as hu
from functools import partial
from hypothesis import assume, given
from hypothesis import settings as ht_settings
import hypothesis.strategies as st
import numpy as np
import unittest
Reported by Pylint.
Line: 17
Column: 1
from functools import partial
from hypothesis import assume, given
from hypothesis import settings as ht_settings
import hypothesis.strategies as st
import numpy as np
import unittest
Reported by Pylint.
Line: 18
Column: 1
from functools import partial
from hypothesis import assume, given
from hypothesis import settings as ht_settings
import hypothesis.strategies as st
import numpy as np
import unittest
def lstm_unit(*args, **kwargs):
Reported by Pylint.
Line: 1079
Column: 9
        workspace.ResetWorkspace()
    def test_lstm(self):
        self.lstm_base(lstm_type=(rnn_cell.LSTM, lstm_reference))
    def test_milstm(self):
        self.lstm_base(lstm_type=(rnn_cell.MILSTM, milstm_reference))
    @unittest.skip("This is currently numerically unstable")
Reported by Pylint.
torch/distributed/distributed_c10d.py
266 issues
Line: 11
Column: 1
from typing import Dict, Optional, Tuple, Union
import torch
from torch._C._distributed_c10d import (
    AllreduceOptions,
    AllreduceCoalescedOptions,
    AllToAllOptions,
    BarrierOptions,
    BroadcastOptions,
Reported by Pylint.
Line: 26
Column: 1
    ScatterOptions,
    Store,
)
from torch._C._distributed_c10d import _get_debug_mode, _DistributedDebugLevel
from torch._six import string_classes
# This module is wildcard imported from torch.distributed.
# TODO: specify __all__
Reported by Pylint.
Line: 32
Column: 1
# This module is wildcard imported from torch.distributed.
# TODO: specify __all__
from .constants import default_pg_timeout
from .rendezvous import rendezvous, register_rendezvous_handler # noqa: F401
_MPI_AVAILABLE = True
_NCCL_AVAILABLE = True
_GLOO_AVAILABLE = True
Reported by Pylint.
Line: 33
Column: 1
# TODO: specify __all__
from .constants import default_pg_timeout
from .rendezvous import rendezvous, register_rendezvous_handler # noqa: F401
_MPI_AVAILABLE = True
_NCCL_AVAILABLE = True
_GLOO_AVAILABLE = True
Reported by Pylint.
Line: 1189
Column: 38
        return
    tensor_list = [
        t if not t.is_complex() else torch.view_as_real(t) for t in tensor_list
    ]
    opts = AllreduceOptions()
    opts.reduceOp = op
    if group is None:
Reported by Pylint.
Line: 1260
Column: 18
    if tensor.is_complex():
        if not supports_complex(op):
            raise RuntimeError(f"all_reduce does not support {op} on complex tensors")
        tensor = torch.view_as_real(tensor)
    opts = AllreduceOptions()
    opts.reduceOp = op
    if group is None:
        default_pg = _get_default_group()
Reported by Pylint.
Line: 1316
Column: 45
    if any([t.is_complex() for t in tensors]) and not supports_complex(op):
        raise RuntimeError(f"all_reduce does not support {op} on complex tensors")
    tensors = [t if not t.is_complex() else torch.view_as_real(t) for t in tensors]
    opts = AllreduceCoalescedOptions()
    opts.reduceOp = op
    if group is None:
        default_pg = _get_default_group()
Reported by Pylint.
Line: 1480
Column: 39
        return
    output_tensor_lists = [
        [t if not t.is_complex() else torch.view_as_real(t) for t in l]
        for l in output_tensor_lists
    ]
    input_tensor_list = [
        t if not t.is_complex() else torch.view_as_real(t) for t in input_tensor_list
    ]
Reported by Pylint.
Line: 1484
Column: 38
        for l in output_tensor_lists
    ]
    input_tensor_list = [
        t if not t.is_complex() else torch.view_as_real(t) for t in input_tensor_list
    ]
    if group is None:
        default_pg = _get_default_group()
        work = default_pg.allgather(output_tensor_lists, input_tensor_list)
Reported by Pylint.
Line: 1503
Column: 52
    f = io.BytesIO()
    _pickler(f).dump(obj)
    byte_storage = torch.ByteStorage.from_buffer(f.getvalue())  # type: ignore[attr-defined]
    byte_tensor = torch.tensor(byte_storage, dtype=torch.uint8)
    local_size = torch.tensor([byte_tensor.numel()], dtype=torch.long)
    return byte_tensor, local_size
def _tensor_to_object(tensor, tensor_size):
Reported by Pylint.
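_object_to_tensor and its inverse shuttle arbitrary picklable objects through the tensor-only collective API. A self-contained sketch of the same round trip using plain pickle (the real code routes through torch.ByteStorage and a custom _pickler):

import io
import pickle
import torch

def object_to_tensor(obj):
    buf = io.BytesIO()
    pickle.dump(obj, buf)
    data = torch.tensor(bytearray(buf.getvalue()), dtype=torch.uint8)
    return data, torch.tensor([data.numel()], dtype=torch.long)

def tensor_to_object(tensor, size):
    return pickle.loads(tensor.numpy().tobytes()[:size])

payload, n = object_to_tensor({"rank": 0})
assert tensor_to_object(payload, int(n)) == {"rank": 0}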