The following issues were found:
test/test_utils.py
215 issues
Line: 426
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b602_subprocess_popen_with_shell_equals_true.html
import subprocess
p = subprocess.Popen(command, stdout=subprocess.PIPE, # noqa: P204
stderr=subprocess.PIPE, shell=True)
try:
output, err = p.communicate(timeout=timeout)
except subprocess.TimeoutExpired:
p.kill()
output, err = p.communicate()
Reported by Bandit.
Line: 800
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b602_subprocess_popen_with_shell_equals_true.html
for shell in [True, False]:
r = subprocess.run(
[exec_path],
shell=shell,
stdout=subprocess.PIPE,
)
self.assertEqual(r.returncode, 0)
self.assertEqual(
# Windows prints "\r\n" for newlines.
Reported by Bandit.
Line: 12
Column: 1
import tempfile
import textwrap
import unittest
import torch
import torch.nn as nn
import torch.utils.data
from torch.utils.data import DataLoader
import torch.cuda
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
Reported by Pylint.
Line: 13
Column: 1
import textwrap
import unittest
import torch
import torch.nn as nn
import torch.utils.data
from torch.utils.data import DataLoader
import torch.cuda
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
import torch.utils.cpp_extension
Reported by Pylint.
Line: 14
Column: 1
import unittest
import torch
import torch.nn as nn
import torch.utils.data
from torch.utils.data import DataLoader
import torch.cuda
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
import torch.utils.cpp_extension
import torch.hub as hub
Reported by Pylint.
Line: 15
Column: 1
import torch
import torch.nn as nn
import torch.utils.data
from torch.utils.data import DataLoader
import torch.cuda
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
import torch.utils.cpp_extension
import torch.hub as hub
from torch.autograd._functions.utils import check_onnx_broadcast
Reported by Pylint.
Line: 16
Column: 1
import torch.nn as nn
import torch.utils.data
from torch.utils.data import DataLoader
import torch.cuda
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
import torch.utils.cpp_extension
import torch.hub as hub
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
Reported by Pylint.
Line: 17
Column: 1
import torch.utils.data
from torch.utils.data import DataLoader
import torch.cuda
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
import torch.utils.cpp_extension
import torch.hub as hub
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_utils import load_tests, retry, IS_SANDCASTLE, IS_WINDOWS, has_breakpad
Reported by Pylint.
Line: 18
Column: 1
from torch.utils.data import DataLoader
import torch.cuda
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
import torch.utils.cpp_extension
import torch.hub as hub
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_utils import load_tests, retry, IS_SANDCASTLE, IS_WINDOWS, has_breakpad
from urllib.error import URLError
Reported by Pylint.
Line: 19
Column: 1
import torch.cuda
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
import torch.utils.cpp_extension
import torch.hub as hub
from torch.autograd._functions.utils import check_onnx_broadcast
from torch.onnx.symbolic_opset9 import _prepare_onnx_paddings
from torch.testing._internal.common_utils import load_tests, retry, IS_SANDCASTLE, IS_WINDOWS, has_breakpad
from urllib.error import URLError
Reported by Pylint.
torch/jit/frontend.py
212 issues
Line: 9
Column: 1
from collections import namedtuple
from textwrap import dedent
from typing import List, Tuple # noqa: F401
from torch._C._jit_tree_views import (
ClassDef, Ident, Stmt, Decl, Def, Var,
EmptyTypeAnnotation, Param, ExprStmt, Assign,
Delete, Return, Raise, Assert, AugAssign, While,
For, If, Pass, Break, Continue, Apply, Dots, Select,
TrueLiteral, FalseLiteral, NoneLiteral, Starred,
Reported by Pylint.
Line: 8
Column: 1
import string
from collections import namedtuple
from textwrap import dedent
from typing import List, Tuple # noqa: F401
from torch._C._jit_tree_views import (
ClassDef, Ident, Stmt, Decl, Def, Var,
EmptyTypeAnnotation, Param, ExprStmt, Assign,
Delete, Return, Raise, Assert, AugAssign, While,
For, If, Pass, Break, Continue, Apply, Dots, Select,
Reported by Pylint.
Line: 8
Column: 1
import string
from collections import namedtuple
from textwrap import dedent
from typing import List, Tuple # noqa: F401
from torch._C._jit_tree_views import (
ClassDef, Ident, Stmt, Decl, Def, Var,
EmptyTypeAnnotation, Param, ExprStmt, Assign,
Delete, Return, Raise, Assert, AugAssign, While,
For, If, Pass, Break, Continue, Apply, Dots, Select,
Reported by Pylint.
Line: 22
Column: 1
)
from torch._sources import get_source_lines_and_file, parse_def, make_source_context
from torch.jit._monkeytype_config import monkeytype_trace, get_qualified_name
from torch._jit_internal import should_drop, is_static_fn, FunctionModifiers # noqa: F401
import torch.jit.annotations
_IS_ASTUNPARSE_INSTALLED = False
try:
import astunparse # type: ignore[import]
Reported by Pylint.
Line: 98
Column: 5
class FrontendError(Exception):
def __init__(self, source_range, msg):
self.source_range = source_range
self.msg = msg
# This has to be instantiated here so the ErrorReport is accurate to the
# call stack when the FrontendError was raised
Reported by Pylint.
Line: 187
Column: 3
def get_jit_class_def(cls, self_name):
# Get defs for each method within the current class independently
# TODO: proper overriding analysis when implementing class inheritance
methods = inspect.getmembers(
cls,
predicate=lambda m: (inspect.ismethod(m) or inspect.isfunction(m))
and not is_static_fn(cls, m.__name__)
and m.__name__ in cls.__dict__
Reported by Pylint.
Line: 205
Column: 73
properties = get_class_properties(cls, self_name)
sourcelines, file_lineno, filename = get_source_lines_and_file(cls, torch._C.ErrorReport.call_stack())
source = ''.join(sourcelines)
dedent_src = dedent(source)
py_ast = ast.parse(dedent_src)
leading_whitespace_len = len(source.split('\n', 1)[0]) - len(dedent_src.split('\n', 1)[0])
ctx = make_source_context(source, filename, file_lineno, leading_whitespace_len, False)
Reported by Pylint.
Line: 258
Column: 21
# If MonkeyType is installed, get all the consolidated type traces
# for the arguments from type_trace_db
type_trace_db = torch.jit._script._get_type_trace_db()
pdt_arg_types = None
if monkeytype_trace:
qualname = get_qualified_name(fn)
pdt_arg_types = type_trace_db.get_args_types(qualname)
Reported by Pylint.
Line: 258
Column: 21
# If MonkeyType is installed, get all the consolidated type traces
# for the arguments from type_trace_db
type_trace_db = torch.jit._script._get_type_trace_db()
pdt_arg_types = None
if monkeytype_trace:
qualname = get_qualified_name(fn)
pdt_arg_types = type_trace_db.get_args_types(qualname)
Reported by Pylint.
Line: 266
Column: 3
return build_def(parsed_def.ctx, fn_def, type_line, def_name, self_name=self_name, pdt_arg_types=pdt_arg_types)
# TODO: more robust handling of recognizing ignore context manager
def is_torch_jit_ignore_context_manager(stmt):
# checks if the statement is torch.jit.ignore context manager
if isinstance(stmt.items[0].context_expr, ast.Call):
# extract torch part
function = stmt.items[0].context_expr.func
Reported by Pylint.
test/jit/test_onnx_export.py
210 issues
Line: 6
Column: 1
import sys
import typing
import torch
import torch.nn as nn
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
Reported by Pylint.
Line: 7
Column: 1
import typing
import torch
import torch.nn as nn
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.common_utils import suppress_warnings
Reported by Pylint.
Line: 12
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.common_utils import suppress_warnings
from torch.testing._internal.jit_utils import JitTestCase
from torch.onnx import OperatorExportTypes
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
Reported by Pylint.
Line: 13
Column: 1
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.common_utils import suppress_warnings
from torch.testing._internal.jit_utils import JitTestCase
from torch.onnx import OperatorExportTypes
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
Reported by Pylint.
Line: 14
Column: 1
sys.path.append(pytorch_test_dir)
from torch.testing._internal.common_utils import suppress_warnings
from torch.testing._internal.jit_utils import JitTestCase
from torch.onnx import OperatorExportTypes
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
Reported by Pylint.
Line: 324
Column: 17
def test_onnx_export_shape_reshape(self):
class Foo(torch.nn.Module):
def forward(self, x):
import torch.onnx.operators
x = x.repeat(5, 1, 1)
shape = torch.onnx.operators.shape_as_tensor(x)
reshaped = torch.onnx.operators.reshape_from_tensor_shape(x, shape)
return reshaped
Reported by Pylint.
Line: 29
Column: 9
x = torch.ones(3, 3)
f = io.BytesIO()
torch.onnx._export(AddmmModel(), x, f, verbose=False)
def test_onnx_transpose_incomplete_tensor_type(self):
# Smoke test to get us into the state where we are attempting to export
# a transpose op, where the input is a TensorType without size information.
# This would previously not work, since we would
Reported by Pylint.
Line: 54
Column: 9
tm = torch.jit.trace(tm, torch.rand(3, 4))
example_outputs = (tm(torch.rand(3, 4)),)
f = io.BytesIO()
torch.onnx._export(tm, (torch.rand(3, 4),), f, example_outputs=example_outputs)
def test_export_tensoroption_to(self):
def foo(x):
return x[0].clone().detach().cpu() + x
Reported by Pylint.
Line: 64
Column: 9
example_outputs = traced(torch.rand([2]))
f = io.BytesIO()
torch.onnx._export_to_pretty_string(traced, (torch.rand([2]),), f,
example_outputs=example_outputs)
def test_onnx_export_script_module(self):
class ModuleToExport(torch.jit.ScriptModule):
def __init__(self):
Reported by Pylint.
Line: 69
Column: 13
def test_onnx_export_script_module(self):
class ModuleToExport(torch.jit.ScriptModule):
def __init__(self):
super(ModuleToExport, self).__init__()
@torch.jit.script_method
def forward(self, x):
y = x - x
Reported by Pylint.
torch/testing/_internal/distributed/nn/api/remote_module_test.py
210 issues
Line: 21
Column: 33
)
_PARAM_VAL = torch.nn.Parameter(torch.ones(1))
# RPC handler for querying the device on the destination worker.
def remote_device(module_rref):
for param in module_rref.local_value().parameters():
Reported by Pylint.
Line: 132
Column: 12
class RemoteModuleTest(CommonRemoteModuleTest):
@dist_utils.dist_init
def test_bad_module(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
remote_device = "{}/cpu".format(dst_worker_name)
args = (1,)
kwargs = dict(first_kwarg=2)
Reported by Pylint.
Line: 134
Column: 51
def test_bad_module(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
remote_device = "{}/cpu".format(dst_worker_name)
args = (1,)
kwargs = dict(first_kwarg=2)
with self.assertRaisesRegex(
Reported by Pylint.
Line: 139
Column: 14
args = (1,)
kwargs = dict(first_kwarg=2)
with self.assertRaisesRegex(
ValueError,
r"Expect `module_cls\(\*args, \*\*kwargs\)` returns an instance of <class nn.Module>,",
):
RemoteModule(remote_device, BadModule, args, kwargs).forward()
Reported by Pylint.
Line: 145
Column: 14
):
RemoteModule(remote_device, BadModule, args, kwargs).forward()
with self.assertRaisesRegex(
ValueError,
r"Expect `module_cls\(\*args, \*\*kwargs\)` returns an instance of <class nn.Module>,",
):
RemoteModule(remote_device, BadModule, args, kwargs).forward()
Reported by Pylint.
Line: 154
Column: 12
@dist_utils.dist_init
def test_forward_async(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
args = (torch.ones(1), 2, "3")
for remote_module in self._create_remote_module_iter(dst_worker_name):
ret_fut = remote_module.forward_async(*args)
Reported by Pylint.
Line: 156
Column: 51
def test_forward_async(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
args = (torch.ones(1), 2, "3")
for remote_module in self._create_remote_module_iter(dst_worker_name):
ret_fut = remote_module.forward_async(*args)
ret = ret_fut.wait()
self.assertEqual(ret, tuple(reversed(args)))
Reported by Pylint.
Line: 157
Column: 17
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
args = (torch.ones(1), 2, "3")
for remote_module in self._create_remote_module_iter(dst_worker_name):
ret_fut = remote_module.forward_async(*args)
ret = ret_fut.wait()
self.assertEqual(ret, tuple(reversed(args)))
Reported by Pylint.
Line: 161
Column: 13
for remote_module in self._create_remote_module_iter(dst_worker_name):
ret_fut = remote_module.forward_async(*args)
ret = ret_fut.wait()
self.assertEqual(ret, tuple(reversed(args)))
@dist_utils.dist_init
def test_forward_async_script(self):
if self.rank != 0:
return
Reported by Pylint.
Line: 165
Column: 12
@dist_utils.dist_init
def test_forward_async_script(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
scripted_remote_module = next(
self._create_remote_module_iter(
Reported by Pylint.
test/quantization/eager/test_quantize_eager_ptq.py
209 issues
Line: 2
Column: 1
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
from torch.nn.utils.rnn import PackedSequence
from torch.quantization import (
quantize,
prepare,
convert,
Reported by Pylint.
Line: 3
Column: 1
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
from torch.nn.utils.rnn import PackedSequence
from torch.quantization import (
quantize,
prepare,
convert,
Reported by Pylint.
Line: 4
Column: 1
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
from torch.nn.utils.rnn import PackedSequence
from torch.quantization import (
quantize,
prepare,
convert,
Reported by Pylint.
Line: 5
Column: 1
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
from torch.nn.utils.rnn import PackedSequence
from torch.quantization import (
quantize,
prepare,
convert,
prepare_qat,
Reported by Pylint.
Line: 6
Column: 1
import torch.nn as nn
import torch.nn.quantized as nnq
from torch.nn.utils.rnn import PackedSequence
from torch.quantization import (
quantize,
prepare,
convert,
prepare_qat,
quantize_dynamic,
Reported by Pylint.
Line: 25
Column: 1
default_dynamic_quant_observer,
)
from torch.testing._internal.common_quantization import (
QuantizationTestCase,
AnnotatedSingleLayerLinearModel,
QuantStubModel,
ModelWithFunctionals,
SingleLayerLinearDynamicModel,
Reported by Pylint.
Line: 48
Column: 1
)
# annotated models
from torch.testing._internal.common_quantization import (
AnnotatedTwoLayerLinearModel,
AnnotatedNestedModel,
AnnotatedSubNestedModel,
AnnotatedCustomConfigNestedModel,
AnnotatedSkipQuantModel,
Reported by Pylint.
Line: 56
Column: 1
AnnotatedSkipQuantModel,
)
from torch.testing._internal.common_quantized import (
override_quantized_engine,
supported_qengines,
override_qengines,
)
from torch.testing._internal.jit_utils import JitTestCase
Reported by Pylint.
Line: 61
Column: 1
supported_qengines,
override_qengines,
)
from torch.testing._internal.jit_utils import JitTestCase
from hypothesis import given
from hypothesis import strategies as st
import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
Reported by Pylint.
Line: 62
Column: 1
override_qengines,
)
from torch.testing._internal.jit_utils import JitTestCase
from hypothesis import given
from hypothesis import strategies as st
import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
# Standard library
Reported by Pylint.
test/test_profiler.py
204 issues
Line: 8
Column: 1
import os
import unittest
import torch
import torch.nn as nn
import torch.optim
import torch.utils.data
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_utils import (
Reported by Pylint.
Line: 9
Column: 1
import unittest
import torch
import torch.nn as nn
import torch.optim
import torch.utils.data
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_utils import (
TestCase, run_tests, TEST_WITH_ASAN, TEST_WITH_ROCM, IS_WINDOWS,
Reported by Pylint.
Line: 10
Column: 1
import torch
import torch.nn as nn
import torch.optim
import torch.utils.data
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_utils import (
TestCase, run_tests, TEST_WITH_ASAN, TEST_WITH_ROCM, IS_WINDOWS,
TemporaryFileName, TemporaryDirectoryName)
Reported by Pylint.
Line: 11
Column: 1
import torch
import torch.nn as nn
import torch.optim
import torch.utils.data
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_utils import (
TestCase, run_tests, TEST_WITH_ASAN, TEST_WITH_ROCM, IS_WINDOWS,
TemporaryFileName, TemporaryDirectoryName)
from torch.autograd.profiler import profile as _profile
Reported by Pylint.
Line: 12
Column: 1
import torch.nn as nn
import torch.optim
import torch.utils.data
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_utils import (
TestCase, run_tests, TEST_WITH_ASAN, TEST_WITH_ROCM, IS_WINDOWS,
TemporaryFileName, TemporaryDirectoryName)
from torch.autograd.profiler import profile as _profile
from torch.profiler import (
Reported by Pylint.
Line: 13
Column: 1
import torch.optim
import torch.utils.data
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_utils import (
TestCase, run_tests, TEST_WITH_ASAN, TEST_WITH_ROCM, IS_WINDOWS,
TemporaryFileName, TemporaryDirectoryName)
from torch.autograd.profiler import profile as _profile
from torch.profiler import (
kineto_available, profile, record_function, supported_activities,
Reported by Pylint.
Line: 16
Column: 1
from torch.testing._internal.common_utils import (
TestCase, run_tests, TEST_WITH_ASAN, TEST_WITH_ROCM, IS_WINDOWS,
TemporaryFileName, TemporaryDirectoryName)
from torch.autograd.profiler import profile as _profile
from torch.profiler import (
kineto_available, profile, record_function, supported_activities,
DeviceType, ProfilerAction, ProfilerActivity
)
Reported by Pylint.
Line: 17
Column: 1
TestCase, run_tests, TEST_WITH_ASAN, TEST_WITH_ROCM, IS_WINDOWS,
TemporaryFileName, TemporaryDirectoryName)
from torch.autograd.profiler import profile as _profile
from torch.profiler import (
kineto_available, profile, record_function, supported_activities,
DeviceType, ProfilerAction, ProfilerActivity
)
try:
Reported by Pylint.
Line: 41
Column: 13
t = torch.rand(1, 1).cuda()
p = psutil.Process()
last_rss = collections.deque(maxlen=5)
for outer_idx in range(10):
with _profile(use_cuda=True):
for _ in range(1024):
t = torch.mm(t, t)
gc.collect()
Reported by Pylint.
Line: 65
Column: 20
"""Checks that source code attribution works for eager, TS and autograd mode
"""
# avoid automatic inlining
prev_opt = torch._C._get_graph_executor_optimize()
torch._C._set_graph_executor_optimize(False)
@torch.jit.script
def ts_method_2(x, y):
return torch.matmul(x, y)
Reported by Pylint.
test/distributed/optim/test_zero_redundancy_optimizer.py
202 issues
Line: 15
Column: 1
import numpy as np
import torch
import torch.distributed as dist
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
Reported by Pylint.
Line: 16
Column: 1
import numpy as np
import torch
import torch.distributed as dist
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.distributed.algorithms.ddp_comm_hooks.ddp_zero_hook import (
Reported by Pylint.
Line: 21
Column: 1
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.distributed.algorithms.ddp_comm_hooks.ddp_zero_hook import (
hook_with_zero_step,
hook_with_zero_step_interleaved,
)
from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import (
allreduce_hook,
Reported by Pylint.
Line: 25
Column: 1
hook_with_zero_step,
hook_with_zero_step_interleaved,
)
from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import (
allreduce_hook,
)
from torch.distributed.algorithms.join import Join, Joinable, JoinHook
from torch.distributed.optim import ZeroRedundancyOptimizer
from torch.distributed.optim.zero_redundancy_optimizer import _broadcast_object
Reported by Pylint.
Line: 28
Column: 1
from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import (
allreduce_hook,
)
from torch.distributed.algorithms.join import Join, Joinable, JoinHook
from torch.distributed.optim import ZeroRedundancyOptimizer
from torch.distributed.optim.zero_redundancy_optimizer import _broadcast_object
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim import SGD
from torch.testing._internal import common_distributed, common_utils
Reported by Pylint.
Line: 29
Column: 1
allreduce_hook,
)
from torch.distributed.algorithms.join import Join, Joinable, JoinHook
from torch.distributed.optim import ZeroRedundancyOptimizer
from torch.distributed.optim.zero_redundancy_optimizer import _broadcast_object
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim import SGD
from torch.testing._internal import common_distributed, common_utils
from torch.testing._internal.common_utils import IS_WINDOWS
Reported by Pylint.
Line: 30
Column: 1
)
from torch.distributed.algorithms.join import Join, Joinable, JoinHook
from torch.distributed.optim import ZeroRedundancyOptimizer
from torch.distributed.optim.zero_redundancy_optimizer import _broadcast_object
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim import SGD
from torch.testing._internal import common_distributed, common_utils
from torch.testing._internal.common_utils import IS_WINDOWS
Reported by Pylint.
Line: 31
Column: 1
from torch.distributed.algorithms.join import Join, Joinable, JoinHook
from torch.distributed.optim import ZeroRedundancyOptimizer
from torch.distributed.optim.zero_redundancy_optimizer import _broadcast_object
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim import SGD
from torch.testing._internal import common_distributed, common_utils
from torch.testing._internal.common_utils import IS_WINDOWS
if IS_WINDOWS:
Reported by Pylint.
Line: 32
Column: 1
from torch.distributed.optim import ZeroRedundancyOptimizer
from torch.distributed.optim.zero_redundancy_optimizer import _broadcast_object
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim import SGD
from torch.testing._internal import common_distributed, common_utils
from torch.testing._internal.common_utils import IS_WINDOWS
if IS_WINDOWS:
print("Test fails on windows, see https://github.com/pytorch/pytorch/issues/63086")
Reported by Pylint.
Line: 33
Column: 1
from torch.distributed.optim.zero_redundancy_optimizer import _broadcast_object
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim import SGD
from torch.testing._internal import common_distributed, common_utils
from torch.testing._internal.common_utils import IS_WINDOWS
if IS_WINDOWS:
print("Test fails on windows, see https://github.com/pytorch/pytorch/issues/63086")
sys.exit(0)
Reported by Pylint.
test/jit/test_models.py
199 issues
Line: 4
Column: 1
import os
import sys
import unittest
from torch.testing._internal.common_utils import enable_profiling_mode_for_profiling_tests, GRAPH_EXECUTOR, ProfilingMode
import torch
import torch.nn as nn
import torch.nn.functional as F
# Make the helper files in test/ importable
Reported by Pylint.
Line: 5
Column: 1
import sys
import unittest
from torch.testing._internal.common_utils import enable_profiling_mode_for_profiling_tests, GRAPH_EXECUTOR, ProfilingMode
import torch
import torch.nn as nn
import torch.nn.functional as F
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
Reported by Pylint.
Line: 6
Column: 1
import unittest
from torch.testing._internal.common_utils import enable_profiling_mode_for_profiling_tests, GRAPH_EXECUTOR, ProfilingMode
import torch
import torch.nn as nn
import torch.nn.functional as F
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
Reported by Pylint.
Line: 7
Column: 1
from torch.testing._internal.common_utils import enable_profiling_mode_for_profiling_tests, GRAPH_EXECUTOR, ProfilingMode
import torch
import torch.nn as nn
import torch.nn.functional as F
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, RUN_CUDA
Reported by Pylint.
Line: 12
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, RUN_CUDA
from torch.testing._internal.common_utils import slowTest, suppress_warnings
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
Reported by Pylint.
Line: 13
Column: 1
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, RUN_CUDA
from torch.testing._internal.common_utils import slowTest, suppress_warnings
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
Reported by Pylint.
Line: 14
Column: 1
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, RUN_CUDA
from torch.testing._internal.common_utils import slowTest, suppress_warnings
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
Reported by Pylint.
Line: 50
Column: 5
class TestModels(JitTestCase):
@staticmethod
def _test_dcgan_models(self, device, check_export_import=True):
class DCGANGenerator(nn.Module):
def __init__(self, nz, ngf, nc):
super(DCGANGenerator, self).__init__()
self.main = nn.Sequential(
# input is Z, going into a convolution
Reported by Pylint.
Line: 77
Column: 31
# state size. (nc) x 64 x 64
)
def forward(self, input):
return self.main(input)
class DCGANDiscriminator(nn.Module):
def __init__(self, nc, ndf):
super(DCGANDiscriminator, self).__init__()
Reported by Pylint.
Line: 104
Column: 31
nn.Sigmoid()
)
def forward(self, input):
return self.main(input).view(-1, 1).squeeze(1)
bs, nz, ngf, nc, ndf = 5, 6, 9, 3, 10
self.checkTrace(DCGANGenerator(nz, ngf, nc).to(device),
(torch.rand(bs, nz, 1, 1, device=device),),
Reported by Pylint.
torch/nn/quantized/dynamic/modules/rnn.py
199 issues
Line: 37
Column: 57
def __init__(self, mode, input_size, hidden_size,
num_layers=1, bias=True, batch_first=False,
dropout=0., bidirectional=False, dtype=torch.qint8):
super(RNNBase, self).__init__()
self.mode = mode
self.input_size = input_size
self.hidden_size = hidden_size
Reported by Pylint.
Line: 78
Column: 24
for direction in range(num_directions):
layer_input_size = input_size if layer == 0 else hidden_size * num_directions
w_ih = torch.randn(gate_size, layer_input_size).to(torch.float)
w_hh = torch.randn(gate_size, hidden_size).to(torch.float)
b_ih = torch.randn(gate_size).to(torch.float)
b_hh = torch.randn(gate_size).to(torch.float)
if dtype == torch.qint8:
w_ih = torch.quantize_per_tensor(w_ih, scale=0.1, zero_point=0, dtype=torch.qint8)
Reported by Pylint.
Line: 78
Column: 68
for direction in range(num_directions):
layer_input_size = input_size if layer == 0 else hidden_size * num_directions
w_ih = torch.randn(gate_size, layer_input_size).to(torch.float)
w_hh = torch.randn(gate_size, hidden_size).to(torch.float)
b_ih = torch.randn(gate_size).to(torch.float)
b_hh = torch.randn(gate_size).to(torch.float)
if dtype == torch.qint8:
w_ih = torch.quantize_per_tensor(w_ih, scale=0.1, zero_point=0, dtype=torch.qint8)
Reported by Pylint.
Line: 79
Column: 24
layer_input_size = input_size if layer == 0 else hidden_size * num_directions
w_ih = torch.randn(gate_size, layer_input_size).to(torch.float)
w_hh = torch.randn(gate_size, hidden_size).to(torch.float)
b_ih = torch.randn(gate_size).to(torch.float)
b_hh = torch.randn(gate_size).to(torch.float)
if dtype == torch.qint8:
w_ih = torch.quantize_per_tensor(w_ih, scale=0.1, zero_point=0, dtype=torch.qint8)
w_hh = torch.quantize_per_tensor(w_hh, scale=0.1, zero_point=0, dtype=torch.qint8)
Reported by Pylint.
Line: 79
Column: 63
layer_input_size = input_size if layer == 0 else hidden_size * num_directions
w_ih = torch.randn(gate_size, layer_input_size).to(torch.float)
w_hh = torch.randn(gate_size, hidden_size).to(torch.float)
b_ih = torch.randn(gate_size).to(torch.float)
b_hh = torch.randn(gate_size).to(torch.float)
if dtype == torch.qint8:
w_ih = torch.quantize_per_tensor(w_ih, scale=0.1, zero_point=0, dtype=torch.qint8)
w_hh = torch.quantize_per_tensor(w_hh, scale=0.1, zero_point=0, dtype=torch.qint8)
Reported by Pylint.
Line: 80
Column: 50
w_ih = torch.randn(gate_size, layer_input_size).to(torch.float)
w_hh = torch.randn(gate_size, hidden_size).to(torch.float)
b_ih = torch.randn(gate_size).to(torch.float)
b_hh = torch.randn(gate_size).to(torch.float)
if dtype == torch.qint8:
w_ih = torch.quantize_per_tensor(w_ih, scale=0.1, zero_point=0, dtype=torch.qint8)
w_hh = torch.quantize_per_tensor(w_hh, scale=0.1, zero_point=0, dtype=torch.qint8)
packed_ih = \
Reported by Pylint.
Line: 80
Column: 24
w_ih = torch.randn(gate_size, layer_input_size).to(torch.float)
w_hh = torch.randn(gate_size, hidden_size).to(torch.float)
b_ih = torch.randn(gate_size).to(torch.float)
b_hh = torch.randn(gate_size).to(torch.float)
if dtype == torch.qint8:
w_ih = torch.quantize_per_tensor(w_ih, scale=0.1, zero_point=0, dtype=torch.qint8)
w_hh = torch.quantize_per_tensor(w_hh, scale=0.1, zero_point=0, dtype=torch.qint8)
packed_ih = \
Reported by Pylint.
Line: 81
Column: 24
w_ih = torch.randn(gate_size, layer_input_size).to(torch.float)
w_hh = torch.randn(gate_size, hidden_size).to(torch.float)
b_ih = torch.randn(gate_size).to(torch.float)
b_hh = torch.randn(gate_size).to(torch.float)
if dtype == torch.qint8:
w_ih = torch.quantize_per_tensor(w_ih, scale=0.1, zero_point=0, dtype=torch.qint8)
w_hh = torch.quantize_per_tensor(w_hh, scale=0.1, zero_point=0, dtype=torch.qint8)
packed_ih = \
torch.ops.quantized.linear_prepack(w_ih, b_ih)
Reported by Pylint.
Line: 81
Column: 50
w_ih = torch.randn(gate_size, layer_input_size).to(torch.float)
w_hh = torch.randn(gate_size, hidden_size).to(torch.float)
b_ih = torch.randn(gate_size).to(torch.float)
b_hh = torch.randn(gate_size).to(torch.float)
if dtype == torch.qint8:
w_ih = torch.quantize_per_tensor(w_ih, scale=0.1, zero_point=0, dtype=torch.qint8)
w_hh = torch.quantize_per_tensor(w_hh, scale=0.1, zero_point=0, dtype=torch.qint8)
packed_ih = \
torch.ops.quantized.linear_prepack(w_ih, b_ih)
Reported by Pylint.
Line: 82
Column: 29
w_hh = torch.randn(gate_size, hidden_size).to(torch.float)
b_ih = torch.randn(gate_size).to(torch.float)
b_hh = torch.randn(gate_size).to(torch.float)
if dtype == torch.qint8:
w_ih = torch.quantize_per_tensor(w_ih, scale=0.1, zero_point=0, dtype=torch.qint8)
w_hh = torch.quantize_per_tensor(w_hh, scale=0.1, zero_point=0, dtype=torch.qint8)
packed_ih = \
torch.ops.quantized.linear_prepack(w_ih, b_ih)
packed_hh = \
Reported by Pylint.
caffe2/python/workspace_test.py
195 issues
Line: 8
Column: 1
from collections import namedtuple
import caffe2.python.hypothesis_test_util as htu
import hypothesis.strategies as st
import numpy as np
import torch
from caffe2.proto import caffe2_pb2
from caffe2.python import core, test_util, workspace, model_helper, brew
from hypothesis import given, settings
Reported by Pylint.
Line: 13
Column: 1
import torch
from caffe2.proto import caffe2_pb2
from caffe2.python import core, test_util, workspace, model_helper, brew
from hypothesis import given, settings
class TestWorkspace(unittest.TestCase):
def setUp(self):
self.net = core.Net("test-net")
Reported by Pylint.
Line: 74
Column: 38
self.assertEqual(workspace.HasBlob("testblob"), True)
def testCurrentWorkspaceWrapper(self):
self.assertNotIn("testblob", workspace.C.Workspace.current.blobs)
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True
)
self.assertEqual(workspace.HasBlob("testblob"), True)
self.assertIn("testblob", workspace.C.Workspace.current.blobs)
Reported by Pylint.
Line: 79
Column: 35
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True
)
self.assertEqual(workspace.HasBlob("testblob"), True)
self.assertIn("testblob", workspace.C.Workspace.current.blobs)
workspace.ResetWorkspace()
self.assertNotIn("testblob", workspace.C.Workspace.current.blobs)
def testRunPlan(self):
plan = core.Plan("test-plan")
Reported by Pylint.
Line: 81
Column: 38
self.assertEqual(workspace.HasBlob("testblob"), True)
self.assertIn("testblob", workspace.C.Workspace.current.blobs)
workspace.ResetWorkspace()
self.assertNotIn("testblob", workspace.C.Workspace.current.blobs)
def testRunPlan(self):
plan = core.Plan("test-plan")
plan.AddStep(core.ExecutionStep("test-step", self.net))
self.assertEqual(workspace.RunPlan(plan.Proto().SerializeToString()), True)
Reported by Pylint.
Line: 112
Column: 14
self.assertEqual(workspace.HasBlob("testblob"), False)
def testTensorAccess(self):
ws = workspace.C.Workspace()
""" test in-place modification """
ws.create_blob("tensor").feed(np.array([1.1, 1.2, 1.3]))
tensor = ws.blobs["tensor"].tensor()
tensor.data[0] = 3.3
Reported by Pylint.
Line: 337
Column: 13
workspace.FetchBlob("foo"), np.array([2, 2, 777, 2, 777])
)
z = torch.ones((4,), dtype=torch.int64)
workspace.FeedBlob("bar", z)
workspace.RunOperatorOnce(
core.CreateOperator("Reshape", ["bar"], ["bar", "_"], shape=(2, 2))
)
z[0, 1] = 123
Reported by Pylint.
Line: 337
Column: 36
workspace.FetchBlob("foo"), np.array([2, 2, 777, 2, 777])
)
z = torch.ones((4,), dtype=torch.int64)
workspace.FeedBlob("bar", z)
workspace.RunOperatorOnce(
core.CreateOperator("Reshape", ["bar"], ["bar", "_"], shape=(2, 2))
)
z[0, 1] = 123
Reported by Pylint.
Line: 435
Column: 13
workspace.FetchBlob("foo"), np.array([2, 2, 777, 2, 777])
)
z = torch.ones((4,), dtype=torch.int64, device="cuda")
workspace.FeedBlob("bar", z)
workspace.RunOperatorOnce(
core.CreateOperator(
"Reshape",
["bar"],
Reported by Pylint.
Line: 435
Column: 36
workspace.FetchBlob("foo"), np.array([2, 2, 777, 2, 777])
)
z = torch.ones((4,), dtype=torch.int64, device="cuda")
workspace.FeedBlob("bar", z)
workspace.RunOperatorOnce(
core.CreateOperator(
"Reshape",
["bar"],
Reported by Pylint.