The following issues were found:
test/run_test.py
75 issues
Line: 593
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b602_subprocess_popen_with_shell_equals_true.html
'mpiexec --allow-run-as-root -n 1 bash -c ""', shell=True,
stdout=devnull, stderr=subprocess.STDOUT) == 0 else ''
noprefix_opt = '--noprefix' if subprocess.call(
f'mpiexec {allowrunasroot_opt} -n 1 --noprefix bash -c ""', shell=True,
stdout=devnull, stderr=subprocess.STDOUT) == 0 else ''
mpiexec = ['mpiexec', '-n', '3', noprefix_opt, allowrunasroot_opt]
return_code = run_test(test_module, test_directory, options,
Reported by Bandit.
Line: 15
Column: 1
import sys
import tempfile
import torch
from torch.utils import cpp_extension
from torch.testing._internal.common_utils import FILE_SCHEMA, IS_IN_CI, TEST_WITH_ROCM, shell, set_cwd
import torch.distributed as dist
from typing import Dict, Optional, List
Reported by Pylint.
Line: 16
Column: 1
import tempfile
import torch
from torch.utils import cpp_extension
from torch.testing._internal.common_utils import FILE_SCHEMA, IS_IN_CI, TEST_WITH_ROCM, shell, set_cwd
import torch.distributed as dist
from typing import Dict, Optional, List
try:
Reported by Pylint.
Line: 17
Column: 1
import torch
from torch.utils import cpp_extension
from torch.testing._internal.common_utils import FILE_SCHEMA, IS_IN_CI, TEST_WITH_ROCM, shell, set_cwd
import torch.distributed as dist
from typing import Dict, Optional, List
try:
# using tools/ to optimize test run.
Reported by Pylint.
Line: 18
Column: 1
import torch
from torch.utils import cpp_extension
from torch.testing._internal.common_utils import FILE_SCHEMA, IS_IN_CI, TEST_WITH_ROCM, shell, set_cwd
import torch.distributed as dist
from typing import Dict, Optional, List
try:
# using tools/ to optimize test run.
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))
Reported by Pylint.
Line: 1078
Column: 13
print_to_stderr(err_message)
finally:
if options.coverage:
from coverage import Coverage
test_dir = os.path.dirname(os.path.abspath(__file__))
with set_cwd(test_dir):
cov = Coverage()
if PYTORCH_COLLECT_COVERAGE:
cov.load()
Reported by Pylint.
Line: 473
Column: 3
executable = get_executable_command(options, allow_pytest=not extra_unittest_args,
disable_coverage=disable_coverage)
# TODO: move this logic into common_utils.py instead of passing in "-k" individually
# The following logic for running specified tests will only run for non-distributed tests, as those are dispatched
# to test_distributed and not run_test (this function)
if options.run_specified_test_cases:
unittest_args.extend(get_test_case_args(test_module, 'pytest' in executable))
Reported by Pylint.
Line: 543
Column: 35
os.remove(test_directory + '/' + test_module + '.py')
def test_cpp_extensions_aot_ninja(test_module, test_directory, options):
return _test_cpp_extensions_aot(test_directory, options, use_ninja=True)
def test_cpp_extensions_aot_no_ninja(test_module, test_directory, options):
return _test_cpp_extensions_aot(test_directory, options, use_ninja=False)
Reported by Pylint.
Line: 547
Column: 38
return _test_cpp_extensions_aot(test_directory, options, use_ninja=True)
def test_cpp_extensions_aot_no_ninja(test_module, test_directory, options):
return _test_cpp_extensions_aot(test_directory, options, use_ninja=False)
def test_distributed(test_module, test_directory, options):
# MPI tests are broken with Python-3.9
Reported by Pylint.
Line: 623
Column: 1
class TestChoices(list):
def __init__(self, *args, **kwargs):
super(TestChoices, self).__init__(args[0])
def __contains__(self, item):
return list.__contains__(self, parse_test_module(item))
Reported by Pylint.
caffe2/python/trt/test_trt.py
75 issues
Line: 8
Column: 1
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import onnx
import onnx.defs
from onnx.helper import make_node, make_graph, make_tensor_value_info, make_model
from onnx.backend.base import namedtupledict
from caffe2.python.models.download import ModelDownloader
import caffe2.python.onnx.backend as c2
Reported by Pylint.
Line: 9
Column: 1
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import onnx
import onnx.defs
from onnx.helper import make_node, make_graph, make_tensor_value_info, make_model
from onnx.backend.base import namedtupledict
from caffe2.python.models.download import ModelDownloader
import caffe2.python.onnx.backend as c2
from caffe2.python.onnx.workspace import Workspace
Reported by Pylint.
Line: 10
Column: 1
from caffe2.python import core, workspace
import onnx
import onnx.defs
from onnx.helper import make_node, make_graph, make_tensor_value_info, make_model
from onnx.backend.base import namedtupledict
from caffe2.python.models.download import ModelDownloader
import caffe2.python.onnx.backend as c2
from caffe2.python.onnx.workspace import Workspace
from caffe2.python.trt.transform import convert_onnx_model_to_trt_op, transform_caffe2_net
Reported by Pylint.
Line: 11
Column: 1
import onnx
import onnx.defs
from onnx.helper import make_node, make_graph, make_tensor_value_info, make_model
from onnx.backend.base import namedtupledict
from caffe2.python.models.download import ModelDownloader
import caffe2.python.onnx.backend as c2
from caffe2.python.onnx.workspace import Workspace
from caffe2.python.trt.transform import convert_onnx_model_to_trt_op, transform_caffe2_net
from caffe2.python.onnx.tests.test_utils import TestCase
Reported by Pylint.
Line: 108
Column: 26
np.testing.assert_almost_equal(Y_c2, Y_trt)
@unittest.skipIf(not workspace.C.use_trt, "No TensortRT support")
def test_relu_graph_simple(self):
X = np.random.randn(1, 1, 3, 2).astype(np.float32)
self._test_relu_graph(X, 1, 50)
Reported by Pylint.
Line: 114
Column: 26
self._test_relu_graph(X, 1, 50)
@unittest.skipIf(not workspace.C.use_trt, "No TensortRT support")
def test_relu_graph_big_batch(self):
X = np.random.randn(52, 1, 3, 2).astype(np.float32)
self._test_relu_graph(X, 52, 50)
def _test_onnx_importer(self, model_name, data_input_index, opset_version=onnx.defs.onnx_opset_version()):
Reported by Pylint.
Line: 143
Column: 26
Y_trt = namedtupledict('Outputs', op_outputs)(*output_values)
np.testing.assert_allclose(Y_c2, Y_trt, rtol=1e-3)
@unittest.skipIf(not workspace.C.use_trt, "No TensortRT support")
def test_resnet50(self):
self._test_onnx_importer('resnet50', 0, 9)
@unittest.skipIf(not workspace.C.use_trt, "No TensortRT support")
def test_bvlc_alexnet(self):
Reported by Pylint.
Line: 147
Column: 26
def test_resnet50(self):
self._test_onnx_importer('resnet50', 0, 9)
@unittest.skipIf(not workspace.C.use_trt, "No TensortRT support")
def test_bvlc_alexnet(self):
self._test_onnx_importer('bvlc_alexnet', 0, 9)
@unittest.skip("Until fixing Unsqueeze op")
def test_densenet121(self):
Reported by Pylint.
Line: 155
Column: 26
def test_densenet121(self):
self._test_onnx_importer('densenet121', -1, 3)
@unittest.skipIf(not workspace.C.use_trt, "No TensortRT support")
def test_inception_v1(self):
self._test_onnx_importer('inception_v1', -3, 9)
@unittest.skip("Until fixing Unsqueeze op")
def test_inception_v2(self):
Reported by Pylint.
Line: 167
Column: 26
def test_shufflenet(self):
self._test_onnx_importer('shufflenet', 0)
@unittest.skipIf(not workspace.C.use_trt, "No TensortRT support")
def test_squeezenet(self):
self._test_onnx_importer('squeezenet', -1, 9)
@unittest.skipIf(not workspace.C.use_trt, "No TensortRT support")
def test_vgg16(self):
Reported by Pylint.
caffe2/python/net_printer.py
74 issues
Line: 8
Column: 1
from caffe2.proto.caffe2_pb2 import OperatorDef, NetDef
from caffe2.python.checkpoint import Job
from caffe2.python.core import Net, ExecutionStep, Plan
from caffe2.python.task import Task, TaskGroup, WorkspaceType, TaskOutput
from collections import defaultdict
from contextlib import contextmanager
Reported by Pylint.
Line: 188
Column: 9
class Printer(Visitor, Text):
def __init__(self, factor_prefixes=False, c2_syntax=True):
super(Visitor, self).__init__()
super(Text, self).__init__()
self.factor_prefixes = factor_prefixes
self.c2_syntax = c2_syntax
self.c2_net_name = None
Reported by Pylint.
Line: 189
Column: 9
class Printer(Visitor, Text):
def __init__(self, factor_prefixes=False, c2_syntax=True):
super(Visitor, self).__init__()
super(Text, self).__init__()
self.factor_prefixes = factor_prefixes
self.c2_syntax = c2_syntax
self.c2_net_name = None
Reported by Pylint.
Line: 62
Column: 13
@contextmanager
def set_workspace(self, node=None, ws=None, do_copy=False):
if ws is not None:
ws = ws
elif node is not None:
ws = self.workspaces[str(node)]
else:
ws = self.workspace
if do_copy:
Reported by Pylint.
Line: 264
Column: 5
)
if x
)
call = '%s(%s)' % (op, inputs)
return call if not outputs else '%s = %s' % (
factor_prefix(outputs, factor_prefixes), call)
def format_device_option(dev_opt):
Reported by Pylint.
Line: 415
Column: 5
before each operator execution. Use for debugging purposes.
"""
assert isinstance(net, Net)
debug_net = Net(str(net))
assert isinstance(net, Net)
for op in net.Proto().op:
text = Text()
print_op(op, text)
debug_net.LogInfo(str(text))
Reported by Pylint.
Line: 419
Column: 9
assert isinstance(net, Net)
for op in net.Proto().op:
text = Text()
print_op(op, text)
debug_net.LogInfo(str(text))
debug_net.Proto().op.extend([op])
return debug_net
Reported by Pylint.
Line: 1
Column: 1
## @package net_printer
# Module caffe2.python.net_printer
from caffe2.proto.caffe2_pb2 import OperatorDef, NetDef
from caffe2.python.checkpoint import Job
Reported by Pylint.
Line: 12
Column: 1
from caffe2.python.checkpoint import Job
from caffe2.python.core import Net, ExecutionStep, Plan
from caffe2.python.task import Task, TaskGroup, WorkspaceType, TaskOutput
from collections import defaultdict
from contextlib import contextmanager
from copy import copy
from future.utils import viewkeys
from itertools import chain
from six import binary_type, text_type
Reported by Pylint.
Line: 13
Column: 1
from caffe2.python.core import Net, ExecutionStep, Plan
from caffe2.python.task import Task, TaskGroup, WorkspaceType, TaskOutput
from collections import defaultdict
from contextlib import contextmanager
from copy import copy
from future.utils import viewkeys
from itertools import chain
from six import binary_type, text_type
Reported by Pylint.
test/quantization/jit/test_deprecated_jit_quant.py
74 issues
Line: 1
Column: 1
import torch
from torch.testing._internal.common_quantization import (
skipIfNoFBGEMM
)
from torch.testing._internal.common_utils import suppress_warnings
from torch.testing._internal.jit_utils import JitTestCase
from typing import Tuple
import copy
Reported by Pylint.
Line: 2
Column: 1
import torch
from torch.testing._internal.common_quantization import (
skipIfNoFBGEMM
)
from torch.testing._internal.common_utils import suppress_warnings
from torch.testing._internal.jit_utils import JitTestCase
from typing import Tuple
import copy
Reported by Pylint.
Line: 5
Column: 1
from torch.testing._internal.common_quantization import (
skipIfNoFBGEMM
)
from torch.testing._internal.common_utils import suppress_warnings
from torch.testing._internal.jit_utils import JitTestCase
from typing import Tuple
import copy
Reported by Pylint.
Line: 6
Column: 1
skipIfNoFBGEMM
)
from torch.testing._internal.common_utils import suppress_warnings
from torch.testing._internal.jit_utils import JitTestCase
from typing import Tuple
import copy
class TestDeprecatedJitQuantized(JitTestCase):
Reported by Pylint.
Line: 86
Column: 25
class ScriptWrapper(torch.jit.ScriptModule):
def __init__(self, cell):
super(ScriptWrapper, self).__init__()
self.cell = cell
@torch.jit.script_method
def forward(self, x: torch.Tensor, hiddens: torch.Tensor) -> torch.Tensor:
return self.cell(x, hiddens)
Reported by Pylint.
Line: 210
Column: 29
for cell in [cell_int8, cell_fp16]:
class ScriptWrapper(torch.jit.ScriptModule):
def __init__(self, cell):
super(ScriptWrapper, self).__init__()
self.cell = cell
@torch.jit.script_method
def forward(self, x, hiddens):
# type: (torch.Tensor, Tuple[torch.Tensor, torch.Tensor])
Reported by Pylint.
Line: 183
Column: 50
wrapper = ScriptWrapper(cell)
# Compare quantize scripted module to unquantized
script_out, script_hid = wrapper(x, hiddens)
torch.testing.assert_allclose(script_out, ref_out)
for out, ref in zip(script_hid, ref_hid):
torch.testing.assert_allclose(out, ref)
# Compare export/import to unquantized
Reported by Pylint.
Line: 183
Column: 53
wrapper = ScriptWrapper(cell)
# Compare quantize scripted module to unquantized
script_out, script_hid = wrapper(x, hiddens)
torch.testing.assert_allclose(script_out, ref_out)
for out, ref in zip(script_hid, ref_hid):
torch.testing.assert_allclose(out, ref)
# Compare export/import to unquantized
Reported by Pylint.
Line: 184
Column: 59
# Compare quantize scripted module to unquantized
script_out, script_hid = wrapper(x, hiddens)
torch.testing.assert_allclose(script_out, ref_out)
for out, ref in zip(script_hid, ref_hid):
torch.testing.assert_allclose(out, ref)
# Compare export/import to unquantized
export_import_wrapper = self.getExportImportCopyWithPacking(wrapper)
Reported by Pylint.
Line: 185
Column: 49
# Compare quantize scripted module to unquantized
script_out, script_hid = wrapper(x, hiddens)
torch.testing.assert_allclose(script_out, ref_out)
for out, ref in zip(script_hid, ref_hid):
torch.testing.assert_allclose(out, ref)
# Compare export/import to unquantized
export_import_wrapper = self.getExportImportCopyWithPacking(wrapper)
ei_out, ei_hid = export_import_wrapper(x, hiddens)
Reported by Pylint.
torch/utils/model_dump/__init__.py
74 issues
Line: 17
Column: 1
"""
# Maintainer notes follow.
"""
The implementation strategy has tension between 3 goals:
- Small file size.
- Fully self-contained.
- Easy, modern JS environment.
Using Preact and HTM achieves 1 and 2 with a decent result for 3.
Reported by Pylint.
Line: 131
Column: 59
}
if typename == "torch._utils._rebuild_tensor_v2":
assert data.state is None
storage, offset, size, stride, requires_grad, hooks = data.args
storage_info = get_storage_info(storage)
return {"__tensor_v2__": [storage_info, offset, size, stride, requires_grad]}
if typename == "torch._utils._rebuild_qtensor":
assert data.state is None
storage, offset, size, stride, quantizer, requires_grad, hooks = data.args
Reported by Pylint.
Line: 172
Column: 3
assert isinstance(msg, str)
# Hack: Pretend this is a module so we don't need custom serialization.
# Hack: Wrap the message in a tuple so it looks like a nice state object.
# TODO: Undo at least that second hack. We should support string states.
return {
"__module_type__": typename,
"state": hierarchical_pickle((msg,)),
}
raise Exception(f"Can't prepare fake object of type for JS: {typename}")
Reported by Pylint.
Line: 256
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b301-pickle
# Parse debug info and add begin/end markers if not present
# to ensure that we cover the entire source code.
debug_info_t = pickle.loads(raw_debug)
assert isinstance(debug_info_t, tuple)
debug_info = list(debug_info_t)
if not debug_info:
debug_info.append((0, (('', '', 0), 0, 0)))
if debug_info[-1][0] != len(raw_code):
Reported by Bandit.
Line: 271
Column: 3
assert end > start
source, s_start, s_end = source_range
s_text, s_file, s_line = source
# TODO: Handle this case better. TorchScript ranges are in bytes,
# but JS doesn't really handle byte strings.
# if bytes and chars are not equivalent for this string,
# zero out the ranges so we don't highlight the wrong thing.
if len(s_text) != len(s_text.encode("utf-8")):
s_start = 0
Reported by Pylint.
Line: 304
Column: 3
if not zi.filename.endswith(".pkl"):
continue
with zf.open(zi) as handle:
# TODO: handle errors here and just ignore the file?
# NOTE: For a lot of these files (like bytecode),
# we could get away with just unpickling, but this should be safer.
obj = torch.utils.show_pickle.DumpUnpickler(handle, catch_invalid_utf8=True).load()
buf = io.StringIO()
pprint.pprint(obj, buf)
Reported by Pylint.
Line: 66
Column: 1
(they probably don't work at all right now).
"""
import sys
import os
import io
import pathlib
import re
import argparse
Reported by Pylint.
Line: 67
Column: 1
"""
import sys
import os
import io
import pathlib
import re
import argparse
import zipfile
Reported by Pylint.
Line: 68
Column: 1
import sys
import os
import io
import pathlib
import re
import argparse
import zipfile
import json
Reported by Pylint.
Line: 69
Column: 1
import sys
import os
import io
import pathlib
import re
import argparse
import zipfile
import json
import pickle
Reported by Pylint.
torch/distributed/rpc/api.py
74 issues
Line: 12
Column: 1
import torch
from torch.futures import Future
from torch._C._distributed_rpc import (
PyRRef,
RemoteProfilerManager,
WorkerInfo,
get_rpc_timeout,
_cleanup_python_rpc_handler,
Reported by Pylint.
Line: 32
Column: 1
_set_and_start_rpc_agent,
)
from .internal import (
PythonUDF,
RPCExecMode,
_internal_rpc_pickler,
_build_rpc_profiling_key,
)
Reported by Pylint.
Line: 39
Column: 1
_build_rpc_profiling_key,
)
from .constants import DEFAULT_SHUTDOWN_TIMEOUT, UNSET_RPC_TIMEOUT
logger = logging.getLogger(__name__)
# NB: Ignoring RRef leaks during shutdown. Without this, applications have to
# make sure there is no references to any RRef in the application code and
Reported by Pylint.
Line: 61
Column: 5
r"""
rpc_pickler: (.internal._InternalRPCPickler) Overrides the default RPC pickler
"""
global _default_pickler
_default_pickler = rpc_pickler
try:
yield
finally:
_default_pickler = _internal_rpc_pickler
Reported by Pylint.
Line: 109
Column: 5
def _init_rpc_states(agent):
worker_infos = agent.get_worker_infos()
global _ALL_WORKER_NAMES
_ALL_WORKER_NAMES = {worker_info.name for worker_info in worker_infos}
# NB: backend implementation might have already set the rpc_agent.
if not _is_current_rpc_agent_set():
_set_and_start_rpc_agent(agent)
Reported by Pylint.
Line: 263
Column: 9
try:
_all_gather(None, set(worker_names))
except RuntimeError as ex:
logger.error(
f"Failed to complete barrier, got error {ex}"
)
@_require_initialized
def _wait_all_workers():
Reported by Pylint.
Line: 279
Column: 9
try:
_all_gather(None, timeout=DEFAULT_SHUTDOWN_TIMEOUT)
except RuntimeError as ex:
logger.error(
f"Failed to respond to 'Shutdown Proceed' in time, got error {ex}"
)
raise ex
Reported by Pylint.
Line: 451
Column: 33
# under `.. autoclass:: RRef` does not work.
# we have to do the following process to replacee `rpc.PyRRef` with `rpc.RRef`.
#
def method_factory(method_name, docstring):
def method(self, *args, **kwargs):
return getattr(super(RRef, self), method_name)(*args, **kwargs)
method.__doc__ = docstring
return method
Reported by Pylint.
Line: 451
Column: 20
# under `.. autoclass:: RRef` does not work.
# we have to do the following process to replacee `rpc.PyRRef` with `rpc.RRef`.
#
def method_factory(method_name, docstring):
def method(self, *args, **kwargs):
return getattr(super(RRef, self), method_name)(*args, **kwargs)
method.__doc__ = docstring
return method
Reported by Pylint.
Line: 452
Column: 5
# we have to do the following process to replacee `rpc.PyRRef` with `rpc.RRef`.
#
def method_factory(method_name, docstring):
def method(self, *args, **kwargs):
return getattr(super(RRef, self), method_name)(*args, **kwargs)
method.__doc__ = docstring
return method
Reported by Pylint.
caffe2/python/operator_test/filler_ops_test.py
74 issues
Line: 11
Column: 1
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
def _fill_diagonal(shape, value):
Reported by Pylint.
Line: 12
Column: 1
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
def _fill_diagonal(shape, value):
result = np.zeros(shape)
Reported by Pylint.
Line: 26
Column: 32
@given(**hu.gcs)
@settings(deadline=10000)
def test_shape_error(self, gc, dc):
op = core.CreateOperator(
'GaussianFill',
[],
'out',
shape=32, # illegal parameter
Reported by Pylint.
Line: 26
Column: 36
@given(**hu.gcs)
@settings(deadline=10000)
def test_shape_error(self, gc, dc):
op = core.CreateOperator(
'GaussianFill',
[],
'out',
shape=32, # illegal parameter
Reported by Pylint.
Line: 38
Column: 16
exception = False
try:
workspace.RunOperatorOnce(op)
except Exception:
exception = True
self.assertTrue(exception, "Did not throw exception on illegal shape")
op = core.CreateOperator(
'ConstantFill',
Reported by Pylint.
Line: 55
Column: 32
@given(**hu.gcs)
@settings(deadline=10000)
def test_int64_shape(self, gc, dc):
large_dim = 2 ** 31 + 1
net = core.Net("test_shape_net")
net.UniformFill(
[],
'out',
Reported by Pylint.
Line: 55
Column: 36
@given(**hu.gcs)
@settings(deadline=10000)
def test_int64_shape(self, gc, dc):
large_dim = 2 ** 31 + 1
net = core.Net("test_shape_net")
net.UniformFill(
[],
'out',
Reported by Pylint.
Line: 81
Column: 64
**hu.gcs
)
@settings(deadline=10000)
def test_uniform_int_fill_op_blob_input(self, shape, a, b, gc, dc):
net = core.Net('test_net')
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
shape_blob = net.Const(shape, dtype=np.int64)
a_blob = net.Const(a, dtype=np.int32)
Reported by Pylint.
Line: 81
Column: 68
**hu.gcs
)
@settings(deadline=10000)
def test_uniform_int_fill_op_blob_input(self, shape, a, b, gc, dc):
net = core.Net('test_net')
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
shape_blob = net.Const(shape, dtype=np.int64)
a_blob = net.Const(a, dtype=np.int32)
Reported by Pylint.
Line: 106
Column: 47
@given(
**hu.gcs
)
def test_uniform_fill_using_arg(self, gc, dc):
net = core.Net('test_net')
shape = [2**3, 5]
# uncomment this to test filling large blob
# shape = [2**30, 5]
min_v = -100
Reported by Pylint.
torch/_lowrank.py
74 issues
Line: 8
Column: 1
from torch import Tensor
import torch
from . import _linalg_utils as _utils
from .overrides import has_torch_function, handle_torch_function
from typing import Optional, Tuple
def get_approximate_basis(A: Tensor,
Reported by Pylint.
Line: 9
Column: 1
from torch import Tensor
import torch
from . import _linalg_utils as _utils
from .overrides import has_torch_function, handle_torch_function
from typing import Optional, Tuple
def get_approximate_basis(A: Tensor,
q: int,
Reported by Pylint.
Line: 65
Column: 9
dtype = _utils.get_floating_dtype(A)
matmul = _utils.matmul
R = torch.randn(n, q, dtype=dtype, device=A.device)
# The following code could be made faster using torch.geqrf + torch.ormqr
# but geqrf is not differentiable
A_H = _utils.transjugate(A)
if M is None:
Reported by Pylint.
Line: 269
Column: 19
c = torch.sparse.sum(A, dim=(-2,)) / m
# reshape c
column_indices = c.indices()[0]
indices = torch.zeros(2, len(column_indices),
dtype=column_indices.dtype,
device=column_indices.device)
indices[0] = column_indices
C_t = torch.sparse_coo_tensor(
indices, c.values(), (n, 1), dtype=dtype, device=A.device)
Reported by Pylint.
Line: 273
Column: 15
dtype=column_indices.dtype,
device=column_indices.device)
indices[0] = column_indices
C_t = torch.sparse_coo_tensor(
indices, c.values(), (n, 1), dtype=dtype, device=A.device)
ones_m1_t = torch.ones(A.shape[:-2] + (1, m), dtype=dtype, device=A.device)
M = _utils.transpose(torch.sparse.mm(C_t, ones_m1_t))
return _svd_lowrank(A, q, niter=niter, M=M)
Reported by Pylint.
Line: 276
Column: 21
C_t = torch.sparse_coo_tensor(
indices, c.values(), (n, 1), dtype=dtype, device=A.device)
ones_m1_t = torch.ones(A.shape[:-2] + (1, m), dtype=dtype, device=A.device)
M = _utils.transpose(torch.sparse.mm(C_t, ones_m1_t))
return _svd_lowrank(A, q, niter=niter, M=M)
else:
C = A.mean(dim=(-2,), keepdim=True)
return _svd_lowrank(A - C, q, niter=niter, M=None)
Reported by Pylint.
Line: 61
Column: 5
"""
niter = 2 if niter is None else niter
m, n = A.shape[-2:]
dtype = _utils.get_floating_dtype(A)
matmul = _utils.matmul
R = torch.randn(n, q, dtype=dtype, device=A.device)
Reported by Pylint.
Line: 72
Column: 13
A_H = _utils.transjugate(A)
if M is None:
Q = torch.linalg.qr(matmul(A, R)).Q
for i in range(niter):
Q = torch.linalg.qr(matmul(A_H, Q)).Q
Q = torch.linalg.qr(matmul(A, Q)).Q
else:
M_H = _utils.transjugate(M)
Q = torch.linalg.qr(matmul(A, R) - matmul(M, R)).Q
Reported by Pylint.
Line: 11
Column: 1
from . import _linalg_utils as _utils
from .overrides import has_torch_function, handle_torch_function
from typing import Optional, Tuple
def get_approximate_basis(A: Tensor,
q: int,
niter: Optional[int] = 2,
M: Optional[Tensor] = None
Reported by Pylint.
Line: 13
Column: 1
from typing import Optional, Tuple
def get_approximate_basis(A: Tensor,
q: int,
niter: Optional[int] = 2,
M: Optional[Tensor] = None
) -> Tensor:
"""Return tensor :math:`Q` with :math:`q` orthonormal columns such
Reported by Pylint.
caffe2/python/operator_test/group_norm_op_test.py
74 issues
Line: 9
Column: 1
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
Reported by Pylint.
Line: 10
Column: 1
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
Reported by Pylint.
Line: 128
Column: 54
order=st.sampled_from(["NCHW", "NHWC"]), **hu.gcs)
@settings(deadline=10000)
def test_group_norm_grad(
self, N, G, D, H, W, epsilon, order, gc, dc):
op = core.CreateOperator(
"GroupNorm",
["X", "gamma", "beta"],
["Y", "mean", "inv_std"],
group=G,
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
Reported by Pylint.
Line: 13
Column: 1
import hypothesis.strategies as st
import numpy as np
import unittest
class TestGroupNormOp(serial.SerializedTestCase):
def group_norm_nchw_ref(self, X, gamma, beta, group, epsilon):
dims = X.shape
Reported by Pylint.
Line: 16
Column: 1
import unittest
class TestGroupNormOp(serial.SerializedTestCase):
def group_norm_nchw_ref(self, X, gamma, beta, group, epsilon):
dims = X.shape
N = dims[0]
C = dims[1]
G = group
Reported by Pylint.
Line: 17
Column: 5
class TestGroupNormOp(serial.SerializedTestCase):
def group_norm_nchw_ref(self, X, gamma, beta, group, epsilon):
dims = X.shape
N = dims[0]
C = dims[1]
G = group
D = int(C / G)
Reported by Pylint.
Line: 17
Column: 5
class TestGroupNormOp(serial.SerializedTestCase):
def group_norm_nchw_ref(self, X, gamma, beta, group, epsilon):
dims = X.shape
N = dims[0]
C = dims[1]
G = group
D = int(C / G)
Reported by Pylint.
Line: 17
Column: 5
class TestGroupNormOp(serial.SerializedTestCase):
def group_norm_nchw_ref(self, X, gamma, beta, group, epsilon):
dims = X.shape
N = dims[0]
C = dims[1]
G = group
D = int(C / G)
Reported by Pylint.
Line: 17
Column: 5
class TestGroupNormOp(serial.SerializedTestCase):
def group_norm_nchw_ref(self, X, gamma, beta, group, epsilon):
dims = X.shape
N = dims[0]
C = dims[1]
G = group
D = int(C / G)
Reported by Pylint.
torch/backends/cudnn/__init__.py
73 issues
Line: 56
Column: 5
CUDNN_TENSOR_DTYPES = {
torch.half,
torch.float,
torch.double,
}
Reported by Pylint.
Line: 57
Column: 5
CUDNN_TENSOR_DTYPES = {
torch.half,
torch.float,
torch.double,
}
def is_available():
Reported by Pylint.
Line: 58
Column: 5
CUDNN_TENSOR_DTYPES = {
torch.half,
torch.float,
torch.double,
}
def is_available():
r"""Returns a bool indicating if CUDNN is currently available."""
Reported by Pylint.
Line: 22
Column: 9
if _cudnn is not None:
def _init():
global __cudnn_version
if __cudnn_version is None:
__cudnn_version = _cudnn.getVersionInt()
runtime_version = _cudnn.getRuntimeVersion()
compile_version = _cudnn.getCompileVersion()
runtime_major, runtime_minor, _ = runtime_version
Reported by Pylint.
Line: 64
Column: 12
def is_available():
r"""Returns a bool indicating if CUDNN is currently available."""
return torch._C.has_cudnn
def is_acceptable(tensor):
if not torch._C._get_cudnn_enabled():
return False
Reported by Pylint.
Line: 68
Column: 12
def is_acceptable(tensor):
if not torch._C._get_cudnn_enabled():
return False
if tensor.device.type != 'cuda' or tensor.dtype not in CUDNN_TENSOR_DTYPES:
return False
if not is_available():
warnings.warn(
Reported by Pylint.
Line: 68
Column: 12
def is_acceptable(tensor):
if not torch._C._get_cudnn_enabled():
return False
if tensor.device.type != 'cuda' or tensor.dtype not in CUDNN_TENSOR_DTYPES:
return False
if not is_available():
warnings.warn(
Reported by Pylint.
Line: 88
Column: 19
def set_flags(_enabled=None, _benchmark=None, _deterministic=None, _allow_tf32=None):
orig_flags = (torch._C._get_cudnn_enabled(),
torch._C._get_cudnn_benchmark(),
torch._C._get_cudnn_deterministic(),
torch._C._get_cudnn_allow_tf32())
if _enabled is not None:
torch._C._set_cudnn_enabled(_enabled)
Reported by Pylint.
Line: 88
Column: 19
def set_flags(_enabled=None, _benchmark=None, _deterministic=None, _allow_tf32=None):
orig_flags = (torch._C._get_cudnn_enabled(),
torch._C._get_cudnn_benchmark(),
torch._C._get_cudnn_deterministic(),
torch._C._get_cudnn_allow_tf32())
if _enabled is not None:
torch._C._set_cudnn_enabled(_enabled)
Reported by Pylint.
Line: 89
Column: 19
def set_flags(_enabled=None, _benchmark=None, _deterministic=None, _allow_tf32=None):
orig_flags = (torch._C._get_cudnn_enabled(),
torch._C._get_cudnn_benchmark(),
torch._C._get_cudnn_deterministic(),
torch._C._get_cudnn_allow_tf32())
if _enabled is not None:
torch._C._set_cudnn_enabled(_enabled)
if _benchmark is not None:
Reported by Pylint.