The following issues were found:
caffe2/python/operator_test/learning_rate_op_test.py
59 issues
Line: 10
Column: 1
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import copy
from functools import partial
import math
Reported by Pylint.
Line: 11
Column: 1
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import copy
from functools import partial
import math
import numpy as np
Reported by Pylint.
Line: 22
Column: 47
class TestLearningRate(serial.SerializedTestCase):
@given(**hu.gcs_cpu_only)
@settings(deadline=None, max_examples=50)
def test_alter_learning_rate_op(self, gc, dc):
iter = np.random.randint(low=1, high=1e5, size=1)
active_period = int(np.random.randint(low=1, high=1e3, size=1))
inactive_period = int(np.random.randint(low=1, high=1e3, size=1))
base_lr = float(np.random.random(1))
Reported by Pylint.
Line: 23
Column: 9
@given(**hu.gcs_cpu_only)
@settings(deadline=None, max_examples=50)
def test_alter_learning_rate_op(self, gc, dc):
iter = np.random.randint(low=1, high=1e5, size=1)
active_period = int(np.random.randint(low=1, high=1e3, size=1))
inactive_period = int(np.random.randint(low=1, high=1e3, size=1))
base_lr = float(np.random.random(1))
def ref(iter):
Reported by Pylint.
Line: 28
Column: 17
inactive_period = int(np.random.randint(low=1, high=1e3, size=1))
base_lr = float(np.random.random(1))
def ref(iter):
iter = float(iter)
reminder = iter % (active_period + inactive_period)
if reminder < active_period:
return (np.array(base_lr), )
else:
Reported by Pylint.
Line: 50
Column: 46
self.assertReferenceChecks(gc, op, [iter], ref)
@given(**hu.gcs_cpu_only)
def test_hill_learning_rate_op(self, gc, dc):
iter = np.random.randint(low=1, high=1e5, size=1)
num_iter = int(np.random.randint(low=1e2, high=1e8, size=1))
start_multiplier = 1e-4
gamma = 1.0
Reported by Pylint.
Line: 51
Column: 9
@given(**hu.gcs_cpu_only)
def test_hill_learning_rate_op(self, gc, dc):
iter = np.random.randint(low=1, high=1e5, size=1)
num_iter = int(np.random.randint(low=1e2, high=1e8, size=1))
start_multiplier = 1e-4
gamma = 1.0
power = 0.5
Reported by Pylint.
Line: 60
Column: 17
end_multiplier = 1e-2
base_lr = float(np.random.random(1))
def ref(iter):
iter = float(iter)
if iter < num_iter:
lr = start_multiplier + (
1.0 - start_multiplier
) * iter / num_iter
Reported by Pylint.
Line: 87
Column: 47
self.assertReferenceChecks(gc, op, [iter], ref)
@given(**hu.gcs_cpu_only)
def test_slope_learning_rate_op(self, gc, dc):
iter = np.random.randint(low=1, high=1e5, size=1)
num_iter_1 = int(np.random.randint(low=1e2, high=1e3, size=1))
multiplier_1 = 1.0
num_iter_2 = num_iter_1 + int(np.random.randint(low=1e2, high=1e3, size=1))
Reported by Pylint.
Line: 88
Column: 9
@given(**hu.gcs_cpu_only)
def test_slope_learning_rate_op(self, gc, dc):
iter = np.random.randint(low=1, high=1e5, size=1)
num_iter_1 = int(np.random.randint(low=1e2, high=1e3, size=1))
multiplier_1 = 1.0
num_iter_2 = num_iter_1 + int(np.random.randint(low=1e2, high=1e3, size=1))
multiplier_2 = 0.5
Reported by Pylint.
torch/jit/mobile/__init__.py
59 issues
Line: 48
Column: 22
map_location = validate_map_location(map_location)
if isinstance(f, str) or isinstance(f, pathlib.Path):
cpp_module = torch._C._load_for_lite_interpreter(f, map_location)
else:
cpp_module = torch._C._load_for_lite_interpreter_from_buffer(f.read(), map_location)
return LiteScriptModule(cpp_module)
Reported by Pylint.
Line: 48
Column: 22
map_location = validate_map_location(map_location)
if isinstance(f, str) or isinstance(f, pathlib.Path):
cpp_module = torch._C._load_for_lite_interpreter(f, map_location)
else:
cpp_module = torch._C._load_for_lite_interpreter_from_buffer(f.read(), map_location)
return LiteScriptModule(cpp_module)
Reported by Pylint.
Line: 50
Column: 22
if isinstance(f, str) or isinstance(f, pathlib.Path):
cpp_module = torch._C._load_for_lite_interpreter(f, map_location)
else:
cpp_module = torch._C._load_for_lite_interpreter_from_buffer(f.read(), map_location)
return LiteScriptModule(cpp_module)
class LiteScriptModule(object):
def __init__(self, cpp_module):
Reported by Pylint.
Line: 50
Column: 22
if isinstance(f, str) or isinstance(f, pathlib.Path):
cpp_module = torch._C._load_for_lite_interpreter(f, map_location)
else:
cpp_module = torch._C._load_for_lite_interpreter_from_buffer(f.read(), map_location)
return LiteScriptModule(cpp_module)
class LiteScriptModule(object):
def __init__(self, cpp_module):
Reported by Pylint.
Line: 59
Column: 1
self._c = cpp_module
super(LiteScriptModule, self).__init__()
def __call__(self, *input):
return self._c.forward(input)
def find_method(self, method_name):
return self._c.find_method(method_name)
Reported by Pylint.
Line: 65
Column: 1
def find_method(self, method_name):
return self._c.find_method(method_name)
def forward(self, *input):
return self._c.forward(input)
def run_method(self, method_name, *input):
return self._c.run_method(method_name, input)
Reported by Pylint.
Line: 68
Column: 1
def forward(self, *input):
return self._c.forward(input)
def run_method(self, method_name, *input):
return self._c.run_method(method_name, input)
def _export_operator_list(module: LiteScriptModule):
r"""
return a set of root operator names (with overload name) that are used by any method
Reported by Pylint.
Line: 76
Column: 12
return a set of root operator names (with overload name) that are used by any method
in this mobile module.
"""
return torch._C._export_operator_list(module._c)
def _get_model_bytecode_version(f_input) -> int:
r"""
Args:
f_input: a file-like object (has to implement read, readline, tell, and seek),
Reported by Pylint.
Line: 76
Column: 12
return a set of root operator names (with overload name) that are used by any method
in this mobile module.
"""
return torch._C._export_operator_list(module._c)
def _get_model_bytecode_version(f_input) -> int:
r"""
Args:
f_input: a file-like object (has to implement read, readline, tell, and seek),
Reported by Pylint.
Line: 76
Column: 43
return a set of root operator names (with overload name) that are used by any method
in this mobile module.
"""
return torch._C._export_operator_list(module._c)
def _get_model_bytecode_version(f_input) -> int:
r"""
Args:
f_input: a file-like object (has to implement read, readline, tell, and seek),
Reported by Pylint.
docs/source/conf.py
59 issues
Line: 25
Column: 1
# source code directory, relative to this file, for sphinx-autobuild
# sys.path.insert(0, os.path.abspath('../..'))
import torch
try:
import torchvision # noqa: F401
except ImportError:
import warnings
Reported by Pylint.
Line: 35
Column: 1
RELEASE = os.environ.get('RELEASE', False)
import pytorch_sphinx_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
Reported by Pylint.
Line: 274
Column: 1
# in torch.html by overriding the visit_reference method of html writers.
# Someday this can be removed, once the old links fade away
from sphinx.writers import html, html5
def replace(Klass):
old_call = Klass.visit_reference
def visit_reference(self, node):
Reported by Pylint.
Line: 364
Column: 1
# -- A patch that prevents Sphinx from cross-referencing ivar tags -------
# See http://stackoverflow.com/a/41184353/3343043
from docutils import nodes
from sphinx.util.docfields import TypedField
from sphinx import addnodes
import sphinx.ext.doctest
# Without this, doctest adds any example with a `>>>` as a test
Reported by Pylint.
Line: 365
Column: 1
# See http://stackoverflow.com/a/41184353/3343043
from docutils import nodes
from sphinx.util.docfields import TypedField
from sphinx import addnodes
import sphinx.ext.doctest
# Without this, doctest adds any example with a `>>>` as a test
doctest_test_doctest_blocks = ''
Reported by Pylint.
Line: 366
Column: 1
from docutils import nodes
from sphinx.util.docfields import TypedField
from sphinx import addnodes
import sphinx.ext.doctest
# Without this, doctest adds any example with a `>>>` as a test
doctest_test_doctest_blocks = ''
doctest_default_flags = sphinx.ext.doctest.doctest.ELLIPSIS
Reported by Pylint.
Line: 367
Column: 1
from docutils import nodes
from sphinx.util.docfields import TypedField
from sphinx import addnodes
import sphinx.ext.doctest
# Without this, doctest adds any example with a `>>>` as a test
doctest_test_doctest_blocks = ''
doctest_default_flags = sphinx.ext.doctest.doctest.ELLIPSIS
doctest_global_setup = '''
Reported by Pylint.
Line: 28
Column: 5
import torch
try:
import torchvision # noqa: F401
except ImportError:
import warnings
warnings.warn('unable to load "torchvision" package')
RELEASE = os.environ.get('RELEASE', False)
Reported by Pylint.
Line: 80
Column: 3
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# TODO: document these and remove them from here.
coverage_ignore_modules = [
"torch.autograd",
"torch.cuda",
"torch.distributed",
Reported by Pylint.
Line: 111
Column: 3
"whichmodule",
"wrap_check_inputs",
# torch
# TODO: This should be documented eventually, but only after
# we build out more support for meta functions and actually
# do a release with it
"empty_meta",
]
Reported by Pylint.
torch/fx/interpreter.py
58 issues
Line: 1
Column: 1
from .graph_module import GraphModule
from .graph import Graph
from .node import Argument, Node, Target, map_arg, map_aggregate
from .proxy import Proxy
from ._symbolic_trace import Tracer
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
class Interpreter:
"""
Reported by Pylint.
Line: 2
Column: 1
from .graph_module import GraphModule
from .graph import Graph
from .node import Argument, Node, Target, map_arg, map_aggregate
from .proxy import Proxy
from ._symbolic_trace import Tracer
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
class Interpreter:
"""
Reported by Pylint.
Line: 3
Column: 1
from .graph_module import GraphModule
from .graph import Graph
from .node import Argument, Node, Target, map_arg, map_aggregate
from .proxy import Proxy
from ._symbolic_trace import Tracer
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
class Interpreter:
"""
Reported by Pylint.
Line: 4
Column: 1
from .graph_module import GraphModule
from .graph import Graph
from .node import Argument, Node, Target, map_arg, map_aggregate
from .proxy import Proxy
from ._symbolic_trace import Tracer
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
class Interpreter:
"""
Reported by Pylint.
Line: 5
Column: 1
from .graph import Graph
from .node import Argument, Node, Target, map_arg, map_aggregate
from .proxy import Proxy
from ._symbolic_trace import Tracer
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
class Interpreter:
"""
An Interpreter executes an FX graph Node-by-Node. This pattern
Reported by Pylint.
Line: 3
Column: 1
from .graph_module import GraphModule
from .graph import Graph
from .node import Argument, Node, Target, map_arg, map_aggregate
from .proxy import Proxy
from ._symbolic_trace import Tracer
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
class Interpreter:
"""
Reported by Pylint.
Line: 84
Column: 68
self.user_to_last_uses.setdefault(user, []).append(n)
for node in reversed(self.module.graph.nodes):
map_arg(node.args, lambda n: register_last_uses(n, node))
map_arg(node.kwargs, lambda n: register_last_uses(n, node))
def run(self, *args, initial_env : Optional[Dict[Node, Any]] = None) -> Any:
"""
Run `module` via interpretation and return the result.
Reported by Pylint.
Line: 85
Column: 70
for node in reversed(self.module.graph.nodes):
map_arg(node.args, lambda n: register_last_uses(n, node))
map_arg(node.kwargs, lambda n: register_last_uses(n, node))
def run(self, *args, initial_env : Optional[Dict[Node, Any]] = None) -> Any:
"""
Run `module` via interpretation and return the result.
Reported by Pylint.
Line: 106
Column: 9
# Positional function args are consumed left-to-right by
# `placeholder` nodes. Use an iterator to keep track of
# position and extract those values.
self.args_iter : Iterator[Any] = iter(args)
for node in self.module.graph.nodes:
if node in self.env:
# Short circuit if we have this value. This could
# be used, for example, for partial evaluation
Reported by Pylint.
Line: 146
Column: 46
# Main Node running APIs
def placeholder(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute a ``placeholder`` node. Note that this is stateful:
``Interpreter`` maintains an internal iterator over
arguments passed to ``run`` and this method returns
next() on that iterator.
Reported by Pylint.
caffe2/python/data_workers.py
58 issues
Line: 100
Column: 5
batch_columns=None,
timeout=600
):
global global_coordinator
device_option = scope.CurrentDeviceScope()
if (device_option is None):
device_option = caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CPU)
metrics = Metrics(external_loggers)
Reported by Pylint.
Line: 144
Column: 5
target=enqueuer,
name="Enqueuer {} {}".format(input_source_name, scope.CurrentNameScope()),
args=[coordinator, batch_feeder]))
coordinator._workers = workers
global_coordinator.add(coordinator)
return global_coordinator
Reported by Pylint.
Line: 202
Column: 33
return self._internal_queue.get(block=True, timeout=0.5)
except Queue.Empty:
if time.time() - last_warning > 10.0:
log.warning("** Data input is slow: (still) no data in {} secs.".format(
time.time() - start_time))
last_warning = time.time()
continue
return None
Reported by Pylint.
Line: 240
Column: 21
try:
qsize = self._internal_queue.qsize()
if qsize < 2 and (time.time() - self._last_warning) > LOG_INT_SECS:
log.warning("Warning, data loading lagging behind: " +
"queue size={}, name={}".format(qsize, self._input_source_name))
self._last_warning = time.time()
self._counter += 1
self._internal_queue.put(chunk, block=True, timeout=0.5)
self._log_inputs_per_interval(chunk[0].shape[0])
Reported by Pylint.
Line: 297
Column: 21
leftover = []
trimmed_batch = []
for j, b in enumerate(cur_batch):
[c, l] = np.split(
b, [self._batch_size], axis=self._batch_columns[j]
)
leftover.append(l)
trimmed_batch.append(c)
cur_batch = trimmed_batch
Reported by Pylint.
Line: 359
Column: 37
)
workspace.RunOperatorOnce(op)
def _create_caffe2_queues(self, net):
'''
Creates queues on caffe2 side
'''
def create_queue(queue_name, num_blobs, capacity):
workspace.RunOperatorOnce(
Reported by Pylint.
Line: 363
Column: 38
'''
Creates queues on caffe2 side
'''
def create_queue(queue_name, num_blobs, capacity):
workspace.RunOperatorOnce(
core.CreateOperator(
"CreateBlobsQueue",
[], [queue_name],
num_blobs=1,
Reported by Pylint.
Line: 394
Column: 22
if delta_seconds >= LOG_INT_SECS or force:
inputs_per_sec = int(self._inputs / delta_seconds)
qsize = self._internal_queue.qsize()
log.info("{}/{}: {} inputs/sec".format(
self._input_source_name,
self._namescope,
inputs_per_sec,
))
log.info("-- queue: {} batches".format(qsize))
Reported by Pylint.
Line: 399
Column: 22
self._namescope,
inputs_per_sec,
))
log.info("-- queue: {} batches".format(qsize))
# log and reset perf metrics
self._metrics.put_metric(
'inputs_per_sec', inputs_per_sec, False)
self._metrics.put_metric('queue_size', qsize, False)
self._metrics.put_metric(
Reported by Pylint.
Line: 424
Column: 18
return self._queues[queue_name]
def reset_data_input(self, namescope, name, net, batch_size):
log.info("Reset data input {}, batch size {}: ".format(name, batch_size))
for c in self._coordinators:
if c._worker_name == name and c._state._namescope == namescope:
c._state._batch_size = batch_size
c._state._create_caffe2_ops(net)
Reported by Pylint.
caffe2/python/operator_test/bbox_transform_test.py
58 issues
Line: 7
Column: 1
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
Reported by Pylint.
Line: 10
Column: 1
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
# Reference implementation from detectron/lib/utils/boxes.py
def bbox_transform(boxes, deltas, weights=(1.0, 1.0, 1.0, 1.0)):
Reported by Pylint.
Line: 228
Column: 9
angle_bound_on,
clip_angle_thresh,
gc,
dc,
):
"""
Test with all rois belonging to a single image per run.
"""
rois = (
Reported by Pylint.
Line: 294
Column: 9
angle_bound_on,
clip_angle_thresh,
gc,
dc,
):
"""
Test with rois for multiple images in a batch
"""
batch_size = len(roi_counts)
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
Reported by Pylint.
Line: 15
Column: 1
# Reference implementation from detectron/lib/utils/boxes.py
def bbox_transform(boxes, deltas, weights=(1.0, 1.0, 1.0, 1.0)):
"""Forward transform that maps proposal boxes to predicted ground-truth
boxes using bounding-box regression deltas. See bbox_transform_inv for a
description of the weights argument.
"""
if boxes.shape[0] == 0:
Reported by Pylint.
Line: 30
Column: 9
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
wx, wy, ww, wh = weights
dx = deltas[:, 0::4] / wx
dy = deltas[:, 1::4] / wy
dw = deltas[:, 2::4] / ww
dh = deltas[:, 3::4] / wh
Reported by Pylint.
Line: 30
Column: 13
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
wx, wy, ww, wh = weights
dx = deltas[:, 0::4] / wx
dy = deltas[:, 1::4] / wy
dw = deltas[:, 2::4] / ww
dh = deltas[:, 3::4] / wh
Reported by Pylint.
Line: 30
Column: 17
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
wx, wy, ww, wh = weights
dx = deltas[:, 0::4] / wx
dy = deltas[:, 1::4] / wy
dw = deltas[:, 2::4] / ww
dh = deltas[:, 3::4] / wh
Reported by Pylint.
Line: 30
Column: 5
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
wx, wy, ww, wh = weights
dx = deltas[:, 0::4] / wx
dy = deltas[:, 1::4] / wy
dw = deltas[:, 2::4] / ww
dh = deltas[:, 3::4] / wh
Reported by Pylint.
test/jit/test_data_parallel.py
58 issues
Line: 5
Column: 1
import sys
import unittest
import torch
import torch.nn as nn
import torch.nn.parallel as dp
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
Reported by Pylint.
Line: 6
Column: 1
import unittest
import torch
import torch.nn as nn
import torch.nn.parallel as dp
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
Reported by Pylint.
Line: 7
Column: 1
import torch
import torch.nn as nn
import torch.nn.parallel as dp
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, RUN_CUDA_MULTI_GPU
Reported by Pylint.
Line: 12
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, RUN_CUDA_MULTI_GPU
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
Reported by Pylint.
Line: 27
Column: 27
nn.ReLU(), nn.Linear(2, 2))
@torch.jit.ignore
def forward(self, input):
return self.m(input)
class Mpy1(torch.nn.Module):
def __init__(self, block):
super(TestDataParallel.Mpy1, self).__init__()
Reported by Pylint.
Line: 36
Column: 27
self.m = block
@torch.jit.ignore
def forward(self, input):
return self.m.forward(input)
class Mpy2(torch.nn.Module):
def __init__(self, block1, block2):
super(TestDataParallel.Mpy2, self).__init__()
Reported by Pylint.
Line: 46
Column: 27
self.m2 = block2
@torch.jit.ignore
def forward(self, input):
x = self.m1.forward(input)
return self.m2(x)
class Msm(torch.jit.ScriptModule):
Reported by Pylint.
Line: 60
Column: 27
nn.ReLU(), nn.Linear(2, 2))
@torch.jit.script_method
def forward(self, input):
return self.m(input)
class Msm1(torch.jit.ScriptModule):
def __init__(self, block):
super(TestDataParallel.Msm1, self).__init__()
Reported by Pylint.
Line: 69
Column: 27
self.block = block
@torch.jit.script_method
def forward(self, input):
x = self.block(input)
return x
def check_replicas(self, module, replicas, input_shape=(2, 2)):
input = torch.randn(input_shape).cuda()
Reported by Pylint.
Line: 74
Column: 9
return x
def check_replicas(self, module, replicas, input_shape=(2, 2)):
input = torch.randn(input_shape).cuda()
expected_output = module(input).data
for i, replica in enumerate(replicas):
for p in replica.parameters():
self.assertEqual(p.get_device(), i)
for b in replica.buffers():
Reported by Pylint.
test/onnx/test_custom_ops.py
58 issues
Line: 2
Column: 1
import unittest
import torch
import torch.utils.cpp_extension
import onnx
import caffe2.python.onnx.backend as c2
import numpy as np
Reported by Pylint.
Line: 3
Column: 1
import unittest
import torch
import torch.utils.cpp_extension
import onnx
import caffe2.python.onnx.backend as c2
import numpy as np
Reported by Pylint.
Line: 5
Column: 1
import torch
import torch.utils.cpp_extension
import onnx
import caffe2.python.onnx.backend as c2
import numpy as np
from test_pytorch_onnx_caffe2 import do_export
Reported by Pylint.
Line: 6
Column: 1
import torch.utils.cpp_extension
import onnx
import caffe2.python.onnx.backend as c2
import numpy as np
from test_pytorch_onnx_caffe2 import do_export
from test_pytorch_onnx_onnxruntime import run_model_test
Reported by Pylint.
Line: 12
Column: 1
from test_pytorch_onnx_caffe2 import do_export
from test_pytorch_onnx_onnxruntime import run_model_test
from torch.onnx.symbolic_helper import _unimplemented
class TestCustomOps(unittest.TestCase):
def test_custom_add(self):
op_source = """
Reported by Pylint.
Line: 42
Column: 9
def symbolic_custom_add(g, self, other):
return g.op("Add", self, other)
from torch.onnx import register_custom_op_symbolic
register_custom_op_symbolic("custom_namespace::custom_add", symbolic_custom_add, 9)
x = torch.randn(2, 3, 4, requires_grad=False)
y = torch.randn(2, 3, 4, requires_grad=False)
Reported by Pylint.
Line: 121
Column: 9
else:
return _unimplemented("prim::PythonOp", "unknown node kind: " + name)
from torch.onnx import register_custom_op_symbolic
register_custom_op_symbolic("::prim_PythonOp", symbolic_pythonop, 1)
x = torch.randn(2, 3, 4, requires_grad=True)
model = MyModule()
run_model_test(self, model, input=(x, ))
Reported by Pylint.
Line: 65
Column: 30
class MyClip(torch.autograd.Function):
@staticmethod
def forward(ctx, input, scalar):
ctx.save_for_backward(input)
return input.clamp(min=scalar)
@staticmethod
def symbolic(g, input, scalar):
Reported by Pylint.
Line: 70
Column: 29
return input.clamp(min=scalar)
@staticmethod
def symbolic(g, input, scalar):
return g.op("Clip", input, min_f=scalar)
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
Reported by Pylint.
Line: 90
Column: 30
class MyClip(torch.autograd.Function):
@staticmethod
def forward(ctx, input, scalar):
ctx.save_for_backward(input)
return input.clamp(min=scalar)
class MyRelu(torch.autograd.Function):
Reported by Pylint.
torch/nn/utils/spectral_norm.py
58 issues
Line: 7
Column: 1
import torch
from torch.nn.functional import normalize
from typing import Any, Optional, TypeVar
from ..modules import Module
class SpectralNorm:
# Invariant before and after each forward call:
# u = normalize(W @ v)
Reported by Pylint.
Line: 84
Column: 35
# Spectral norm of weight equals to `u^T W v`, where `u` and `v`
# are the first left and right singular vectors.
# This power iteration produces approximations of `u` and `v`.
v = normalize(torch.mv(weight_mat.t(), u), dim=0, eps=self.eps, out=v)
u = normalize(torch.mv(weight_mat, v), dim=0, eps=self.eps, out=u)
if self.n_power_iterations > 0:
# See above on why we need to clone
u = u.clone(memory_format=torch.contiguous_format)
v = v.clone(memory_format=torch.contiguous_format)
Reported by Pylint.
Line: 85
Column: 35
# are the first left and right singular vectors.
# This power iteration produces approximations of `u` and `v`.
v = normalize(torch.mv(weight_mat.t(), u), dim=0, eps=self.eps, out=v)
u = normalize(torch.mv(weight_mat, v), dim=0, eps=self.eps, out=u)
if self.n_power_iterations > 0:
# See above on why we need to clone
u = u.clone(memory_format=torch.contiguous_format)
v = v.clone(memory_format=torch.contiguous_format)
Reported by Pylint.
Line: 88
Column: 47
u = normalize(torch.mv(weight_mat, v), dim=0, eps=self.eps, out=u)
if self.n_power_iterations > 0:
# See above on why we need to clone
u = u.clone(memory_format=torch.contiguous_format)
v = v.clone(memory_format=torch.contiguous_format)
sigma = torch.dot(u, torch.mv(weight_mat, v))
weight = weight / sigma
return weight
Reported by Pylint.
Line: 89
Column: 47
if self.n_power_iterations > 0:
# See above on why we need to clone
u = u.clone(memory_format=torch.contiguous_format)
v = v.clone(memory_format=torch.contiguous_format)
sigma = torch.dot(u, torch.mv(weight_mat, v))
weight = weight / sigma
return weight
Reported by Pylint.
Line: 91
Column: 17
u = u.clone(memory_format=torch.contiguous_format)
v = v.clone(memory_format=torch.contiguous_format)
sigma = torch.dot(u, torch.mv(weight_mat, v))
weight = weight / sigma
return weight
def remove(self, module: Module) -> None:
with torch.no_grad():
Reported by Pylint.
Line: 91
Column: 30
u = u.clone(memory_format=torch.contiguous_format)
v = v.clone(memory_format=torch.contiguous_format)
sigma = torch.dot(u, torch.mv(weight_mat, v))
weight = weight / sigma
return weight
def remove(self, module: Module) -> None:
with torch.no_grad():
Reported by Pylint.
Line: 112
Column: 38
# (the invariant at top of this class) and `u @ W @ v = sigma`.
# This uses pinverse in case W^T W is not invertible.
v = torch.linalg.multi_dot([weight_mat.t().mm(weight_mat).pinverse(), weight_mat.t(), u.unsqueeze(1)]).squeeze(1)
return v.mul_(target_sigma / torch.dot(u, torch.mv(weight_mat, v)))
@staticmethod
def apply(module: Module, name: str, n_power_iterations: int, dim: int, eps: float) -> 'SpectralNorm':
for k, hook in module._forward_pre_hooks.items():
if isinstance(hook, SpectralNorm) and hook.name == name:
Reported by Pylint.
Line: 112
Column: 51
# (the invariant at top of this class) and `u @ W @ v = sigma`.
# This uses pinverse in case W^T W is not invertible.
v = torch.linalg.multi_dot([weight_mat.t().mm(weight_mat).pinverse(), weight_mat.t(), u.unsqueeze(1)]).squeeze(1)
return v.mul_(target_sigma / torch.dot(u, torch.mv(weight_mat, v)))
@staticmethod
def apply(module: Module, name: str, n_power_iterations: int, dim: int, eps: float) -> 'SpectralNorm':
for k, hook in module._forward_pre_hooks.items():
if isinstance(hook, SpectralNorm) and hook.name == name:
Reported by Pylint.
Line: 116
Column: 24
@staticmethod
def apply(module: Module, name: str, n_power_iterations: int, dim: int, eps: float) -> 'SpectralNorm':
for k, hook in module._forward_pre_hooks.items():
if isinstance(hook, SpectralNorm) and hook.name == name:
raise RuntimeError("Cannot register two spectral_norm hooks on "
"the same parameter {}".format(name))
fn = SpectralNorm(name, n_power_iterations, dim, eps)
Reported by Pylint.
benchmarks/fastrnns/bench.py
58 issues
Line: 3
Column: 1
import argparse
from collections import namedtuple
import torch
import gc
import sys
import json
import copy
import time
Reported by Pylint.
Line: 10
Column: 1
import copy
import time
from .fuser import set_fuser
from .runner import get_nn_runners
BenchResult = namedtuple('BenchResult', [
'name', 'avg_fwd', 'std_fwd', 'info_fwd', 'avg_bwd', 'std_bwd', 'info_bwd',
Reported by Pylint.
Line: 11
Column: 1
import time
from .fuser import set_fuser
from .runner import get_nn_runners
BenchResult = namedtuple('BenchResult', [
'name', 'avg_fwd', 'std_fwd', 'info_fwd', 'avg_bwd', 'std_bwd', 'info_bwd',
])
Reported by Pylint.
Line: 32
Column: 18
return str(item)
def print_header(colwidth=16, sep=' '):
items = []
for item in BenchResult._fields:
items.append(fit_str(item))
return sep.join(items)
Reported by Pylint.
Line: 39
Column: 31
return sep.join(items)
def pretty_print(benchresult, colwidth=16, sep=' '):
items = []
for thing in benchresult:
items.append(fit_str(to_str(thing)))
return sep.join(items)
Reported by Pylint.
Line: 51
Column: 9
pass
def record(self):
self.time = time.perf_counter()
def elapsed_time(self, end_event):
assert isinstance(end_event, Event)
return end_event.time - self.time
Reported by Pylint.
Line: 79
Column: 3
forward_output = modeldef.forward(*modeldef.inputs)
fwd_end_event.record()
# XXX: Use if need to print something
# print(modeldef.forward.graph_for(*modeldef.inputs))
if modeldef.backward_setup is not None:
backward_input = modeldef.backward_setup(forward_output)
else:
Reported by Pylint.
Line: 114
Column: 5
modeldef = rnn_creator(**creator_args)
[train_batch(modeldef) for _ in range(warmup)]
results = [train_batch(modeldef) for _ in range(nloops)]
fwd_times, bwd_times = zip(*results)
fwd_times = torch.tensor(fwd_times)
Reported by Pylint.
Line: 116
Column: 5
[train_batch(modeldef) for _ in range(warmup)]
results = [train_batch(modeldef) for _ in range(nloops)]
fwd_times, bwd_times = zip(*results)
fwd_times = torch.tensor(fwd_times)
bwd_times = torch.tensor(bwd_times)
return BenchResult(name=name,
Reported by Pylint.
Line: 130
Column: 1
info_bwd=bwd_times)
def print_stderr(*args, **kwargs):
kwargs['file'] = sys.stderr
return print(*args, **kwargs)
def print_json_oss_format(results):
Reported by Pylint.