The following issues were found:
test/distributed/elastic/rendezvous/dynamic_rendezvous_test.py
144 issues
Line: 20
Column: 1
from unittest import TestCase
from unittest.mock import MagicMock, Mock, call, patch
from torch.distributed import Store
from torch.distributed.elastic.rendezvous import (
    RendezvousClosedError,
    RendezvousError,
    RendezvousParameters,
    RendezvousStateError,
Reported by Pylint.
Line: 21
Column: 1
from unittest.mock import MagicMock, Mock, call, patch
from torch.distributed import Store
from torch.distributed.elastic.rendezvous import (
    RendezvousClosedError,
    RendezvousError,
    RendezvousParameters,
    RendezvousStateError,
    RendezvousTimeoutError,
Reported by Pylint.
Line: 28
Column: 1
    RendezvousStateError,
    RendezvousTimeoutError,
)
from torch.distributed.elastic.rendezvous.dynamic_rendezvous import (
    DynamicRendezvousHandler,
    RendezvousBackend,
    RendezvousSettings,
    RendezvousTimeout,
    Token,
Reported by Pylint.
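Note: all three entries above point at the import block (column 1), which matches Pylint's import-order checks (C0411/C0412). Assuming that is the complaint, the conventional fix is to group standard-library imports first and the torch imports after them, separated by a blank line; the same grouping applies to the column-1 import entries reported under the other files below:

from unittest import TestCase
from unittest.mock import MagicMock, Mock, call, patch

from torch.distributed import Store
from torch.distributed.elastic.rendezvous import (
    RendezvousClosedError,
    RendezvousError,
    RendezvousParameters,
    RendezvousStateError,
    RendezvousTimeoutError,
)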
Line: 90
Column: 21
                with self.assertRaisesRegex(
                    ValueError, rf"^The join timeout \({join_timeout}\) must be positive.$"
                ):
                    timeout = RendezvousTimeout(join_timeout)

class NodeDescTest(TestCase):
    def test_repr(self) -> None:
        desc = _NodeDesc("dummy_fqdn", 3, 5)
Reported by Pylint.
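Note: column 21 lands on the `timeout` binding. Assuming the warning is unused-variable (W0612), the result does not need to be bound inside an `assertRaisesRegex` block at all:

with self.assertRaisesRegex(
    ValueError, rf"^The join timeout \({join_timeout}\) must be positive.$"
):
    RendezvousTimeout(join_timeout)  # constructor should raise; no binding needed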
Line: 196
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b301-pickle
        return self._state, self._token, has_set  # type: ignore[return-value]

    def get_state_internal(self) -> _RendezvousState:
        return pickle.loads(cast(bytes, self._state))

    def set_state_internal(self, state: _RendezvousState) -> None:
        self._state = pickle.dumps(state)
        self._token += 1
Reported by Bandit.
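Note: Bandit B301 flags `pickle.loads` because unpickling attacker-controlled bytes can execute arbitrary code. Here `self._state` is produced by the test's own `pickle.dumps`, so a `# nosec` marker may be appropriate; for genuinely untrusted input, the pickle documentation suggests a restricted unpickler. A minimal sketch (the allowlist contents are an assumption for illustration, not taken from the code under review):

import io
import pickle

class _RestrictedUnpickler(pickle.Unpickler):
    # Refuse every global except an explicit allowlist (hypothetical entries).
    _ALLOWED = {("builtins", "dict"), ("builtins", "list")}

    def find_class(self, module, name):
        if (module, name) not in self._ALLOWED:
            raise pickle.UnpicklingError(f"forbidden global: {module}.{name}")
        return super().find_class(module, name)

def restricted_loads(data: bytes):
    return _RestrictedUnpickler(io.BytesIO(data)).load()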
Line: 1112
Column: 13
    @patch("torch.distributed.elastic.rendezvous.dynamic_rendezvous._delay")
    def test_next_rendezvous_skews_the_first_join_attempt(self, mock_delay) -> None:
        for round, expected_call_count in [(0, True), (1, False)]:
            with self.subTest(round=round):
                self._state.round = round
                handler = self._create_handler()
Reported by Pylint.
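Note: column 13 points at the loop variable `round`, which shadows the built-in `round()` (W0622). Renaming the variable is the simplest fix:

for round_num, expected_call_count in [(0, True), (1, False)]:
    with self.subTest(round=round_num):
        self._state.round = round_num
        handler = self._create_handler()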
Line: 1277
Column: 9
        handler = self._create_handler()
        handler._keep_alive()
        self.assertEqual(self._state.last_heartbeats[self._node], now)

    def _assert_keep_alive_swallows_rendezvous_errors(self) -> None:
        last_heartbeat_time = datetime.utcnow() - (self._keep_alive_interval * 2)
Reported by Pylint.
Line: 1288
Column: 9
        handler = self._create_handler()
        handler._keep_alive()
        self.assertEqual(self._state.last_heartbeats[self._node], last_heartbeat_time)

    def test_keep_alive_swallows_rendezvous_errors(self) -> None:
        self._mock_sync.side_effect = [RendezvousError]
Reported by Pylint.
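Note: in both entries above the column lands on `handler._keep_alive()`, so this is most likely protected-access (W0212). Tests that deliberately exercise a private method usually silence the check at the call site:

handler._keep_alive()  # pylint: disable=protected-access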
Line: 1351
Column: 32
    def get_state(self):
        return None

    def set_state(self, state, token):
        return None

class DynamicRendezvousHandlerFromBackendTest(TestCase):
    def setUp(self) -> None:
Reported by Pylint.
Line: 1351
Column: 25
    def get_state(self):
        return None

    def set_state(self, state, token):
        return None

class DynamicRendezvousHandlerFromBackendTest(TestCase):
    def setUp(self) -> None:
Reported by Pylint.
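Note: columns 25 and 32 line up with the `state` and `token` parameters of this stub backend, i.e. unused-argument (W0613). For a stub that must keep the interface's signature, a scoped disable (or underscore-prefixed names) is the usual answer:

def set_state(self, state, token):  # pylint: disable=unused-argument
    return None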
caffe2/python/core_gradients_test.py
144 issues
Line: 6
Column: 1
from hypothesis import given, settings
import hypothesis.strategies as st
import unittest
from caffe2.proto import caffe2_pb2
from caffe2.python import core, test_util, workspace
Reported by Pylint.
Line: 7
Column: 1
from hypothesis import given, settings
import hypothesis.strategies as st
import unittest
from caffe2.proto import caffe2_pb2
from caffe2.python import core, test_util, workspace
from caffe2.python.core import CreateOperator, GradientRegistry, IR
Reported by Pylint.
Line: 78
Column: 23
@GradientRegistry.RegisterGradient('Nogradient')
def AddNogradient(op, g_output):
    return (
        [],
        [None for s in op.input]
    )
Reported by Pylint.
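Note: column 23 points at `g_output`, which this gradient stub never uses; the comprehension variable `s` is unused as well. Underscore names make both intents explicit:

@GradientRegistry.RegisterGradient('Nogradient')
def AddNogradient(op, _g_output):
    return (
        [],
        [None for _ in op.input]
    )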
Line: 181
Column: 13
            CreateOperator('Direct', 'x', 'y'),
        ]
        try:
            gradients, _ = GradientRegistry.GetBackwardPass(
                operators, {'y': 'y_grad'})
            self.assertFalse(True, "Should raise exception of incorrect version")
        except RuntimeError as e:
            print(e)
            self.assertTrue("version" in str(e))
Reported by Pylint.
Line: 183
Column: 13
        try:
            gradients, _ = GradientRegistry.GetBackwardPass(
                operators, {'y': 'y_grad'})
            self.assertFalse(True, "Should raise exception of incorrect version")
        except RuntimeError as e:
            print(e)
            self.assertTrue("version" in str(e))
            pass
Reported by Pylint.
Line: 187
Column: 13
        except RuntimeError as e:
            print(e)
            self.assertTrue("version" in str(e))
            pass

    def testUseOutput(self):
        operators = [
            CreateOperator('UseOutput', 'in', 'hidden'),
            CreateOperator('UseOutput', 'hidden', 'out'),
Reported by Pylint.
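Note: the three entries above (lines 181, 183, 187) all sit in one try/except block with three separate problems: `gradients` is never used, `self.assertFalse(True, ...)` is an awkward way to force a failure, and the trailing `pass` is dead code. Assuming the test only needs to assert that a RuntimeError mentioning "version" is raised, `assertRaisesRegex` addresses all three at once:

with self.assertRaisesRegex(RuntimeError, "version"):
    GradientRegistry.GetBackwardPass(operators, {'y': 'y_grad'})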
Line: 242
Column: 13
            CreateOperator('Direct', 'out', 'sink'),
        ]
        with self.assertRaises(RuntimeError):
            gradients, _ = GradientRegistry.GetBackwardPass(
                operators, {'sink': 'sink_grad'})

    def testUseInput(self):
        operators = [
            CreateOperator('Direct', 'in', 'hidden'),
Reported by Pylint.
Line: 280
Column: 13
            CreateOperator('Direct', 'in', 'in'),
        ]
        with self.assertRaises(RuntimeError):
            gradients, _ = GradientRegistry.GetBackwardPass(
                operators, {'out': 'out_grad'})

    @given(device_option=st.sampled_from([
        None,
        core.DeviceOption(workspace.GpuDeviceType, 1)]))
Reported by Pylint.
Line: 570
Column: 13
        ]
        with self.assertRaises(ValueError):
            # This should complain about incorrect use of StopGradient
            gradients, _ = GradientRegistry.GetBackwardPass(
                operators, {'out': 'out_grad'})

    def testStopGradientInplace(self):
        operators = [
            CreateOperator('Direct', 'in', 'hidden'),
Reported by Pylint.
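Note: lines 242, 280, and 570 share one pattern: inside an `assertRaises` block the bound `gradients` can never be read, so the assignment can simply be dropped:

with self.assertRaises(RuntimeError):
    GradientRegistry.GetBackwardPass(operators, {'sink': 'sink_grad'})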
Line: 623
Column: 9
    def test_two_grads(self):
        net = core.Net("test_two_grads")
        input, two, three = net.AddExternalInput("input", "two", "three")
        m1 = net.Mul([input, two], "mul_1")
        m2 = net.Mul([m1, three], "mul_2")
        grad_map = net.AddGradientOperators([m2, m1])
        workspace.ResetWorkspace()
Reported by Pylint.
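Note: column 9 points at `input`, which shadows the built-in `input()` (W0622). A rename keeps the graph blob name unchanged:

inp, two, three = net.AddExternalInput("input", "two", "three")
m1 = net.Mul([inp, two], "mul_1")
m2 = net.Mul([m1, three], "mul_2")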
caffe2/python/operator_test/locally_connected_op_test.py
142 issues
Line: 6
Column: 1
import numpy as np
from hypothesis import given, settings, assume
import hypothesis.strategies as st
from caffe2.python import core, utils, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
Reported by Pylint.
Line: 7
Column: 1
import numpy as np
from hypothesis import given, settings, assume
import hypothesis.strategies as st
from caffe2.python import core, utils, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
Reported by Pylint.
Line: 9
Column: 1
from hypothesis import given, settings, assume
import hypothesis.strategies as st
from caffe2.python import core, utils, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
Reported by Pylint.
Line: 59
Column: 23
        inputs = [X, W, b] if use_bias else [X, W]

        def lc_2d_nchw(X, W, b=None):
            N, C, XH, XW = X.shape
            YH, YW, M, _, KH, KW = W.shape

            def conv(n, m, yh, yw):
                sum = b[yh, yw, m] if b is not None else 0
                for c in range(C):
Reported by Pylint.
Line: 59
Column: 19
        inputs = [X, W, b] if use_bias else [X, W]

        def lc_2d_nchw(X, W, b=None):
            N, C, XH, XW = X.shape
            YH, YW, M, _, KH, KW = W.shape

            def conv(n, m, yh, yw):
                sum = b[yh, yw, m] if b is not None else 0
                for c in range(C):
Reported by Pylint.
Line: 63
Column: 17
            YH, YW, M, _, KH, KW = W.shape

            def conv(n, m, yh, yw):
                sum = b[yh, yw, m] if b is not None else 0
                for c in range(C):
                    for kh in range(KH):
                        for kw in range(KW):
                            hh = yh + kh
                            ww = yw + kw
Reported by Pylint.
Line: 128
Column: 19
        inputs = [X, W, b] if use_bias else [X, W]

        def lc_1d_nchw(X, W, b=None):
            N, C, XL = X.shape
            YL, M, _, KL = W.shape

            def conv(n, m, yl):
                sum = b[yl, m] if b is not None else 0
                for c in range(C):
Reported by Pylint.
Line: 132
Column: 17
            YL, M, _, KL = W.shape

            def conv(n, m, yl):
                sum = b[yl, m] if b is not None else 0
                for c in range(C):
                    for kl in range(KL):
                        ll = yl + kl
                        sum += X[n, c, ll] * W[yl, m, c, kl]
                return sum
Reported by Pylint.
Line: 194
Column: 19
        inputs = [X, W, b] if use_bias else [X, W]

        def lc_3d_nchw(X, W, b=None):
            N, C, XT, XH, XW = X.shape
            YT, YH, YW, M, _, KT, KH, KW = W.shape

            def conv(n, m, yt, yh, yw):
                sum = b[yt, yh, yw, m] if b is not None else 0
                for c in range(C):
Reported by Pylint.
Line: 194
Column: 23
        inputs = [X, W, b] if use_bias else [X, W]

        def lc_3d_nchw(X, W, b=None):
            N, C, XT, XH, XW = X.shape
            YT, YH, YW, M, _, KT, KH, KW = W.shape

            def conv(n, m, yt, yh, yw):
                sum = b[yt, yh, yw, m] if b is not None else 0
                for c in range(C):
Reported by Pylint.
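Note: the entries for this file split into two groups: the `lc_*_nchw` helper parameters `X`, `W`, `b` shadow the enclosing test's variables (redefined-outer-name, arguably intentional here), and the accumulator `sum` shadows the built-in `sum()` (W0622). The accumulator rename is free; a sketch for the 2-D case, with the index order taken from the snippets above:

def conv(n, m, yh, yw):
    acc = b[yh, yw, m] if b is not None else 0  # plain name instead of shadowing sum()
    for c in range(C):
        for kh in range(KH):
            for kw in range(KW):
                acc += X[n, c, yh + kh, yw + kw] * W[yh, yw, m, c, kh, kw]
    return acc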
benchmarks/operator_benchmark/pt/quantization_test.py
141 issues
Line: 3
Column: 1
import operator_benchmark as op_bench
import torch
import torch.nn.quantized as nnq
import torch.quantization as tq
import torch.nn as nn
"""Microbenchmarks for general quantization operations."""
Reported by Pylint.
Line: 4
Column: 1
import operator_benchmark as op_bench
import torch
import torch.nn.quantized as nnq
import torch.quantization as tq
import torch.nn as nn
"""Microbenchmarks for general quantization operations."""
Reported by Pylint.
Line: 5
Column: 1
import operator_benchmark as op_bench
import torch
import torch.nn.quantized as nnq
import torch.quantization as tq
import torch.nn as nn
"""Microbenchmarks for general quantization operations."""
# mode is used to show the direction of the benchmark:
Reported by Pylint.
Line: 6
Column: 1
import torch
import torch.nn.quantized as nnq
import torch.quantization as tq
import torch.nn as nn
"""Microbenchmarks for general quantization operations."""
# mode is used to show the direction of the benchmark:
# if 'Q', benchmark quantization, else dequantization
Reported by Pylint.
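Note: the snippet shows the string """Microbenchmarks for general quantization operations.""" after the imports, where it is a no-op expression rather than a module docstring. Moving it to the top of the file (and, while there, ordering the imports) likely resolves this cluster of column-1 entries:

"""Microbenchmarks for general quantization operations."""

import operator_benchmark as op_bench
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
import torch.quantization as tq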
Line: 32
Column: 37
}

quantize_per_tensor_configs_short = op_bench.config_list(
    **quantize_configs_short_dict
)

quantize_per_tensor_configs_long = op_bench.cross_product_configs(
    **quantize_configs_long_dict
Reported by Pylint.
Line: 36
Column: 36
    **quantize_configs_short_dict
)

quantize_per_tensor_configs_long = op_bench.cross_product_configs(
    **quantize_configs_long_dict
)
class QuantizePerTensorBenchmark(op_bench.TorchBenchmarkBase):
Reported by Pylint.
Line: 41
Column: 34
)

class QuantizePerTensorBenchmark(op_bench.TorchBenchmarkBase):
    r"""Benchmarks both quantization and dequantization."""

    def init(self, C, M, N, dtype, mode):
        assert(mode in ('Q', 'D'))
        self.input = torch.rand(C, M, N)
        self.dtype = dtype
Reported by Pylint.
Line: 63
Column: 1
        return self.op(input)

op_bench.generate_pt_test(
    quantize_per_tensor_configs_short + quantize_per_tensor_configs_long,
    QuantizePerTensorBenchmark)
# === Per Channel quantization ===
Reported by Pylint.
Line: 69
Column: 38
# === Per Channel quantization ===
quantize_per_channel_configs_short = op_bench.config_list(
    cross_product_configs={
        'axis': (0,)
    },
    **quantize_configs_short_dict
)
Reported by Pylint.
Line: 76
Column: 37
    **quantize_configs_short_dict
)

quantize_per_channel_configs_long = op_bench.cross_product_configs(
    axis=(0, 1, 2),
    **quantize_configs_long_dict
)
class QuantizePerChannelBenchmark(op_bench.TorchBenchmarkBase):
Reported by Pylint.
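Note: the non-import entries for this file (columns 37, 36, 34, 1, 38, 37) all land on `op_bench.` attribute accesses. If Pylint cannot resolve the `operator_benchmark` package (it is imported from a benchmark-local path), these are no-member (E1101) false positives, and telling Pylint to skip member checks for that module is the usual remedy. A config sketch, assuming that diagnosis:

[TYPECHECK]
ignored-modules=operator_benchmark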
torch/onnx/symbolic_opset8.py
140 issues
Line: 257
Column: 71
    const_value = sym_help._maybe_get_const(value, "t")
    if sym_help._is_value(const_value):
        tmp = zeros(g, sizes, dtype, layout, device)
        return sym_opset9.add(g, tmp, value, g.op("Constant", value_t=torch.tensor(1)))
    else:
        dtype = sym_help._get_const(dtype, "i", "dtype")
        return _constant_fill(g, sizes, dtype, const_value)
Reported by Pylint.
Line: 271
Column: 44
def repeat(g, self, repeats):
    if not sym_help._is_value(repeats):
        repeats = g.op("Constant", value_t=torch.LongTensor(repeats))
    if sym_help._is_packed_list(repeats):
        repeat_size_len = len(sym_help._unpack_list(repeats))
    else:
        const_repeats = sym_help._maybe_get_const(repeats, "is")
        repeat_size_len = len(const_repeats)
Reported by Pylint.
Line: 281
Column: 70
    sizes = self.type().sizes()
    diff_dims = repeat_size_len - len(sizes)
    if diff_dims > 0:
        self = sym_opset9.view(g, self, g.op("Constant", value_t=torch.tensor([1] * diff_dims + sizes)))
    return g.op("Tile", self, repeats)
Reported by Pylint.
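Note: the reported columns (71, 44, 70) land exactly on the `torch.tensor(...)` and `torch.LongTensor(...)` calls. Pylint frequently misreports these as not-callable (E1102) because they are implemented in C extensions; if that is what is being flagged here, a per-line disable is the targeted fix:

return sym_opset9.add(g, tmp, value,
                      g.op("Constant", value_t=torch.tensor(1)))  # pylint: disable=not-callable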
Line: 8
Column: 1
from torch.onnx.symbolic_helper import parse_args, _unimplemented, _block_list_in_opset, _try_get_scalar_type
from torch.onnx.symbolic_opset9 import _cast_Float # type: ignore[attr-defined]
from torch.onnx.symbolic_opset7 import div # noqa: F401
import warnings
# Note [ONNX operators that are added/updated from opset 8 to opset 9]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Reported by Pylint.
Line: 55
Column: 24
def _interpolate(name, dim, interpolate_mode):
    def symbolic_fn(g, input, output_size, *args):
        scales, align_corners = sym_help._get_interpolate_attributes(g, interpolate_mode, args)
        sym_help._interpolate_warning(interpolate_mode)
        align_corners = sym_help._maybe_get_scalar(align_corners)
        if align_corners:
            return _unimplemented(name, "align_corners == True")
Reported by Pylint.
Line: 56
Column: 33
def _interpolate(name, dim, interpolate_mode):
    def symbolic_fn(g, input, output_size, *args):
        scales, align_corners = sym_help._get_interpolate_attributes(g, interpolate_mode, args)
        sym_help._interpolate_warning(interpolate_mode)
        align_corners = sym_help._maybe_get_scalar(align_corners)
        if align_corners:
            return _unimplemented(name, "align_corners == True")
        output_size = sym_help._maybe_get_const(output_size, "is")
Reported by Pylint.
Line: 57
Column: 9
def _interpolate(name, dim, interpolate_mode):
    def symbolic_fn(g, input, output_size, *args):
        scales, align_corners = sym_help._get_interpolate_attributes(g, interpolate_mode, args)
        sym_help._interpolate_warning(interpolate_mode)
        align_corners = sym_help._maybe_get_scalar(align_corners)
        if align_corners:
            return _unimplemented(name, "align_corners == True")
        output_size = sym_help._maybe_get_const(output_size, "is")
        if sym_help._is_value(output_size):
Reported by Pylint.
Line: 58
Column: 25
    def symbolic_fn(g, input, output_size, *args):
        scales, align_corners = sym_help._get_interpolate_attributes(g, interpolate_mode, args)
        sym_help._interpolate_warning(interpolate_mode)
        align_corners = sym_help._maybe_get_scalar(align_corners)
        if align_corners:
            return _unimplemented(name, "align_corners == True")
        output_size = sym_help._maybe_get_const(output_size, "is")
        if sym_help._is_value(output_size):
            return _unimplemented(name, "torch._C.Value (output_size) indexing")
Reported by Pylint.
Line: 61
Column: 23
        align_corners = sym_help._maybe_get_scalar(align_corners)
        if align_corners:
            return _unimplemented(name, "align_corners == True")
        output_size = sym_help._maybe_get_const(output_size, "is")
        if sym_help._is_value(output_size):
            return _unimplemented(name, "torch._C.Value (output_size) indexing")
        if scales is None:
            scales = [1. if i < 2 else
                      float(output_size[-(dim - i)]) / float(input.type().sizes()[-(dim - i)])
Reported by Pylint.
Line: 62
Column: 12
        if align_corners:
            return _unimplemented(name, "align_corners == True")
        output_size = sym_help._maybe_get_const(output_size, "is")
        if sym_help._is_value(output_size):
            return _unimplemented(name, "torch._C.Value (output_size) indexing")
        if scales is None:
            scales = [1. if i < 2 else
                      float(output_size[-(dim - i)]) / float(input.type().sizes()[-(dim - i)])
                      for i in range(0, dim)]
Reported by Pylint.
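Note: for lines 55-62 the columns track two things: the `input` parameter of `symbolic_fn`, which shadows the built-in, and the `sym_help._get_interpolate_attributes` / `_maybe_get_*` / `_is_value` calls, i.e. protected-access (W0212). Those underscore helpers are torch.onnx's own internal API, so scoped disables are the conventional treatment, assuming no public alternatives exist:

def symbolic_fn(g, input, output_size, *args):  # pylint: disable=redefined-builtin
    # sym_help's underscore helpers are torch.onnx-internal API, so the
    # protected-access warnings are silenced for the rest of this function:
    # pylint: disable=protected-access
    scales, align_corners = sym_help._get_interpolate_attributes(g, interpolate_mode, args)
    sym_help._interpolate_warning(interpolate_mode)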
test/jit/test_types.py
140 issues
Line: 4
Column: 1
from collections import namedtuple
from typing import Dict, List, Optional, Tuple
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing import FileCheck
from textwrap import dedent
from jit.test_module_interface import TestModuleInterface # noqa: F401
import inspect
import os
Reported by Pylint.
Line: 5
Column: 1
from typing import Dict, List, Optional, Tuple
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing import FileCheck
from textwrap import dedent
from jit.test_module_interface import TestModuleInterface # noqa: F401
import inspect
import os
import sys
Reported by Pylint.
Line: 11
Column: 1
import inspect
import os
import sys
import torch
import torch.testing._internal.jit_utils
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
Reported by Pylint.
Line: 12
Column: 1
import os
import sys
import torch
import torch.testing._internal.jit_utils
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
Reported by Pylint.
Line: 28
Column: 17
        # TODO add test to use PEP585 type annotation for return type after py3.9
        # see: https://www.python.org/dev/peps/pep-0585/#id5
        def fn(x: torch.Tensor) -> Tuple[Tuple[torch.Tensor], Dict[str, int]]:
            xl: list[tuple[torch.Tensor]] = []
            xd: dict[str, int] = {}
            xl.append((x,))
            xd['foo'] = 1
            return xl.pop(), xd
Reported by Pylint.
Line: 28
Column: 22
        # TODO add test to use PEP585 type annotation for return type after py3.9
        # see: https://www.python.org/dev/peps/pep-0585/#id5
        def fn(x: torch.Tensor) -> Tuple[Tuple[torch.Tensor], Dict[str, int]]:
            xl: list[tuple[torch.Tensor]] = []
            xd: dict[str, int] = {}
            xl.append((x,))
            xd['foo'] = 1
            return xl.pop(), xd
Reported by Pylint.
Line: 29
Column: 17
        # see: https://www.python.org/dev/peps/pep-0585/#id5
        def fn(x: torch.Tensor) -> Tuple[Tuple[torch.Tensor], Dict[str, int]]:
            xl: list[tuple[torch.Tensor]] = []
            xd: dict[str, int] = {}
            xl.append((x,))
            xd['foo'] = 1
            return xl.pop(), xd

        self.checkScript(fn, [torch.randn(2, 2)])
Reported by Pylint.
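Note: columns 17 and 22 land on the `list[...]` / `tuple[...]` / `dict[...]` annotations. On Python versions before 3.9 Pylint reports these as unsubscriptable-object (E1136); since the test exists precisely to exercise the PEP 585 spellings (see its own TODO), rewriting them to `typing.List` etc. would defeat the point, and a local disable is more faithful:

xl: list[tuple[torch.Tensor]] = []  # pylint: disable=unsubscriptable-object
xd: dict[str, int] = {}  # pylint: disable=unsubscriptable-object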
Line: 64
Column: 9
        foo = torch.jit.script(Foo())
        y = foo(torch.randn(2, 2), torch.randn(2, 2))

        class Foo(torch.nn.Module):
            def __init__(self):
                super().__init__()

            @torch.jit.ignore
            def foo(self, x, z) -> Tuple[GG, GG]:
Reported by Pylint.
Line: 7
Column: 1
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing import FileCheck
from textwrap import dedent
from jit.test_module_interface import TestModuleInterface # noqa: F401
import inspect
import os
import sys
import torch
import torch.testing._internal.jit_utils
Reported by Pylint.
Line: 25
Column: 3
class TestTypesAndAnnotation(JitTestCase):
    def test_pep585_type(self):
        # TODO add test to use PEP585 type annotation for return type after py3.9
        # see: https://www.python.org/dev/peps/pep-0585/#id5
        def fn(x: torch.Tensor) -> Tuple[Tuple[torch.Tensor], Dict[str, int]]:
            xl: list[tuple[torch.Tensor]] = []
            xd: dict[str, int] = {}
            xl.append((x,))
Reported by Pylint.
benchmarks/functional_autograd_benchmark/torchvision_models.py
139 issues
Line: 3
Column: 1
# Taken from https://github.com/pytorch/vision
# So that we don't need torchvision to be installed
import torch
from torch import nn
from torch.nn import functional as F
from torch.jit.annotations import Dict
from collections import OrderedDict
Reported by Pylint.
Line: 4
Column: 1
# Taken from https://github.com/pytorch/vision
# So that we don't need torchvision to be installed
import torch
from torch import nn
from torch.nn import functional as F
from torch.jit.annotations import Dict
from collections import OrderedDict
Reported by Pylint.
Line: 5
Column: 1
# So that we don't need torchvision to be installed
import torch
from torch import nn
from torch.nn import functional as F
from torch.jit.annotations import Dict
from collections import OrderedDict
try:
Reported by Pylint.
Line: 7
Column: 1
from torch import nn
from torch.nn import functional as F
from torch.jit.annotations import Dict
from collections import OrderedDict
try:
    from scipy.optimize import linear_sum_assignment
    scipy_available = True
Reported by Pylint.
Line: 11
Column: 5
from collections import OrderedDict
try:
    from scipy.optimize import linear_sum_assignment
    scipy_available = True
except Exception:
    scipy_available = False
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
Reported by Pylint.
Line: 654
Column: 31
src_masks = outputs["pred_masks"]
# TODO use valid to mask invalid areas due to padding in loss
target_masks, valid = nested_tensor_from_tensor_list([t["masks"] for t in targets]).decompose()
target_masks = target_masks.to(src_masks)
src_masks = src_masks[src_idx]
# upsample predictions to the target size
src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:],
Reported by Pylint.
Line: 659
Column: 21
        src_masks = src_masks[src_idx]
        # upsample predictions to the target size
        src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:],
                                mode="bilinear", align_corners=False)
        src_masks = src_masks[:, 0].flatten(1)
        target_masks = target_masks[tgt_idx].flatten(1)
Reported by Pylint.
Line: 666
Column: 26
        target_masks = target_masks[tgt_idx].flatten(1)
        losses = {
            "loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes),
            "loss_dice": dice_loss(src_masks, target_masks, num_boxes),
        }
        return losses

    def _get_src_permutation_idx(self, indices):
Reported by Pylint.
Line: 667
Column: 26
        losses = {
            "loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes),
            "loss_dice": dice_loss(src_masks, target_masks, num_boxes),
        }
        return losses

    def _get_src_permutation_idx(self, indices):
        # permute predictions following indices
Reported by Pylint.
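Note: the flagged names (`nested_tensor_from_tensor_list`, `interpolate`, `sigmoid_focal_loss`, `dice_loss`) are torchvision/DETR helpers, and this file deliberately vendors only part of torchvision, so Pylint may see them as undefined unless they are defined earlier in the file. At least `interpolate` has a direct core-PyTorch stand-in, assuming the call sites only need the size/mode/align_corners arguments shown above:

from torch.nn import functional as F

interpolate = F.interpolate  # torchvision's helper wraps this same function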
Line: 13
Column: 8
try:
    from scipy.optimize import linear_sum_assignment
    scipy_available = True
except Exception:
    scipy_available = False

def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
Reported by Pylint.
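Note: column 8 points at `except Exception:`, i.e. broad-except (W0703). The guard only needs to handle a missing SciPy, so the narrower exception is both quieter and more precise:

try:
    from scipy.optimize import linear_sum_assignment
    scipy_available = True
except ImportError:  # only the failure we actually expect here
    scipy_available = False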
torch/nn/quantized/modules/functional_modules.py
139 issues
Line: 43
Column: 13
r"""Operation equivalent to ``torch.add(Tensor, Tensor)``"""
def add(self, x: Tensor, y: Tensor) -> Tensor:
r = torch.add(x, y)
r = self.activation_post_process(r)
return r
r"""Operation equivalent to ``torch.add(Tensor, float)``"""
def add_scalar(self, x: Tensor, y: float) -> Tensor:
Reported by Pylint.
Line: 49
Column: 13
r"""Operation equivalent to ``torch.add(Tensor, float)``"""
def add_scalar(self, x: Tensor, y: float) -> Tensor:
r = torch.add(x, y)
# Note: this operation is not observed because the observation is not
# needed for the quantized op.
return r
r"""Operation equivalent to ``torch.mul(Tensor, Tensor)``"""
Reported by Pylint.
Line: 56
Column: 13
r"""Operation equivalent to ``torch.mul(Tensor, Tensor)``"""
def mul(self, x: Tensor, y: Tensor) -> Tensor:
r = torch.mul(x, y)
r = self.activation_post_process(r)
return r
r"""Operation equivalent to ``torch.mul(Tensor, float)``"""
def mul_scalar(self, x: Tensor, y: float) -> Tensor:
Reported by Pylint.
Line: 62
Column: 13
r"""Operation equivalent to ``torch.mul(Tensor, float)``"""
def mul_scalar(self, x: Tensor, y: float) -> Tensor:
r = torch.mul(x, y)
# Note: this operation is not observed because the observation is not
# needed for the quantized op.
return r
r"""Operation equivalent to ``torch.cat``"""
Reported by Pylint.
Line: 69
Column: 13
r"""Operation equivalent to ``torch.cat``"""
def cat(self, x: List[Tensor], dim: int = 0) -> Tensor:
r = torch.cat(x, dim=dim)
r = self.activation_post_process(r)
return r
r"""Operation equivalent to ``relu(torch.add(x,y))``"""
def add_relu(self, x: Tensor, y: Tensor) -> Tensor:
Reported by Pylint.
Line: 75
Column: 13
r"""Operation equivalent to ``relu(torch.add(x,y))``"""
def add_relu(self, x: Tensor, y: Tensor) -> Tensor:
r = torch.add(x, y)
r = torch.nn.functional.relu(r)
r = self.activation_post_process(r)
return r
class FXFloatFunctional(torch.nn.Module):
Reported by Pylint.
Line: 98
Column: 13
r"""Operation equivalent to ``torch.add(Tensor, Tensor)``"""
def add(self, x: Tensor, y: Tensor) -> Tensor:
r = torch.add(x, y)
return r
r"""Operation equivalent to ``torch.add(Tensor, float)``"""
def add_scalar(self, x: Tensor, y: float) -> Tensor:
r = torch.add(x, y)
Reported by Pylint.
Line: 103
Column: 13
r"""Operation equivalent to ``torch.add(Tensor, float)``"""
def add_scalar(self, x: Tensor, y: float) -> Tensor:
r = torch.add(x, y)
return r
r"""Operation equivalent to ``torch.mul(Tensor, Tensor)``"""
def mul(self, x: Tensor, y: Tensor) -> Tensor:
r = torch.mul(x, y)
Reported by Pylint.
Line: 108
Column: 13
r"""Operation equivalent to ``torch.mul(Tensor, Tensor)``"""
def mul(self, x: Tensor, y: Tensor) -> Tensor:
r = torch.mul(x, y)
return r
r"""Operation equivalent to ``torch.mul(Tensor, float)``"""
def mul_scalar(self, x: Tensor, y: float) -> Tensor:
r = torch.mul(x, y)
Reported by Pylint.
Line: 113
Column: 13
r"""Operation equivalent to ``torch.mul(Tensor, float)``"""
def mul_scalar(self, x: Tensor, y: float) -> Tensor:
r = torch.mul(x, y)
return r
r"""Operation equivalent to ``torch.cat``"""
def cat(self, x: List[Tensor], dim: int = 0) -> Tensor:
r = torch.cat(x, dim=dim)
Reported by Pylint.
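Note: whatever each individual entry flags (the columns land on the `torch.add` / `torch.mul` calls, suggesting the usual Pylint trouble resolving torch's C-extension members), one visible problem in every snippet is that the r"""Operation equivalent...""" strings sit above the `def` they describe, making them no-op expression statements (W0105) rather than docstrings. Moving each one inside its function fixes that:

def add(self, x: Tensor, y: Tensor) -> Tensor:
    r"""Operation equivalent to ``torch.add(Tensor, Tensor)``"""
    r = torch.add(x, y)
    r = self.activation_post_process(r)
    return r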
caffe2/python/memonger.py
139 issues
Line: 8
Column: 1
import networkx as nx
import collections
import time
import copy
from caffe2.python import workspace, core
from caffe2.proto import caffe2_pb2
Reported by Pylint.
Line: 86
Column: 17
        if is_grad_blob(b) or (share_activations and b in activations):
            shared_blobs.add(b)

    start_time = time.time()
    optim_str = C.memonger_compute_blob_recycling_for_dag(
        netproto.SerializeToString(),
        [str(s).encode('utf-8') for s in losses],
        grad_op_indices,
        set(str(s).encode('utf-8') for s in shared_blobs),
        namescope.encode('utf-8'),
Reported by Pylint.
Line: 137
Column: 17
        assert not op.is_gradient_op, \
            "You can only pass inference-only nets to optimize_inference_for_dag"

    start_time = time.time()
    optim_str = C.memonger_compute_blob_recycling_for_dag(
        netproto.SerializeToString(),
        [str(s).encode('utf-8') for s in input_blobs],
        op_indices,
        set(str(s).encode('utf-8') for s in activation_blobs),
        namescope.encode('utf-8'),
Reported by Pylint.
Line: 826
Column: 17
def optimize_inference_fast(net, static_blobs):
    optim = caffe2_pb2.NetDef()
    optim_str = C.memonger_optimize_inference_net(
        net.SerializeToString(),
        [str(s).encode('utf-8') for s in static_blobs]
    )
    optim.ParseFromString(optim_str)
    return optim
Reported by Pylint.
Line: 49
Column: 3
                name.startswith("_" + namescope)) and name not in param_grads

    def is_grad_op(op):
        # TODO: something smarter
        for b in list(op.input) + list(op.output):
            if is_grad_blob(b):
                return True
        return False
Reported by Pylint.
Line: 55
Column: 5
                return True
        return False

    log.warn("NOTE: Executing memonger to optimize gradient memory")

    # Collect ops that have something to do with gradients
    if namescope != "" and not namescope.endswith("/"):
        namescope += "/"
Reported by Pylint.
Line: 96
Column: 14
        {} if blob_shapes is None else blob_shapes
    )
    log.info("Memonger memory optimization took {} secs".format(
        time.time() - start_time),
    )
    optim = caffe2_pb2.NetDef()
    optim.ParseFromString(optim_str)
Reported by Pylint.
Line: 147
Column: 14
        {}
    )
    log.info("Memonger memory optimization took {} secs".format(
        time.time() - start_time),
    )
    optim = caffe2_pb2.NetDef()
    optim.ParseFromString(optim_str)
Reported by Pylint.
Line: 163
Column: 5
def estimate_memory_usage(protos, shapes, types, devicescope):
    import numpy as np
    '''
    Estimate memory usage of a model. This is an estimate because
    we assume a single threaded execution and miss some internal
    memory usage of operators. Only estimates the memory for a given
    device scope.
Reported by Pylint.
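Note: a docstring only counts as such when it is the first statement in the function; here `import numpy as np` precedes it, so the triple-quoted block is a no-op string statement. Swapping the two lines (Pylint would also prefer the import at module level) restores it:

def estimate_memory_usage(protos, shapes, types, devicescope):
    '''
    Estimate memory usage of a model. This is an estimate because
    we assume a single threaded execution and miss some internal
    memory usage of operators. Only estimates the memory for a given
    device scope.
    '''
    import numpy as np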
Line: 196
Column: 25
    def num_bytes(blob):
        if blob not in shapes or blob not in types:
            log.warning("Unknown blob encountered: {}".format(blob))
            return 0
        sizeof = sizeofs[types[blob]]
        return sizeof * np.prod(shapes[blob])

    protos = [split_net(proto) for proto in protos]
Reported by Pylint.
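Note: the remaining memonger entries are logging-related: `log.warn` is a deprecated alias for `log.warning`, and Pylint's logging-format check prefers lazy %-style arguments so the message is only formatted when the record is actually emitted:

log.warning("NOTE: Executing memonger to optimize gradient memory")
log.info("Memonger memory optimization took %s secs", time.time() - start_time)
log.warning("Unknown blob encountered: %s", blob)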
torch/onnx/symbolic_opset12.py
139 issues
Line: 33
Column: 34
    if not sym_help._training_mode:
        return input
    p = g.op("Constant", value_t=torch.tensor(p))
    t = g.op("Constant", value_t=torch.tensor(True))
    r, _ = g.op("Dropout", input, p, t, outputs=2)
    return r
Reported by Pylint.
Line: 34
Column: 34
        return input
    p = g.op("Constant", value_t=torch.tensor(p))
    t = g.op("Constant", value_t=torch.tensor(True))
    r, _ = g.op("Dropout", input, p, t, outputs=2)
    return r

def nll_loss(g, self, target, weight, reduction, ignore_index):
Reported by Pylint.
Line: 88
Column: 34
@parse_args("v", "v", "v", "v", "i")
def binary_cross_entropy_with_logits(g, input, target, weight, pos_weight, reduction):
    from torch.onnx.symbolic_opset9 import sigmoid, log, sub, neg, mul, add
    p = g.op("Constant", value_t=torch.tensor([1]))
    sig_x = sigmoid(g, input)
    log_sig_x = log(g, sig_x)
    sub_1_x = sub(g, p, sig_x)
    sub_1_y = sub(g, p, target)
    log_1_x = log(g, sub_1_x)
Reported by Pylint.
Line: 127
Column: 64
def argmax(g, input, dim, keepdim):
    if sym_help._is_none(dim):
        from torch.onnx.symbolic_opset9 import reshape
        flattened = reshape(g, input, g.op("Constant", value_t=torch.tensor([-1])))
        return g.op("ArgMax", flattened, axis_i=0, keepdims_i=False, select_last_index_i=False)
    else:
        dim = _parse_arg(dim, "i")
        keepdim = _parse_arg(keepdim, "i")
        return g.op("ArgMax", input, axis_i=dim, keepdims_i=keepdim, select_last_index_i=False)
Reported by Pylint.
Line: 138
Column: 64
def argmin(g, input, dim, keepdim):
    if sym_help._is_none(dim):
        from torch.onnx.symbolic_opset9 import reshape
        flattened = reshape(g, input, g.op("Constant", value_t=torch.tensor([-1])))
        return g.op("ArgMin", flattened, axis_i=0, keepdims_i=False, select_last_index_i=False)
    else:
        dim = _parse_arg(dim, "i")
        keepdim = _parse_arg(keepdim, "i")
        return g.op("ArgMin", input, axis_i=dim, keepdims_i=keepdim, select_last_index_i=False)
Reported by Pylint.
Line: 167
Column: 46
    sizedim = sym_help._get_tensor_dim_size(input, dimension)
    if sizedim is not None:
        low_start = g.op("Constant", value_t=torch.tensor(0))
        low_end = g.op("Constant", value_t=torch.tensor(sizedim))
        hi_end = g.op("Constant", value_t=torch.tensor(sizedim + 1))
        low_indices = g.op("Range", low_start, low_end, step)
        hi_indices = g.op("Range", size, hi_end, step)
Reported by Pylint.
Line: 168
Column: 44
    sizedim = sym_help._get_tensor_dim_size(input, dimension)
    if sizedim is not None:
        low_start = g.op("Constant", value_t=torch.tensor(0))
        low_end = g.op("Constant", value_t=torch.tensor(sizedim))
        hi_end = g.op("Constant", value_t=torch.tensor(sizedim + 1))
        low_indices = g.op("Range", low_start, low_end, step)
        hi_indices = g.op("Range", size, hi_end, step)
        low_size = sym_help._size_helper(g, low_indices, g.op("Constant", value_t=torch.tensor(0)))
Reported by Pylint.
Line: 169
Column: 43
    if sizedim is not None:
        low_start = g.op("Constant", value_t=torch.tensor(0))
        low_end = g.op("Constant", value_t=torch.tensor(sizedim))
        hi_end = g.op("Constant", value_t=torch.tensor(sizedim + 1))
        low_indices = g.op("Range", low_start, low_end, step)
        hi_indices = g.op("Range", size, hi_end, step)
        low_size = sym_help._size_helper(g, low_indices, g.op("Constant", value_t=torch.tensor(0)))
        hi_size = sym_help._size_helper(g, hi_indices, g.op("Constant", value_t=torch.tensor(0)))
Reported by Pylint.
Line: 173
Column: 83
        low_indices = g.op("Range", low_start, low_end, step)
        hi_indices = g.op("Range", size, hi_end, step)
        low_size = sym_help._size_helper(g, low_indices, g.op("Constant", value_t=torch.tensor(0)))
        hi_size = sym_help._size_helper(g, hi_indices, g.op("Constant", value_t=torch.tensor(0)))
        ndim = sym_help._get_tensor_rank(input)
        perm = list(range(0, ndim))
        perm.append(perm.pop(dimension))
Reported by Pylint.
Line: 174
Column: 81
        hi_indices = g.op("Range", size, hi_end, step)
        low_size = sym_help._size_helper(g, low_indices, g.op("Constant", value_t=torch.tensor(0)))
        hi_size = sym_help._size_helper(g, hi_indices, g.op("Constant", value_t=torch.tensor(0)))
        ndim = sym_help._get_tensor_rank(input)
        perm = list(range(0, ndim))
        perm.append(perm.pop(dimension))
Reported by Pylint.
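Note: every entry for this file again lands on a `torch.tensor(...)` call, the same suspected not-callable (E1102) false positive as in torch/onnx/symbolic_opset8.py above. Rather than scattering per-line disables, projects commonly whitelist torch's dynamically generated members once in the Pylint configuration (a config sketch, not something this report prescribes):

[TYPECHECK]
generated-members=torch.*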