The following issues were found:
torch/multiprocessing/reductions.py
63 issues
Line: 30
Column: 31
self.cdata = storage._weak_ref()
# Save a direct reference to _free_weak_ref because the `torch` module
# might be cleared during Python shutdown before this module is cleared.
self._free_weak_ref = torch.Storage._free_weak_ref # type: ignore[attr-defined]
def expired(self):
return torch.Storage._expired(self.cdata) # type: ignore[attr-defined]
def __del__(self):
Reported by Pylint.
Line: 33
Column: 16
self._free_weak_ref = torch.Storage._free_weak_ref # type: ignore[attr-defined]
def expired(self):
return torch.Storage._expired(self.cdata) # type: ignore[attr-defined]
def __del__(self):
self._free_weak_ref(self.cdata)
Reported by Pylint.
Line: 315
Column: 5
def reduce_storage(storage):
from . import get_sharing_strategy
if storage.is_cuda:
raise RuntimeError("Cannot pickle CUDA storage; try pickling a CUDA tensor instead")
elif get_sharing_strategy() == 'file_system':
metadata = storage._share_filename_()
cache_key = metadata[1]
Reported by Pylint.
Line: 33
Column: 16
self._free_weak_ref = torch.Storage._free_weak_ref # type: ignore[attr-defined]
def expired(self):
return torch.Storage._expired(self.cdata) # type: ignore[attr-defined]
def __del__(self):
self._free_weak_ref(self.cdata)
Reported by Pylint.
Line: 42
Column: 5
class SharedCache(dict):
"""dictionary from multiprocessing handles to StorageWeakRef"""
def __init__(self):
# free_dead_references() is called if the len exceeds the current
# limit. The limit scales with the number of remaining live objects.
self.limit = 128
# `fork` inherits lock state, so in case we fork when the lock is held,
# we register a function to reset the lock to a new object to avoid
Reported by Pylint.
Line: 90
Column: 9
def rebuild_tensor(cls, storage, metadata):
storage_offset, size, stride, requires_grad = metadata
t = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
if cls == torch.nn.parameter.Parameter:
# we have to pass requires_grad into constructor, rather than set it as an
# attribute later, because it's an important check for Integer Tensors to
# have requires_grad=False (or else they raise an error)
t = torch.nn.parameter.Parameter(t, requires_grad=requires_grad)
Reported by Pylint.
Line: 90
Column: 9
def rebuild_tensor(cls, storage, metadata):
storage_offset, size, stride, requires_grad = metadata
t = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
if cls == torch.nn.parameter.Parameter:
# we have to pass requires_grad into constructor, rather than set it as an
# attribute later, because it's an important check for Integer Tensors to
# have requires_grad=False (or else they raise an error)
t = torch.nn.parameter.Parameter(t, requires_grad=requires_grad)
Reported by Pylint.
Line: 110
Column: 13
else:
storage = storage_from_cache(storage_cls, (storage_handle, storage_offset_bytes))
if storage is None:
torch.cuda._lazy_init()
storage = storage_cls._new_shared_cuda(
storage_device,
storage_handle,
storage_size_bytes,
storage_offset_bytes,
Reported by Pylint.
Line: 111
Column: 23
storage = storage_from_cache(storage_cls, (storage_handle, storage_offset_bytes))
if storage is None:
torch.cuda._lazy_init()
storage = storage_cls._new_shared_cuda(
storage_device,
storage_handle,
storage_size_bytes,
storage_offset_bytes,
ref_counter_handle,
Reported by Pylint.
Line: 123
Column: 13
shared_cache[(storage_handle, storage_offset_bytes)] = StorageWeakRef(storage)
else:
# We already ref counting this Storage, but producer needs new ref-counters to be released.
storage_cls._release_ipc_counter(ref_counter_handle, ref_counter_offset)
t = torch._utils._rebuild_tensor(storage, tensor_offset, tensor_size, tensor_stride)
if tensor_cls == torch.nn.parameter.Parameter:
# It is crucial for integer tensors to receive
Reported by Pylint.
caffe2/python/operator_test/gru_test.py
63 issues
Line: 14
Column: 1
from caffe2.proto import caffe2_pb2
from functools import partial
from hypothesis import given
from hypothesis import settings as ht_settings
import hypothesis.strategies as st
import numpy as np
import unittest
Reported by Pylint.
Line: 15
Column: 1
from functools import partial
from hypothesis import given
from hypothesis import settings as ht_settings
import hypothesis.strategies as st
import numpy as np
import unittest
Reported by Pylint.
Line: 16
Column: 1
from functools import partial
from hypothesis import given
from hypothesis import settings as ht_settings
import hypothesis.strategies as st
import numpy as np
import unittest
def gru_unit(*args, **kwargs):
Reported by Pylint.
Line: 74
Column: 19
return (hidden_t, )
def gru_reference(input, hidden_input,
reset_gate_w, reset_gate_b,
update_gate_w, update_gate_b,
output_gate_w, output_gate_b,
seq_lengths, drop_states=False,
linear_before_reset=False):
Reported by Pylint.
Line: 174
Column: 36
return dims_.flatmap(create_input)
def _prepare_gru_unit_op(gc, n, d, outputs_with_grads,
forward_only=False, drop_states=False,
sequence_lengths=False,
two_d_initial_states=None):
print("Dims: (n,d) = ({},{})".format(n, d))
Reported by Pylint.
Line: 175
Column: 26
def _prepare_gru_unit_op(gc, n, d, outputs_with_grads,
forward_only=False, drop_states=False,
sequence_lengths=False,
two_d_initial_states=None):
print("Dims: (n,d) = ({},{})".format(n, d))
def generate_input_state(n, d):
Reported by Pylint.
Line: 261
Column: 61
**hu.gcs
)
def test_gru_unit_op(self, seed, input_tensor, fwd_only,
drop_states, sequence_lengths, gc, dc):
np.random.seed(seed)
outputs_with_grads = [0]
ref = gru_unit
ref = partial(ref)
Reported by Pylint.
Line: 267
Column: 9
ref = gru_unit
ref = partial(ref)
t, n, d = input_tensor.shape
assert d % 3 == 0
d = d // 3
ref = partial(ref, drop_states=drop_states,
sequence_lengths=sequence_lengths)
Reported by Pylint.
Line: 285
Column: 14
input_tensor,
device_option=gc)
print(str(net.Proto()))
op = net._net.op[-1]
inputs = [workspace.FetchBlob(name) for name in op.input]
self.assertReferenceChecks(
gc,
op,
Reported by Pylint.
Line: 330
Column: 80
**kwargs)
def gru_base(self, create_rnn, ref, outputs_with_grads,
input_tensor, fwd_only, drop_states, linear_before_reset, gc, dc):
print("GRU test parameters: ", locals())
t, n, d = input_tensor.shape
assert d % 3 == 0
d = d // 3
Reported by Pylint.
test/test_native_functions.py
63 issues
Line: 2
Column: 1
from typing import Optional, List
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
# End-to-end tests of features in native_functions.yaml
class FloatListWrapperModule(torch.nn.Module):
def forward(self, values, incr: Optional[List[float]]):
Reported by Pylint.
Line: 3
Column: 1
from typing import Optional, List
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
# End-to-end tests of features in native_functions.yaml
class FloatListWrapperModule(torch.nn.Module):
def forward(self, values, incr: Optional[List[float]]):
Reported by Pylint.
Line: 10
Column: 16
class FloatListWrapperModule(torch.nn.Module):
def forward(self, values, incr: Optional[List[float]]):
return torch._C._nn._test_optional_floatlist(values, incr)
class IntListWrapperModule(torch.nn.Module):
def forward(self, values, incr: Optional[List[int]]):
return torch._C._nn._test_optional_intlist(values, incr)
Reported by Pylint.
Line: 10
Column: 16
class FloatListWrapperModule(torch.nn.Module):
def forward(self, values, incr: Optional[List[float]]):
return torch._C._nn._test_optional_floatlist(values, incr)
class IntListWrapperModule(torch.nn.Module):
def forward(self, values, incr: Optional[List[int]]):
return torch._C._nn._test_optional_intlist(values, incr)
Reported by Pylint.
Line: 10
Column: 16
class FloatListWrapperModule(torch.nn.Module):
def forward(self, values, incr: Optional[List[float]]):
return torch._C._nn._test_optional_floatlist(values, incr)
class IntListWrapperModule(torch.nn.Module):
def forward(self, values, incr: Optional[List[int]]):
return torch._C._nn._test_optional_intlist(values, incr)
Reported by Pylint.
Line: 15
Column: 16
class IntListWrapperModule(torch.nn.Module):
def forward(self, values, incr: Optional[List[int]]):
return torch._C._nn._test_optional_intlist(values, incr)
class TestNativeFunctions(TestCase):
#
Reported by Pylint.
Line: 15
Column: 16
class IntListWrapperModule(torch.nn.Module):
def forward(self, values, incr: Optional[List[int]]):
return torch._C._nn._test_optional_intlist(values, incr)
class TestNativeFunctions(TestCase):
#
Reported by Pylint.
Line: 15
Column: 16
class IntListWrapperModule(torch.nn.Module):
def forward(self, values, incr: Optional[List[int]]):
return torch._C._nn._test_optional_intlist(values, incr)
class TestNativeFunctions(TestCase):
#
Reported by Pylint.
Line: 39
Column: 20
def trace_optional_floatlist(self, const):
def wrapper(values):
return torch._C._nn._test_optional_floatlist(values, const)
return torch.jit.trace(wrapper, torch.tensor([1.5, 2.5], dtype=torch.float))
def test_optional_floatlist(self):
self.do_test_optional_floatlist_with_module(FloatListWrapperModule())
self.do_test_optional_floatlist_with_module(torch.jit.script(FloatListWrapperModule()))
Reported by Pylint.
Line: 39
Column: 20
def trace_optional_floatlist(self, const):
def wrapper(values):
return torch._C._nn._test_optional_floatlist(values, const)
return torch.jit.trace(wrapper, torch.tensor([1.5, 2.5], dtype=torch.float))
def test_optional_floatlist(self):
self.do_test_optional_floatlist_with_module(FloatListWrapperModule())
self.do_test_optional_floatlist_with_module(torch.jit.script(FloatListWrapperModule()))
Reported by Pylint.
benchmarks/tensorexpr/pt_engine.py
63 issues
Line: 1
Column: 1
import torch
class TorchTensorEngine(object):
def rand(self, shape, device=None, dtype=None, requires_grad=False):
return torch.rand(shape, device=device, dtype=dtype, requires_grad=requires_grad)
def randn(self, shape, device=None, dtype=None, requires_grad=False):
return torch.randn(shape, device=device, dtype=dtype, requires_grad=requires_grad)
Reported by Pylint.
Line: 53
Column: 27
def cat(self, inputs, dim=0):
return torch.cat(inputs, dim=dim)
def clamp(self, data, min, max):
return torch.clamp(data, min=min, max=max)
def relu(self, data):
return torch.nn.functional.relu(data)
Reported by Pylint.
Line: 53
Column: 32
def cat(self, inputs, dim=0):
return torch.cat(inputs, dim=dim)
def clamp(self, data, min, max):
return torch.clamp(data, min=min, max=max)
def relu(self, data):
return torch.nn.functional.relu(data)
Reported by Pylint.
Line: 1
Column: 1
import torch
class TorchTensorEngine(object):
def rand(self, shape, device=None, dtype=None, requires_grad=False):
return torch.rand(shape, device=device, dtype=dtype, requires_grad=requires_grad)
def randn(self, shape, device=None, dtype=None, requires_grad=False):
return torch.randn(shape, device=device, dtype=dtype, requires_grad=requires_grad)
Reported by Pylint.
Line: 4
Column: 1
import torch
class TorchTensorEngine(object):
def rand(self, shape, device=None, dtype=None, requires_grad=False):
return torch.rand(shape, device=device, dtype=dtype, requires_grad=requires_grad)
def randn(self, shape, device=None, dtype=None, requires_grad=False):
return torch.randn(shape, device=device, dtype=dtype, requires_grad=requires_grad)
Reported by Pylint.
Line: 4
Column: 1
import torch
class TorchTensorEngine(object):
def rand(self, shape, device=None, dtype=None, requires_grad=False):
return torch.rand(shape, device=device, dtype=dtype, requires_grad=requires_grad)
def randn(self, shape, device=None, dtype=None, requires_grad=False):
return torch.randn(shape, device=device, dtype=dtype, requires_grad=requires_grad)
Reported by Pylint.
Line: 4
Column: 1
import torch
class TorchTensorEngine(object):
def rand(self, shape, device=None, dtype=None, requires_grad=False):
return torch.rand(shape, device=device, dtype=dtype, requires_grad=requires_grad)
def randn(self, shape, device=None, dtype=None, requires_grad=False):
return torch.randn(shape, device=device, dtype=dtype, requires_grad=requires_grad)
Reported by Pylint.
Line: 5
Column: 5
class TorchTensorEngine(object):
def rand(self, shape, device=None, dtype=None, requires_grad=False):
return torch.rand(shape, device=device, dtype=dtype, requires_grad=requires_grad)
def randn(self, shape, device=None, dtype=None, requires_grad=False):
return torch.randn(shape, device=device, dtype=dtype, requires_grad=requires_grad)
Reported by Pylint.
Line: 5
Column: 5
class TorchTensorEngine(object):
def rand(self, shape, device=None, dtype=None, requires_grad=False):
return torch.rand(shape, device=device, dtype=dtype, requires_grad=requires_grad)
def randn(self, shape, device=None, dtype=None, requires_grad=False):
return torch.randn(shape, device=device, dtype=dtype, requires_grad=requires_grad)
Reported by Pylint.
Line: 8
Column: 5
def rand(self, shape, device=None, dtype=None, requires_grad=False):
return torch.rand(shape, device=device, dtype=dtype, requires_grad=requires_grad)
def randn(self, shape, device=None, dtype=None, requires_grad=False):
return torch.randn(shape, device=device, dtype=dtype, requires_grad=requires_grad)
def nchw_rand(self, shape, device=None, requires_grad=False):
return self.rand(shape, device=device, requires_grad=requires_grad)
Reported by Pylint.
caffe2/python/scope_test.py
63 issues
Line: 16
Column: 5
def thread_runner(idx, testobj):
global SUCCESS_COUNT
testobj.assertEquals(scope.CurrentNameScope(), "")
testobj.assertEquals(scope.CurrentDeviceScope(), None)
namescope = "namescope_{}".format(idx)
dsc = core.DeviceOption(workspace.GpuDeviceType, idx)
with scope.DeviceScope(dsc):
Reported by Pylint.
Line: 38
Column: 9
class TestScope(unittest.TestCase):
def testNamescopeBasic(self):
self.assertEquals(scope.CurrentNameScope(), "")
with scope.NameScope("test_scope"):
self.assertEquals(scope.CurrentNameScope(), "test_scope/")
self.assertEquals(scope.CurrentNameScope(), "")
Reported by Pylint.
Line: 41
Column: 13
self.assertEquals(scope.CurrentNameScope(), "")
with scope.NameScope("test_scope"):
self.assertEquals(scope.CurrentNameScope(), "test_scope/")
self.assertEquals(scope.CurrentNameScope(), "")
def testNamescopeAssertion(self):
self.assertEquals(scope.CurrentNameScope(), "")
Reported by Pylint.
Line: 43
Column: 9
with scope.NameScope("test_scope"):
self.assertEquals(scope.CurrentNameScope(), "test_scope/")
self.assertEquals(scope.CurrentNameScope(), "")
def testNamescopeAssertion(self):
self.assertEquals(scope.CurrentNameScope(), "")
try:
Reported by Pylint.
Line: 46
Column: 9
self.assertEquals(scope.CurrentNameScope(), "")
def testNamescopeAssertion(self):
self.assertEquals(scope.CurrentNameScope(), "")
try:
with scope.NameScope("test_scope"):
self.assertEquals(scope.CurrentNameScope(), "test_scope/")
raise Exception()
Reported by Pylint.
Line: 50
Column: 17
try:
with scope.NameScope("test_scope"):
self.assertEquals(scope.CurrentNameScope(), "test_scope/")
raise Exception()
except Exception:
pass
self.assertEquals(scope.CurrentNameScope(), "")
Reported by Pylint.
Line: 52
Column: 16
with scope.NameScope("test_scope"):
self.assertEquals(scope.CurrentNameScope(), "test_scope/")
raise Exception()
except Exception:
pass
self.assertEquals(scope.CurrentNameScope(), "")
def testEmptyNamescopeBasic(self):
Reported by Pylint.
Line: 55
Column: 9
except Exception:
pass
self.assertEquals(scope.CurrentNameScope(), "")
def testEmptyNamescopeBasic(self):
self.assertEquals(scope.CurrentNameScope(), "")
with scope.NameScope("test_scope"):
Reported by Pylint.
Line: 58
Column: 9
self.assertEquals(scope.CurrentNameScope(), "")
def testEmptyNamescopeBasic(self):
self.assertEquals(scope.CurrentNameScope(), "")
with scope.NameScope("test_scope"):
with scope.EmptyNameScope():
self.assertEquals(scope.CurrentNameScope(), "")
self.assertEquals(scope.CurrentNameScope(), "test_scope/")
Reported by Pylint.
Line: 62
Column: 17
with scope.NameScope("test_scope"):
with scope.EmptyNameScope():
self.assertEquals(scope.CurrentNameScope(), "")
self.assertEquals(scope.CurrentNameScope(), "test_scope/")
def testDevicescopeBasic(self):
self.assertEquals(scope.CurrentDeviceScope(), None)
Reported by Pylint.
torch/utils/tensorboard/_caffe2_graph.py
63 issues
Line: 6
Column: 1
import os
import re
from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from builtins import bytes
from caffe2.proto import caffe2_pb2
Reported by Pylint.
Line: 7
Column: 1
import re
from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from builtins import bytes
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
Reported by Pylint.
Line: 8
Column: 1
from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from builtins import bytes
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
Reported by Pylint.
Line: 751
Column: 12
# We don't care about the types, only the shapes
shapes, _ = workspace.InferShapesAndTypes(nets)
return shapes
except Exception as e:
logging.warning('Failed to compute shapes: %s', e)
return {}
def model_to_graph_def(model, **kwargs):
Reported by Pylint.
Line: 1
Column: 1
import copy
import logging
import os
import re
from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
Reported by Pylint.
Line: 10
Column: 1
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from builtins import bytes
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from typing import Set, Dict, Tuple, List
Reported by Pylint.
Line: 11
Column: 1
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from builtins import bytes
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from typing import Set, Dict, Tuple, List
Reported by Pylint.
Line: 12
Column: 1
from builtins import bytes
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from typing import Set, Dict, Tuple, List
def _make_unique_name(seen: Set[str], name: str, min_version: int = 0):
Reported by Pylint.
Line: 14
Column: 1
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from typing import Set, Dict, Tuple, List
def _make_unique_name(seen: Set[str], name: str, min_version: int = 0):
'''
Make the name unique by appending a unique number to the name. Used for SSA.
Reported by Pylint.
Line: 31
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
Returns:
x (string): A version of name that is not in seen.
'''
assert name is not None
i = min_version
x = '%s_%d' % (name, i) if i else name
while x in seen:
i += 1
x = '%s_%d' % (name, i)
Reported by Bandit.
torch/utils/mkldnn.py
63 issues
Line: 16
Column: 17
# TODO: Remove this once ScriptModule supports registering None buffer
self.register_buffer(
'bias',
torch.zeros([dense_module.weight.size(0)], dtype=torch.float).to_mkldnn())
@torch.jit.script_method
def __getstate__(self):
return (self.weight.to_dense(), self.bias.to_dense(), self.training)
Reported by Pylint.
Line: 16
Column: 66
# TODO: Remove this once ScriptModule supports registering None buffer
self.register_buffer(
'bias',
torch.zeros([dense_module.weight.size(0)], dtype=torch.float).to_mkldnn())
@torch.jit.script_method
def __getstate__(self):
return (self.weight.to_dense(), self.bias.to_dense(), self.training)
Reported by Pylint.
Line: 56
Column: 17
# TODO: Remove this once ScriptModule supports registering None buffer
self.register_buffer(
'bias',
torch.zeros([dense_module.weight.size(0)], dtype=torch.float).to_mkldnn())
@torch.jit.script_method
def __getstate__(self):
return (self.weight.to_dense(), self.bias.to_dense(), self.training)
Reported by Pylint.
Line: 56
Column: 66
# TODO: Remove this once ScriptModule supports registering None buffer
self.register_buffer(
'bias',
torch.zeros([dense_module.weight.size(0)], dtype=torch.float).to_mkldnn())
@torch.jit.script_method
def __getstate__(self):
return (self.weight.to_dense(), self.bias.to_dense(), self.training)
Reported by Pylint.
Line: 64
Column: 16
@torch.jit.script_method
def forward(self, x):
return torch.mkldnn_convolution(
x,
self.weight,
self.bias,
self.padding,
self.stride,
Reported by Pylint.
Line: 171
Column: 16
@torch.jit.script_method
def forward(self, x):
return torch.batch_norm(
x,
self.weight,
self.bias,
self.running_mean,
self.running_var,
Reported by Pylint.
Line: 184
Column: 29
)
def to_mkldnn(module, dtype=torch.float):
assert dtype in [torch.float, torch.bfloat16], \
"MKLDNN only support float or bfloat16 path now"
def m_fn(m, d):
if isinstance(m, torch.nn.Linear):
Reported by Pylint.
Line: 185
Column: 35
def to_mkldnn(module, dtype=torch.float):
assert dtype in [torch.float, torch.bfloat16], \
"MKLDNN only support float or bfloat16 path now"
def m_fn(m, d):
if isinstance(m, torch.nn.Linear):
return MkldnnLinear(m, d)
Reported by Pylint.
Line: 185
Column: 22
def to_mkldnn(module, dtype=torch.float):
assert dtype in [torch.float, torch.bfloat16], \
"MKLDNN only support float or bfloat16 path now"
def m_fn(m, d):
if isinstance(m, torch.nn.Linear):
return MkldnnLinear(m, d)
Reported by Pylint.
Line: 13
Column: 3
# we use fp32 dtype.
self.register_buffer('bias', dense_module.bias.to_mkldnn())
else:
# TODO: Remove this once ScriptModule supports registering None buffer
self.register_buffer(
'bias',
torch.zeros([dense_module.weight.size(0)], dtype=torch.float).to_mkldnn())
@torch.jit.script_method
Reported by Pylint.
torch/optim/_multi_tensor/rmsprop.py
63 issues
Line: 2
Column: 1
import torch
from ..optimizer import Optimizer
from collections import defaultdict
class RMSprop(Optimizer):
r"""Implements RMSprop algorithm.
Proposed by G. Hinton in his
`course <https://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_.
Reported by Pylint.
Line: 87
Column: 47
# State initialization
if len(state) == 0:
state['step'] = 0
state['square_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['momentum'] > 0:
state['momentum_buffer'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['centered']:
state['grad_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
Reported by Pylint.
Line: 87
Column: 81
# State initialization
if len(state) == 0:
state['step'] = 0
state['square_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['momentum'] > 0:
state['momentum_buffer'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['centered']:
state['grad_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
Reported by Pylint.
Line: 89
Column: 90
state['step'] = 0
state['square_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['momentum'] > 0:
state['momentum_buffer'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['centered']:
state['grad_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
state['step'] += 1
Reported by Pylint.
Line: 89
Column: 56
state['step'] = 0
state['square_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['momentum'] > 0:
state['momentum_buffer'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['centered']:
state['grad_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
state['step'] += 1
Reported by Pylint.
Line: 91
Column: 49
if group['momentum'] > 0:
state['momentum_buffer'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['centered']:
state['grad_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
state['step'] += 1
states.append(state)
square_avg.append(state['square_avg'])
Reported by Pylint.
Line: 91
Column: 83
if group['momentum'] > 0:
state['momentum_buffer'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['centered']:
state['grad_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
state['step'] += 1
states.append(state)
square_avg.append(state['square_avg'])
Reported by Pylint.
Line: 99
Column: 17
square_avg.append(state['square_avg'])
if group['weight_decay'] != 0:
torch._foreach_add_(grads, params_with_grad, alpha=group['weight_decay'])
torch._foreach_mul_(square_avg, alpha)
torch._foreach_addcmul_(square_avg, grads, grads, value=1 - alpha)
if group['centered']:
Reported by Pylint.
Line: 101
Column: 13
if group['weight_decay'] != 0:
torch._foreach_add_(grads, params_with_grad, alpha=group['weight_decay'])
torch._foreach_mul_(square_avg, alpha)
torch._foreach_addcmul_(square_avg, grads, grads, value=1 - alpha)
if group['centered']:
grad_avgs = [s['grad_avg'] for s in states]
torch._foreach_mul_(grad_avgs, alpha)
Reported by Pylint.
Line: 102
Column: 13
torch._foreach_add_(grads, params_with_grad, alpha=group['weight_decay'])
torch._foreach_mul_(square_avg, alpha)
torch._foreach_addcmul_(square_avg, grads, grads, value=1 - alpha)
if group['centered']:
grad_avgs = [s['grad_avg'] for s in states]
torch._foreach_mul_(grad_avgs, alpha)
torch._foreach_add_(grad_avgs, grads, alpha=1 - alpha)
Reported by Pylint.
caffe2/python/brew_test.py
62 issues
Line: 21
Column: 16
def myhelper(model, val=-1):
return val
if not brew.has_helper(myhelper):
brew.Register(myhelper)
self.myhelper = myhelper
def myhelper2(model, val=-1):
return val
Reported by Pylint.
Line: 22
Column: 13
return val
if not brew.has_helper(myhelper):
brew.Register(myhelper)
self.myhelper = myhelper
def myhelper2(model, val=-1):
return val
Reported by Pylint.
Line: 28
Column: 16
def myhelper2(model, val=-1):
return val
if not brew.has_helper(myhelper2):
brew.Register(myhelper2)
self.myhelper2 = myhelper2
self.model = ModelHelper(name="test_model")
def test_dropout(self):
Reported by Pylint.
Line: 29
Column: 13
return val
if not brew.has_helper(myhelper2):
brew.Register(myhelper2)
self.myhelper2 = myhelper2
self.model = ModelHelper(name="test_model")
def test_dropout(self):
p = 0.2
Reported by Pylint.
Line: 101
Column: 19
myhelper2 = self.myhelper2
n = 15
with brew.arg_scope([myhelper], val=n):
res = brew.myhelper(self.model)
self.assertEqual(n, res)
with brew.arg_scope([myhelper, myhelper2], val=n):
res1 = brew.myhelper(self.model)
res2 = brew.myhelper2(self.model)
Reported by Pylint.
Line: 105
Column: 20
self.assertEqual(n, res)
with brew.arg_scope([myhelper, myhelper2], val=n):
res1 = brew.myhelper(self.model)
res2 = brew.myhelper2(self.model)
self.assertEqual([n, n], [res1, res2])
def test_arg_scope_single(self):
X = np.random.rand(64, 3, 32, 32).astype(np.float32) - 0.5
Reported by Pylint.
Line: 106
Column: 20
with brew.arg_scope([myhelper, myhelper2], val=n):
res1 = brew.myhelper(self.model)
res2 = brew.myhelper2(self.model)
self.assertEqual([n, n], [res1, res2])
def test_arg_scope_single(self):
X = np.random.rand(64, 3, 32, 32).astype(np.float32) - 0.5
Reported by Pylint.
Line: 141
Column: 23
with brew.arg_scope([myhelper], val=-3), \
brew.arg_scope([myhelper], val=-2):
with brew.arg_scope([myhelper], val=n):
res = brew.myhelper(self.model)
self.assertEqual(n, res)
res = brew.myhelper(self.model)
self.assertEqual(res, -2)
res = brew.myhelper(self.model, val=15)
Reported by Pylint.
Line: 143
Column: 19
with brew.arg_scope([myhelper], val=n):
res = brew.myhelper(self.model)
self.assertEqual(n, res)
res = brew.myhelper(self.model)
self.assertEqual(res, -2)
res = brew.myhelper(self.model, val=15)
self.model.Validate()
self.assertEqual(res, 15)
Reported by Pylint.
Line: 146
Column: 15
res = brew.myhelper(self.model)
self.assertEqual(res, -2)
res = brew.myhelper(self.model, val=15)
self.model.Validate()
self.assertEqual(res, 15)
def test_double_register(self):
myhelper = self.myhelper
Reported by Pylint.
caffe2/contrib/fakelowp/test/test_fc_nnpi_fp16.py
62 issues
Line: 4
Column: 1
import numpy as np
import unittest
import caffe2.python.fakelowp.init_shared_libs # noqa
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python import workspace
Reported by Pylint.
Line: 5
Column: 1
import unittest
import caffe2.python.fakelowp.init_shared_libs # noqa
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python import workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
Reported by Pylint.
Line: 6
Column: 1
import caffe2.python.fakelowp.init_shared_libs # noqa
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python import workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
Reported by Pylint.
Line: 7
Column: 1
import caffe2.python.fakelowp.init_shared_libs # noqa
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python import workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import datetime
Reported by Pylint.
Line: 8
Column: 1
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python import workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import datetime
import caffe2.python.serialized_test.serialized_test_util as serial
Reported by Pylint.
Line: 9
Column: 1
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python import workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import datetime
import caffe2.python.serialized_test.serialized_test_util as serial
Reported by Pylint.
Line: 10
Column: 1
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python import workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import datetime
import caffe2.python.serialized_test.serialized_test_util as serial
core.GlobalInit(["caffe2", "--caffe2_log_level=-3", "--glow_global_fp16=1"])
Reported by Pylint.
Line: 11
Column: 1
from caffe2.python import core
from caffe2.python import workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import datetime
import caffe2.python.serialized_test.serialized_test_util as serial
core.GlobalInit(["caffe2", "--caffe2_log_level=-3", "--glow_global_fp16=1"])
Reported by Pylint.
Line: 13
Column: 1
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import datetime
import caffe2.python.serialized_test.serialized_test_util as serial
core.GlobalInit(["caffe2", "--caffe2_log_level=-3", "--glow_global_fp16=1"])
GLOW_MATMUL_RTOL = 0
Reported by Pylint.
Line: 4
Column: 1
import numpy as np
import unittest
import caffe2.python.fakelowp.init_shared_libs # noqa
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python import workspace
Reported by Pylint.