The following issues were found:
caffe2/python/onnx/helper.py
28 issues
Line: 9
Column: 1
from caffe2.proto import caffe2_pb2
from onnx.backend.base import namedtupledict
from caffe2.python.onnx.workspace import Workspace
import logging
import time
Reported by Pylint.
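The message text is not shown above, but a column-1 finding on this import header is typically wrong-import-order or ungrouped-imports. A minimal sketch of a PEP 8-grouped version of the same imports, assuming that is the finding:
# Standard library first, then third-party, then first-party (PEP 8 grouping).
import logging
import time

from onnx.backend.base import namedtupledict

from caffe2.proto import caffe2_pb2
from caffe2.python.onnx.workspace import Workspace

log = logging.getLogger(__name__)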
Line: 103
Column: 44
    return results[0]
def benchmark_pytorch_model(model, inputs, training=False, warmup_iters=3,
                            main_iters=10, verbose=False):
    '''
    Run the model several times, and measure the execution time.
    Return the execution time per iteration (millisecond).
    '''
Reported by Pylint.
Line: 104
Column: 44
def benchmark_pytorch_model(model, inputs, training=False, warmup_iters=3,
                            main_iters=10, verbose=False):
    '''
    Run the model several times, and measure the execution time.
    Return the execution time per iteration (millisecond).
    '''
    for _i in range(warmup_iters):
Reported by Pylint.
Line: 117
Column: 14
        model(*inputs)
        te = time.time()
        total_pytorch_time += te - ts
    log.info("The PyTorch model execution time per iter is {} milliseconds, "
             "{} iters per second.".format(total_pytorch_time / main_iters * 1000,
                                           main_iters / total_pytorch_time))
    return total_pytorch_time * 1000 / main_iters
Reported by Pylint.
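The flagged call formats the message eagerly with str.format inside log.info; if the warning is logging-format-interpolation, handing the arguments to the logging framework defers the formatting and satisfies the checker. A sketch under that assumption:
    # Let the logging framework interpolate only when the record is emitted.
    log.info(
        "The PyTorch model execution time per iter is %s milliseconds, "
        "%s iters per second.",
        total_pytorch_time / main_iters * 1000,
        main_iters / total_pytorch_time,
    )
    return total_pytorch_time * 1000 / main_iters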
Line: 1
Column: 1
## @package onnx
# Module caffe2.python.onnx.helper
from caffe2.proto import caffe2_pb2
from onnx.backend.base import namedtupledict
Reported by Pylint.
Line: 11
Column: 1
from caffe2.proto import caffe2_pb2
from onnx.backend.base import namedtupledict
from caffe2.python.onnx.workspace import Workspace
import logging
import time
log = logging.getLogger(__name__)
Reported by Pylint.
Line: 11
Column: 1
from caffe2.proto import caffe2_pb2
from onnx.backend.base import namedtupledict
from caffe2.python.onnx.workspace import Workspace
import logging
import time
log = logging.getLogger(__name__)
Reported by Pylint.
Line: 12
Column: 1
from onnx.backend.base import namedtupledict
from caffe2.python.onnx.workspace import Workspace
import logging
import time
log = logging.getLogger(__name__)
Reported by Pylint.
Line: 13
Column: 1
from caffe2.python.onnx.workspace import Workspace
import logging
import time
log = logging.getLogger(__name__)
Reported by Pylint.
Line: 19
Column: 1
log = logging.getLogger(__name__)
def c2_native_run_op(op_def, inputs):
    ws = Workspace()
    if isinstance(inputs, dict):
        for key, value in inputs.items():
            ws.FeedBlob(key, value, op_def.device_option)
    else:
Reported by Pylint.
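A column-1 finding on a def line is often missing-function-docstring. If that is what was reported here, a one-line docstring is enough; a sketch (the docstring wording is ours, not from the source):
def c2_native_run_op(op_def, inputs):
    """Run a single operator in a scratch Workspace and return its outputs."""
    ws = Workspace()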
caffe2/quantization/server/resize_nearest_dnnlowp_op_test.py
28 issues
Line: 4
Column: 1
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from hypothesis import given, settings
Reported by Pylint.
Line: 7
Column: 1
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from hypothesis import given, settings
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
Reported by Pylint.
Line: 25
Column: 65
        **hu.gcs_cpu_only
    )
    @settings(deadline=None, max_examples=50)
    def test_resize_nearest(self, N, H, W, C, scale_w, scale_h, gc, dc):
        X = np.round(np.random.rand(N, H, W, C) * 255).astype(np.float32)
        quantize = core.CreateOperator("Quantize", ["X"], ["X_q"], engine="DNNLOWP")
        resize_nearest = core.CreateOperator(
            "Int8ResizeNearest",
Reported by Pylint.
Line: 25
Column: 69
        **hu.gcs_cpu_only
    )
    @settings(deadline=None, max_examples=50)
    def test_resize_nearest(self, N, H, W, C, scale_w, scale_h, gc, dc):
        X = np.round(np.random.rand(N, H, W, C) * 255).astype(np.float32)
        quantize = core.CreateOperator("Quantize", ["X"], ["X_q"], engine="DNNLOWP")
        resize_nearest = core.CreateOperator(
            "Int8ResizeNearest",
Reported by Pylint.
Line: 1
Column: 1
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from hypothesis import given, settings
Reported by Pylint.
Line: 14
Column: 1
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPResizeNearestOpTest(hu.HypothesisTestCase):
    @given(
        N=st.integers(0, 3),
        H=st.integers(10, 300),
        W=st.integers(10, 300),
        C=st.integers(1, 32),
Reported by Pylint.
Line: 24
Column: 5
        scale_h=st.floats(0.25, 4.0) | st.just(2.0),
        **hu.gcs_cpu_only
    )
    @settings(deadline=None, max_examples=50)
    def test_resize_nearest(self, N, H, W, C, scale_w, scale_h, gc, dc):
        X = np.round(np.random.rand(N, H, W, C) * 255).astype(np.float32)
        quantize = core.CreateOperator("Quantize", ["X"], ["X_q"], engine="DNNLOWP")
        resize_nearest = core.CreateOperator(
Reported by Pylint.
Line: 24
Column: 5
        scale_h=st.floats(0.25, 4.0) | st.just(2.0),
        **hu.gcs_cpu_only
    )
    @settings(deadline=None, max_examples=50)
    def test_resize_nearest(self, N, H, W, C, scale_w, scale_h, gc, dc):
        X = np.round(np.random.rand(N, H, W, C) * 255).astype(np.float32)
        quantize = core.CreateOperator("Quantize", ["X"], ["X_q"], engine="DNNLOWP")
        resize_nearest = core.CreateOperator(
Reported by Pylint.
Line: 24
Column: 5
        scale_h=st.floats(0.25, 4.0) | st.just(2.0),
        **hu.gcs_cpu_only
    )
    @settings(deadline=None, max_examples=50)
    def test_resize_nearest(self, N, H, W, C, scale_w, scale_h, gc, dc):
        X = np.round(np.random.rand(N, H, W, C) * 255).astype(np.float32)
        quantize = core.CreateOperator("Quantize", ["X"], ["X_q"], engine="DNNLOWP")
        resize_nearest = core.CreateOperator(
Reported by Pylint.
Line: 24
Column: 5
        scale_h=st.floats(0.25, 4.0) | st.just(2.0),
        **hu.gcs_cpu_only
    )
    @settings(deadline=None, max_examples=50)
    def test_resize_nearest(self, N, H, W, C, scale_w, scale_h, gc, dc):
        X = np.round(np.random.rand(N, H, W, C) * 255).astype(np.float32)
        quantize = core.CreateOperator("Quantize", ["X"], ["X_q"], engine="DNNLOWP")
        resize_nearest = core.CreateOperator(
Reported by Pylint.
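Four findings point at the same def line; with the message text elided, the usual candidates for a Hypothesis signature like this are invalid-name (N, H, W, C, gc, dc) and too-many-arguments. Because the parameter names must match the @given keywords, a targeted inline disable is the common remedy; a sketch, assuming those are the messages:
    @settings(deadline=None, max_examples=50)
    def test_resize_nearest(  # pylint: disable=invalid-name,too-many-arguments
        self, N, H, W, C, scale_w, scale_h, gc, dc
    ):
        X = np.round(np.random.rand(N, H, W, C) * 255).astype(np.float32)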
caffe2/python/mkl/rewrite_graph_test.py
28 issues
Line: 9
Column: 1
import unittest
import numpy as np
import copy
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python.model_helper import ModelHelper
from caffe2.python.models import resnet
from caffe2.python import workspace, brew
Reported by Pylint.
Line: 10
Column: 1
import numpy as np
import copy
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python.model_helper import ModelHelper
from caffe2.python.models import resnet
from caffe2.python import workspace, brew
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 183
Column: 22
    return model, [(1, 1, 224, 224)]
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class MKLRewriteTest(hu.HypothesisTestCase):
    @given(gen=st.sampled_from([simple_relu, simple_fc,
                                simple_mlp, simple_cnn]))
    def test_mkl_simple_rewrite(self, gen):
        cpu_model, (shape,) = gen()
Reported by Pylint.
Line: 254
Column: 5
            atol=1e-4, rtol=1e-4)
if __name__ == "__main__":
    import unittest
    unittest.main()
Reported by Pylint.
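unittest is already imported at the top of this file (see the snippets above), so the finding on line 254 plausibly concerns the re-import inside the __main__ guard (reimported). If so, the guard can simply reuse the module-level import:
if __name__ == "__main__":
    unittest.main()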
Line: 1
Column: 1
import unittest
import numpy as np
import copy
from hypothesis import given
Reported by Pylint.
Line: 8
Column: 1
import unittest
import numpy as np
import copy
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python.model_helper import ModelHelper
from caffe2.python.models import resnet
Reported by Pylint.
Line: 19
Column: 1
import caffe2.python.mkl.rewrite_graph as rewrite_graph
def deterministic_io(model):
    model = copy.deepcopy(model)
    for i, op in enumerate(model.InitProto().op):
        op.device_option.random_seed = i + 1
    if not model.Proto().external_output:
        model.Proto().external_output.extend([model.Proto().op[-1].output[0]])
Reported by Pylint.
Line: 21
Column: 12
def deterministic_io(model):
    model = copy.deepcopy(model)
    for i, op in enumerate(model.InitProto().op):
        op.device_option.random_seed = i + 1
    if not model.Proto().external_output:
        model.Proto().external_output.extend([model.Proto().op[-1].output[0]])
    return model
Reported by Pylint.
Line: 27
Column: 1
        model.Proto().external_output.extend([model.Proto().op[-1].output[0]])
    return model
def simple_fc():
    model = ModelHelper(name="r")
    brew.fc(model, "data", "fc", 10, 10)
    return model, [(1, 10)]
def double_matmul():
Reported by Pylint.
Line: 32
Column: 1
    brew.fc(model, "data", "fc", 10, 10)
    return model, [(1, 10)]
def double_matmul():
    model = ModelHelper(name="r")
    fc0 = brew.fc(model, "data", "fc0", 10, 10)
    fc1 = brew.fc(model, fc0, "fc1", 10, 10)
    model.Proto().external_output[:] = [str(fc0), str(fc1)]
    return model, [(1, 10)]
Reported by Pylint.
torch/distributions/dirichlet.py
28 issues
Line: 11
Column: 12
# This helper is exposed for testing.
def _Dirichlet_backward(x, concentration, grad_output):
    total = concentration.sum(-1, True).expand_as(concentration)
    grad = torch._dirichlet_grad(x, concentration, total)
    return grad * (grad_output - (x * grad_output).sum(-1, True))
class _Dirichlet(Function):
    @staticmethod
Reported by Pylint.
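Column 12 on line 11 lands on torch._dirichlet_grad, which reads like protected-access. That binding appears to have no public alias, so if that is the message the conventional fix is a line-scoped disable rather than a rewrite; a sketch under that assumption:
def _Dirichlet_backward(x, concentration, grad_output):
    total = concentration.sum(-1, True).expand_as(concentration)
    # Internal ATen binding; silence only this line (assumed protected-access finding).
    grad = torch._dirichlet_grad(x, concentration, total)  # pylint: disable=protected-access
    return grad * (grad_output - (x * grad_output).sum(-1, True))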
Line: 18
Column: 13
class _Dirichlet(Function):
    @staticmethod
    def forward(ctx, concentration):
        x = torch._sample_dirichlet(concentration)
        ctx.save_for_backward(x, concentration)
        return x
    @staticmethod
    @once_differentiable
Reported by Pylint.
Line: 56
Column: 23
    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(Dirichlet, _instance)
        batch_shape = torch.Size(batch_shape)
        new.concentration = self.concentration.expand(batch_shape + self.event_shape)
        super(Dirichlet, new).__init__(batch_shape, self.event_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new
Reported by Pylint.
Line: 70
Column: 18
    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        return ((torch.log(value) * (self.concentration - 1.0)).sum(-1) +
                torch.lgamma(self.concentration.sum(-1)) -
                torch.lgamma(self.concentration).sum(-1))
    @property
    def mean(self):
Reported by Pylint.
Line: 71
Column: 17
        if self._validate_args:
            self._validate_sample(value)
        return ((torch.log(value) * (self.concentration - 1.0)).sum(-1) +
                torch.lgamma(self.concentration.sum(-1)) -
                torch.lgamma(self.concentration).sum(-1))
    @property
    def mean(self):
        return self.concentration / self.concentration.sum(-1, True)
Reported by Pylint.
Line: 72
Column: 17
            self._validate_sample(value)
        return ((torch.log(value) * (self.concentration - 1.0)).sum(-1) +
                torch.lgamma(self.concentration.sum(-1)) -
                torch.lgamma(self.concentration).sum(-1))
    @property
    def mean(self):
        return self.concentration / self.concentration.sum(-1, True)
Reported by Pylint.
Line: 86
Column: 17
    def entropy(self):
        k = self.concentration.size(-1)
        a0 = self.concentration.sum(-1)
        return (torch.lgamma(self.concentration).sum(-1) - torch.lgamma(a0) -
                (k - a0) * torch.digamma(a0) -
                ((self.concentration - 1.0) * torch.digamma(self.concentration)).sum(-1))
    @property
    def _natural_params(self):
Reported by Pylint.
Line: 86
Column: 60
    def entropy(self):
        k = self.concentration.size(-1)
        a0 = self.concentration.sum(-1)
        return (torch.lgamma(self.concentration).sum(-1) - torch.lgamma(a0) -
                (k - a0) * torch.digamma(a0) -
                ((self.concentration - 1.0) * torch.digamma(self.concentration)).sum(-1))
    @property
    def _natural_params(self):
Reported by Pylint.
Line: 87
Column: 28
        k = self.concentration.size(-1)
        a0 = self.concentration.sum(-1)
        return (torch.lgamma(self.concentration).sum(-1) - torch.lgamma(a0) -
                (k - a0) * torch.digamma(a0) -
                ((self.concentration - 1.0) * torch.digamma(self.concentration)).sum(-1))
    @property
    def _natural_params(self):
        return (self.concentration, )
Reported by Pylint.
Line: 88
Column: 47
        a0 = self.concentration.sum(-1)
        return (torch.lgamma(self.concentration).sum(-1) - torch.lgamma(a0) -
                (k - a0) * torch.digamma(a0) -
                ((self.concentration - 1.0) * torch.digamma(self.concentration)).sum(-1))
    @property
    def _natural_params(self):
        return (self.concentration, )
Reported by Pylint.
torch/sparse/__init__.py
28 issues
Line: 42
Column: 12
        beta (Number, optional): multiplier for :attr:`mat` (:math:`\beta`)
        alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
    """
    return torch._sparse_addmm(mat, mat1, mat2, beta=beta, alpha=alpha)
def mm(mat1: Tensor, mat2: Tensor) -> Tensor:
    r"""
    Performs a matrix multiplication of the sparse matrix :attr:`mat1`
Reported by Pylint.
Line: 90
Column: 16
               size=(2, 3), nnz=6, layout=torch.sparse_coo)
    """
    if mat1.is_sparse and mat2.is_sparse:
        return torch._sparse_sparse_matmul(mat1, mat2)
    return torch._sparse_mm(mat1, mat2)
def sum(input: Tensor, dim: DimOrDims = None,
        dtype: Optional[DType] = None) -> Tensor:
Reported by Pylint.
Line: 91
Column: 12
"""
if mat1.is_sparse and mat2.is_sparse:
return torch._sparse_sparse_matmul(mat1, mat2)
return torch._sparse_mm(mat1, mat2)
def sum(input: Tensor, dim: DimOrDims = None,
dtype: Optional[DType] = None) -> Tensor:
r"""
Reported by Pylint.
Line: 152
Column: 20
"""
if dtype is None:
if dim is not None:
return torch._sparse_sum(input, dim)
else:
return torch._sparse_sum(input)
else:
if dim is not None:
return torch._sparse_sum(input, dim, dtype=dtype)
Reported by Pylint.
Line: 154
Column: 20
        if dim is not None:
            return torch._sparse_sum(input, dim)
        else:
            return torch._sparse_sum(input)
    else:
        if dim is not None:
            return torch._sparse_sum(input, dim, dtype=dtype)
        else:
            return torch._sparse_sum(input, dtype=dtype)
Reported by Pylint.
Line: 157
Column: 20
            return torch._sparse_sum(input)
    else:
        if dim is not None:
            return torch._sparse_sum(input, dim, dtype=dtype)
        else:
            return torch._sparse_sum(input, dtype=dtype)
def softmax(input: Tensor, dim: int, dtype: Optional[DType] = None) -> Tensor:
Reported by Pylint.
Line: 159
Column: 20
        if dim is not None:
            return torch._sparse_sum(input, dim, dtype=dtype)
        else:
            return torch._sparse_sum(input, dtype=dtype)
def softmax(input: Tensor, dim: int, dtype: Optional[DType] = None) -> Tensor:
    r"""Applies a softmax function.
Reported by Pylint.
Line: 186
Column: 12
            performed. This is useful for preventing data type
            overflows. Default: None
    """
    return torch._sparse_softmax(input, dim, dtype=dtype)
def log_softmax(input: Tensor, dim: int, dtype: Optional[DType] = None) -> Tensor:
    r"""Applies a softmax function followed by logarithm.
Reported by Pylint.
Line: 203
Column: 12
            performed. This is useful for preventing data type
            overflows. Default: None
    """
    return torch._sparse_log_softmax(input, dim, dtype=dtype)
Reported by Pylint.
Line: 42
Column: 12
        beta (Number, optional): multiplier for :attr:`mat` (:math:`\beta`)
        alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
    """
    return torch._sparse_addmm(mat, mat1, mat2, beta=beta, alpha=alpha)
def mm(mat1: Tensor, mat2: Tensor) -> Tensor:
    r"""
    Performs a matrix multiplication of the sparse matrix :attr:`mat1`
Reported by Pylint.
caffe2/contrib/tensorboard/tensorboard.py
28 issues
Line: 61
Column: 5
def _show_graph(graph_def):
    import IPython.display
    code = CODE_TEMPLATE.format(
        data=repr(str(graph_def)),
        id='graph' + str(np.random.rand()),
        height=Config.HEIGHT)
Reported by Pylint.
Line: 118
Column: 9
log.setLevel(logging.INFO)
def parse_net_def(path):
    import google.protobuf.text_format  # type: ignore[import]
    net_def = caffe2_pb2.NetDef()
    with open(path) as f:
        google.protobuf.text_format.Merge(f.read(), net_def)
    return core.Net(net_def)
Reported by Pylint.
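The google.protobuf.text_format import sits inside parse_net_def, which matches import-outside-toplevel. If the deferred import is deliberate, a line-level disable keeps the behavior; otherwise the import can move to the top of the module. A sketch of the first option, assuming that is the message:
def parse_net_def(path):
    # Deferred import kept on purpose; scope the disable to this line only.
    import google.protobuf.text_format  # type: ignore[import]  # pylint: disable=import-outside-toplevel
    net_def = caffe2_pb2.NetDef()
    with open(path) as f:
        google.protobuf.text_format.Merge(f.read(), net_def)
    return core.Net(net_def)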
Line: 148
Column: 16
        with open(filename) as f:
            rows = [(float(el) for el in line.split()) for line in f]
            return [S(*r) for r in rows]
    except Exception as e:
        log.exception(e)
        return None
def get_named_summaries(root):
    summaries = [
Reported by Pylint.
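The except Exception handler on line 148 is the classic broad-except pattern. If that is the finding, catching only the failures this block can realistically raise (file I/O and float parsing) narrows the handler while keeping the original fallback; a sketch under that assumption:
    try:
        with open(filename) as f:
            rows = [(float(el) for el in line.split()) for line in f]
            return [S(*r) for r in rows]
    except (OSError, ValueError) as e:
        # Narrower than `except Exception`: only I/O and parse failures are expected here.
        log.exception(e)
        return None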
Line: 1
Column: 1
import click
import collections
import logging
import numpy as np
Reported by Pylint.
Line: 7
Column: 1
import click
import collections
import logging
import numpy as np
import os
from caffe2.proto import caffe2_pb2
Reported by Pylint.
Line: 8
Column: 1
import click
import collections
import logging
import numpy as np
import os
from caffe2.proto import caffe2_pb2
from caffe2.python import core
Reported by Pylint.
Line: 10
Column: 1
import collections
import logging
import numpy as np
import os
from caffe2.proto import caffe2_pb2
from caffe2.python import core
import caffe2.contrib.tensorboard.tensorboard_exporter as tb_exporter
Reported by Pylint.
Line: 31
Column: 1
    # tensorflow<=0.12.1
    from tensorflow.train import SummaryWriter as FileWriter
class Config(object):
    HEIGHT = 600
    ASPECT_RATIO = 1.6
CODE_TEMPLATE = """
Reported by Pylint.
Line: 31
Column: 1
    # tensorflow<=0.12.1
    from tensorflow.train import SummaryWriter as FileWriter
class Config(object):
    HEIGHT = 600
    ASPECT_RATIO = 1.6
CODE_TEMPLATE = """
Reported by Pylint.
Line: 31
Column: 1
    # tensorflow<=0.12.1
    from tensorflow.train import SummaryWriter as FileWriter
class Config(object):
    HEIGHT = 600
    ASPECT_RATIO = 1.6
CODE_TEMPLATE = """
Reported by Pylint.
caffe2/contrib/fakelowp/test/test_sls_8bit_nnpi_fp32.py
27 issues
Line: 4
Column: 1
import unittest
# Must happen before importing caffe2.python.*
import caffe2.python.fakelowp.init_shared_libs # noqa
import datetime
import numpy as np
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
Reported by Pylint.
Line: 7
Column: 1
import caffe2.python.fakelowp.init_shared_libs # noqa
import datetime
import numpy as np
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
Reported by Pylint.
Line: 8
Column: 1
import datetime
import numpy as np
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial
Reported by Pylint.
Line: 9
Column: 1
import numpy as np
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial
Reported by Pylint.
Line: 10
Column: 1
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial
workspace.GlobalInit(
Reported by Pylint.
Line: 11
Column: 1
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial
workspace.GlobalInit(
    [
Reported by Pylint.
Line: 12
Column: 1
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial
workspace.GlobalInit(
    [
        "caffe2",
Reported by Pylint.
Line: 13
Column: 1
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial
workspace.GlobalInit(
    [
        "caffe2",
        "--glow_global_fp16=0",
Reported by Pylint.
Line: 4
Column: 1
import unittest
# Must happen before importing caffe2.python.*
import caffe2.python.fakelowp.init_shared_libs # noqa
import datetime
import numpy as np
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
Reported by Pylint.
Line: 1
Column: 1
import unittest
# Must happen before importing caffe2.python.*
import caffe2.python.fakelowp.init_shared_libs # noqa
import datetime
import numpy as np
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
Reported by Pylint.
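Most of this file's findings sit on the import block, and the shape suggests wrong-import-order: the standard-library datetime comes after the first-party caffe2.python.fakelowp.init_shared_libs, which the comment says must load first. If that is the message, grouping the standard-library imports ahead of it preserves the constraint; a sketch:
# Standard library
import datetime
import unittest

# Must happen before importing caffe2.python.*; the remaining disorder is deliberate
# (assumed wrong-import-order finding).
import caffe2.python.fakelowp.init_shared_libs  # noqa  # pylint: disable=wrong-import-order

# Third-party
import numpy as np
from hypothesis import given, settings
from hypothesis import strategies as st

# First-party
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial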
torch/optim/rmsprop.py
27 issues
Line: 2
Column: 1
import torch
from . import _functional as F
from .optimizer import Optimizer
class RMSprop(Optimizer):
    r"""Implements RMSprop algorithm.
    Proposed by G. Hinton in his
Reported by Pylint.
Line: 3
Column: 1
import torch
from . import _functional as F
from .optimizer import Optimizer
class RMSprop(Optimizer):
    r"""Implements RMSprop algorithm.
    Proposed by G. Hinton in his
Reported by Pylint.
Line: 90
Column: 77
# State initialization
if len(state) == 0:
    state['step'] = 0
    state['square_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
    if group['momentum'] > 0:
        state['momentum_buffer'] = torch.zeros_like(p, memory_format=torch.preserve_format)
    if group['centered']:
        state['grad_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
Reported by Pylint.
Line: 90
Column: 43
# State initialization
if len(state) == 0:
    state['step'] = 0
    state['square_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
    if group['momentum'] > 0:
        state['momentum_buffer'] = torch.zeros_like(p, memory_format=torch.preserve_format)
    if group['centered']:
        state['grad_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
Reported by Pylint.
Line: 92
Column: 52
    state['step'] = 0
    state['square_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
    if group['momentum'] > 0:
        state['momentum_buffer'] = torch.zeros_like(p, memory_format=torch.preserve_format)
    if group['centered']:
        state['grad_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
square_avgs.append(state['square_avg'])
Reported by Pylint.
Line: 92
Column: 86
    state['step'] = 0
    state['square_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
    if group['momentum'] > 0:
        state['momentum_buffer'] = torch.zeros_like(p, memory_format=torch.preserve_format)
    if group['centered']:
        state['grad_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
square_avgs.append(state['square_avg'])
Reported by Pylint.
Line: 94
Column: 45
    if group['momentum'] > 0:
        state['momentum_buffer'] = torch.zeros_like(p, memory_format=torch.preserve_format)
    if group['centered']:
        state['grad_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
square_avgs.append(state['square_avg'])
if group['momentum'] > 0:
    momentum_buffer_list.append(state['momentum_buffer'])
Reported by Pylint.
Line: 94
Column: 79
    if group['momentum'] > 0:
        state['momentum_buffer'] = torch.zeros_like(p, memory_format=torch.preserve_format)
    if group['centered']:
        state['grad_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
square_avgs.append(state['square_avg'])
if group['momentum'] > 0:
    momentum_buffer_list.append(state['momentum_buffer'])
Reported by Pylint.
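The column numbers on lines 90-94 all land on torch.zeros_like and torch.preserve_format, a pattern Pylint often misreports as no-member because those attributes live in torch's C extension. If that is what these findings are, whitelisting the extension (for example, extension-pkg-allow-list=torch in .pylintrc) is the project-wide fix; a per-line sketch under the same assumption, names taken from the snippet:
if len(state) == 0:
    state['step'] = 0
    # no-member here would be a false positive from torch's C extension.
    state['square_avg'] = torch.zeros_like(
        p, memory_format=torch.preserve_format)  # pylint: disable=no-member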
Line: 1
Column: 1
import torch
from . import _functional as F
from .optimizer import Optimizer
class RMSprop(Optimizer):
    r"""Implements RMSprop algorithm.
    Proposed by G. Hinton in his
Reported by Pylint.
Line: 35
Column: 1
"""
def __init__(self, params, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= momentum:
Reported by Pylint.
caffe2/python/operator_test/sparse_to_dense_mask_op_test.py
27 issues
Line: 7
Column: 1
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
Reported by Pylint.
Line: 9
Column: 1
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
class TestFcOperator(hu.HypothesisTestCase):
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
Reported by Pylint.
Line: 13
Column: 1
import numpy as np
class TestFcOperator(hu.HypothesisTestCase):
    @given(n=st.integers(1, 10), k=st.integers(1, 5),
           use_length=st.booleans(), **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_sparse_to_dense_mask(self, n, k, use_length, gc, dc):
Reported by Pylint.
Line: 18
Column: 5
    @given(n=st.integers(1, 10), k=st.integers(1, 5),
           use_length=st.booleans(), **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_sparse_to_dense_mask(self, n, k, use_length, gc, dc):
        lengths = np.random.randint(k, size=n).astype(np.int32) + 1
        N = sum(lengths)
        indices = np.random.randint(5, size=N)
        values = np.random.rand(N, 2).astype(np.float32)
        default = np.random.rand(2).astype(np.float32)
Reported by Pylint.
Line: 18
Column: 5
    @given(n=st.integers(1, 10), k=st.integers(1, 5),
           use_length=st.booleans(), **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_sparse_to_dense_mask(self, n, k, use_length, gc, dc):
        lengths = np.random.randint(k, size=n).astype(np.int32) + 1
        N = sum(lengths)
        indices = np.random.randint(5, size=N)
        values = np.random.rand(N, 2).astype(np.float32)
        default = np.random.rand(2).astype(np.float32)
Reported by Pylint.
Line: 18
Column: 5
    @given(n=st.integers(1, 10), k=st.integers(1, 5),
           use_length=st.booleans(), **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_sparse_to_dense_mask(self, n, k, use_length, gc, dc):
        lengths = np.random.randint(k, size=n).astype(np.int32) + 1
        N = sum(lengths)
        indices = np.random.randint(5, size=N)
        values = np.random.rand(N, 2).astype(np.float32)
        default = np.random.rand(2).astype(np.float32)
Reported by Pylint.
Line: 18
Column: 5
    @given(n=st.integers(1, 10), k=st.integers(1, 5),
           use_length=st.booleans(), **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_sparse_to_dense_mask(self, n, k, use_length, gc, dc):
        lengths = np.random.randint(k, size=n).astype(np.int32) + 1
        N = sum(lengths)
        indices = np.random.randint(5, size=N)
        values = np.random.rand(N, 2).astype(np.float32)
        default = np.random.rand(2).astype(np.float32)
Reported by Pylint.
Line: 18
Column: 5
    @given(n=st.integers(1, 10), k=st.integers(1, 5),
           use_length=st.booleans(), **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_sparse_to_dense_mask(self, n, k, use_length, gc, dc):
        lengths = np.random.randint(k, size=n).astype(np.int32) + 1
        N = sum(lengths)
        indices = np.random.randint(5, size=N)
        values = np.random.rand(N, 2).astype(np.float32)
        default = np.random.rand(2).astype(np.float32)
Reported by Pylint.
Line: 18
Column: 5
    @given(n=st.integers(1, 10), k=st.integers(1, 5),
           use_length=st.booleans(), **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_sparse_to_dense_mask(self, n, k, use_length, gc, dc):
        lengths = np.random.randint(k, size=n).astype(np.int32) + 1
        N = sum(lengths)
        indices = np.random.randint(5, size=N)
        values = np.random.rand(N, 2).astype(np.float32)
        default = np.random.rand(2).astype(np.float32)
Reported by Pylint.
caffe2/python/operator_test/trigonometric_op_test.py
27 issues
Line: 7
Column: 1
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import numpy as np
import unittest
Reported by Pylint.
Line: 44
Column: 50
    def test_tan(self, X, gc, dc):
        self.assertTrigonometricChecks("Tan", X, lambda x: (np.tan(X),), gc, dc)
    def assertTrigonometricChecks(self, op_name, input, reference, gc, dc):
        op = core.CreateOperator(op_name, ["X"], ["Y"])
        self.assertReferenceChecks(gc, op, [input], reference)
        self.assertDeviceChecks(dc, op, [input], [0])
        self.assertGradientChecks(gc, op, [input], 0, [0])
Reported by Pylint.
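Column 50 on line 44 points at the lambda, whose parameter x is never used because the body closes over the outer X. Whatever the exact message, making the reference function use its own argument is the natural cleanup; a sketch:
    def test_tan(self, X, gc, dc):
        # Use the lambda's argument rather than the enclosing X.
        self.assertTrigonometricChecks("Tan", X, lambda x: (np.tan(x),), gc, dc)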
Line: 1
Column: 1
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
Reported by Pylint.
Line: 12
Column: 1
import caffe2.python.serialized_test.serialized_test_util as serial
import numpy as np
import unittest
class TestTrigonometricOp(serial.SerializedTestCase):
    @given(
        X=hu.tensor(elements=hu.floats(min_value=-0.7, max_value=0.7)),
Reported by Pylint.
Line: 15
Column: 1
import unittest
class TestTrigonometricOp(serial.SerializedTestCase):
    @given(
        X=hu.tensor(elements=hu.floats(min_value=-0.7, max_value=0.7)),
        **hu.gcs)
    @settings(deadline=None, max_examples=50)
    def test_acos(self, X, gc, dc):
Reported by Pylint.
Line: 20
Column: 5
        X=hu.tensor(elements=hu.floats(min_value=-0.7, max_value=0.7)),
        **hu.gcs)
    @settings(deadline=None, max_examples=50)
    def test_acos(self, X, gc, dc):
        self.assertTrigonometricChecks("Acos", X, lambda x: (np.arccos(X),), gc, dc)
    @given(
        X=hu.tensor(elements=hu.floats(min_value=-0.7, max_value=0.7)),
        **hu.gcs)
Reported by Pylint.
Line: 20
Column: 5
        X=hu.tensor(elements=hu.floats(min_value=-0.7, max_value=0.7)),
        **hu.gcs)
    @settings(deadline=None, max_examples=50)
    def test_acos(self, X, gc, dc):
        self.assertTrigonometricChecks("Acos", X, lambda x: (np.arccos(X),), gc, dc)
    @given(
        X=hu.tensor(elements=hu.floats(min_value=-0.7, max_value=0.7)),
        **hu.gcs)
Reported by Pylint.
Line: 20
Column: 5
        X=hu.tensor(elements=hu.floats(min_value=-0.7, max_value=0.7)),
        **hu.gcs)
    @settings(deadline=None, max_examples=50)
    def test_acos(self, X, gc, dc):
        self.assertTrigonometricChecks("Acos", X, lambda x: (np.arccos(X),), gc, dc)
    @given(
        X=hu.tensor(elements=hu.floats(min_value=-0.7, max_value=0.7)),
        **hu.gcs)
Reported by Pylint.
Line: 20
Column: 5
        X=hu.tensor(elements=hu.floats(min_value=-0.7, max_value=0.7)),
        **hu.gcs)
    @settings(deadline=None, max_examples=50)
    def test_acos(self, X, gc, dc):
        self.assertTrigonometricChecks("Acos", X, lambda x: (np.arccos(X),), gc, dc)
    @given(
        X=hu.tensor(elements=hu.floats(min_value=-0.7, max_value=0.7)),
        **hu.gcs)
Reported by Pylint.
Line: 27
Column: 5
        X=hu.tensor(elements=hu.floats(min_value=-0.7, max_value=0.7)),
        **hu.gcs)
    @settings(deadline=None, max_examples=50)
    def test_asin(self, X, gc, dc):
        self.assertTrigonometricChecks("Asin", X, lambda x: (np.arcsin(X),), gc, dc)
    @given(
        X=hu.tensor(elements=hu.floats(min_value=-100, max_value=100)),
        **hu.gcs)
Reported by Pylint.