The following issues were found:
test/distributions/test_constraints.py
30 issues
Line: 1
Column: 1
import pytest
import torch
from torch.distributions import biject_to, constraints, transform_to
from torch.testing._internal.common_cuda import TEST_CUDA
CONSTRAINTS = [
(constraints.real,),
Reported by Pylint.
Line: 3
Column: 1
import pytest
import torch
from torch.distributions import biject_to, constraints, transform_to
from torch.testing._internal.common_cuda import TEST_CUDA
CONSTRAINTS = [
(constraints.real,),
Reported by Pylint.
Line: 4
Column: 1
import pytest
import torch
from torch.distributions import biject_to, constraints, transform_to
from torch.testing._internal.common_cuda import TEST_CUDA
CONSTRAINTS = [
(constraints.real,),
Reported by Pylint.
Line: 5
Column: 1
import torch
from torch.distributions import biject_to, constraints, transform_to
from torch.testing._internal.common_cuda import TEST_CUDA
CONSTRAINTS = [
(constraints.real,),
(constraints.real_vector,),
Reported by Pylint.
Line: 1
Column: 1
import pytest
import torch
from torch.distributions import biject_to, constraints, transform_to
from torch.testing._internal.common_cuda import TEST_CUDA
CONSTRAINTS = [
(constraints.real,),
Reported by Pylint.
Line: 36
Column: 1
]
def build_constraint(constraint_fn, args, is_cuda=False):
if not args:
return constraint_fn
t = torch.cuda.DoubleTensor if is_cuda else torch.DoubleTensor
return constraint_fn(*(t(x) if isinstance(x, list) else x for x in args))
Reported by Pylint.
Line: 39
Column: 5
def build_constraint(constraint_fn, args, is_cuda=False):
if not args:
return constraint_fn
t = torch.cuda.DoubleTensor if is_cuda else torch.DoubleTensor
return constraint_fn(*(t(x) if isinstance(x, list) else x for x in args))
@pytest.mark.parametrize('constraint_fn, args', [(c[0], c[1:]) for c in CONSTRAINTS])
@pytest.mark.parametrize('is_cuda', [False,
Reported by Pylint.
Line: 46
Column: 1
@pytest.mark.parametrize('constraint_fn, args', [(c[0], c[1:]) for c in CONSTRAINTS])
@pytest.mark.parametrize('is_cuda', [False,
pytest.param(True, marks=pytest.mark.skipif(not TEST_CUDA,
reason='CUDA not found.'))])
def test_biject_to(constraint_fn, args, is_cuda):
constraint = build_constraint(constraint_fn, args, is_cuda=is_cuda)
try:
t = biject_to(constraint)
except NotImplementedError:
Reported by Pylint.
Line: 47
Column: 1
@pytest.mark.parametrize('is_cuda', [False,
pytest.param(True, marks=pytest.mark.skipif(not TEST_CUDA,
reason='CUDA not found.'))])
def test_biject_to(constraint_fn, args, is_cuda):
constraint = build_constraint(constraint_fn, args, is_cuda=is_cuda)
try:
t = biject_to(constraint)
except NotImplementedError:
pytest.skip('`biject_to` not implemented.')
Reported by Pylint.
Line: 50
Column: 9
def test_biject_to(constraint_fn, args, is_cuda):
constraint = build_constraint(constraint_fn, args, is_cuda=is_cuda)
try:
t = biject_to(constraint)
except NotImplementedError:
pytest.skip('`biject_to` not implemented.')
assert t.bijective, "biject_to({}) is not bijective".format(constraint)
if constraint_fn is constraints.corr_cholesky:
# (D * (D-1)) / 2 (where D = 4) = 6 (size of last dim)
Reported by Pylint.
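The Pylint message texts are omitted above, so the excerpts are the only context. For reference, a minimal self-contained sketch of the test pattern they come from: parametrize over CUDA with a skipif guard, build a transform with biject_to, and check that it is bijective. The positive constraint and the round-trip check are illustrative additions, not part of the original file.

import pytest
import torch
from torch.distributions import biject_to, constraints

@pytest.mark.parametrize('is_cuda', [
    False,
    pytest.param(True, marks=pytest.mark.skipif(not torch.cuda.is_available(),
                                                reason='CUDA not found.')),
])
def test_biject_to_positive(is_cuda):
    device = 'cuda' if is_cuda else 'cpu'
    t = biject_to(constraints.positive)          # maps R onto (0, inf)
    x = torch.randn(5, dtype=torch.double, device=device)
    y = t(x)
    assert t.bijective, 'biject_to(positive) is not bijective'
    assert constraints.positive.check(y).all()   # image satisfies the constraint
    assert torch.allclose(t.inv(y), x)           # inverse recovers the input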
test/test_bundled_images.py
30 issues
Line: 2
Column: 1
#!/usr/bin/env python3
import torch
import torch.utils.bundled_inputs
import io
import cv2
from torch.testing._internal.common_utils import TestCase
torch.ops.load_library("//caffe2/torch/fb/operators:decode_bundled_image")
Reported by Pylint.
Line: 3
Column: 1
#!/usr/bin/env python3
import torch
import torch.utils.bundled_inputs
import io
import cv2
from torch.testing._internal.common_utils import TestCase
torch.ops.load_library("//caffe2/torch/fb/operators:decode_bundled_image")
Reported by Pylint.
Line: 5
Column: 1
import torch
import torch.utils.bundled_inputs
import io
import cv2
from torch.testing._internal.common_utils import TestCase
torch.ops.load_library("//caffe2/torch/fb/operators:decode_bundled_image")
def model_size(sm):
Reported by Pylint.
Line: 6
Column: 1
import torch.utils.bundled_inputs
import io
import cv2
from torch.testing._internal.common_utils import TestCase
torch.ops.load_library("//caffe2/torch/fb/operators:decode_bundled_image")
def model_size(sm):
buffer = io.BytesIO()
Reported by Pylint.
Line: 21
Column: 1
buffer.seek(0)
return torch.jit.load(buffer)
"""Return an InflatableArg that contains a tensor of the compressed image and the way to decode it
keyword arguments:
img_tensor -- the raw image tensor in HWC or NCHW with pixel value of type unsigned int
if in NCHW format, N should be 1
quality -- the quality needed to compress the image
Reported by Pylint.
Line: 57
Column: 9
im = cv2.imread("caffe2/test/test_img/p1.jpg")
tensor = torch.from_numpy(im)
inflatable_arg = bundle_jpeg_image(tensor, 90)
input = [(inflatable_arg,)]
sm = torch.jit.script(SingleTensorModel())
torch.utils.bundled_inputs.augment_model_with_bundled_inputs(sm, input)
loaded = save_and_load(sm)
inflated = loaded.get_all_bundled_inputs()
decoded_data = inflated[0][0]
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/env python3
import torch
import torch.utils.bundled_inputs
import io
import cv2
from torch.testing._internal.common_utils import TestCase
torch.ops.load_library("//caffe2/torch/fb/operators:decode_bundled_image")
Reported by Pylint.
Line: 4
Column: 1
#!/usr/bin/env python3
import torch
import torch.utils.bundled_inputs
import io
import cv2
from torch.testing._internal.common_utils import TestCase
torch.ops.load_library("//caffe2/torch/fb/operators:decode_bundled_image")
Reported by Pylint.
Line: 10
Column: 1
torch.ops.load_library("//caffe2/torch/fb/operators:decode_bundled_image")
def model_size(sm):
buffer = io.BytesIO()
torch.jit.save(sm, buffer)
return len(buffer.getvalue())
def save_and_load(sm):
Reported by Pylint.
Line: 10
Column: 1
torch.ops.load_library("//caffe2/torch/fb/operators:decode_bundled_image")
def model_size(sm):
buffer = io.BytesIO()
torch.jit.save(sm, buffer)
return len(buffer.getvalue())
def save_and_load(sm):
Reported by Pylint.
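bundle_jpeg_image and the decode_bundled_image library referenced above are internal to this test, but the bundled-inputs round trip it exercises is generic. A hedged sketch of that flow with a plain tensor input (TinyModel is illustrative only):

import io
import torch
import torch.utils.bundled_inputs

class TinyModel(torch.nn.Module):
    def forward(self, x):
        return x + 1

sm = torch.jit.script(TinyModel())
# Attach one bundled input: a tuple of forward() arguments, as in `input = [(inflatable_arg,)]`.
torch.utils.bundled_inputs.augment_model_with_bundled_inputs(sm, [(torch.zeros(2, 3),)])

# Same save_and_load pattern as the excerpt: serialize to memory, then reload.
buffer = io.BytesIO()
torch.jit.save(sm, buffer)
buffer.seek(0)
loaded = torch.jit.load(buffer)

inflated = loaded.get_all_bundled_inputs()
print(loaded(inflated[0][0]))  # run the model on its own bundled input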
torch/utils/data/sampler.py
30 issues
Line: 115
Column: 30
def __iter__(self) -> Iterator[int]:
n = len(self.data_source)
if self.generator is None:
self.generator = torch.Generator()
self.generator.manual_seed(int(torch.empty((), dtype=torch.int64).random_().item()))
if self.replacement:
for _ in range(self.num_samples // 32):
yield from torch.randint(high=n, size=(32,), dtype=torch.int64, generator=self.generator).tolist()
Reported by Pylint.
Line: 116
Column: 44
n = len(self.data_source)
if self.generator is None:
self.generator = torch.Generator()
self.generator.manual_seed(int(torch.empty((), dtype=torch.int64).random_().item()))
if self.replacement:
for _ in range(self.num_samples // 32):
yield from torch.randint(high=n, size=(32,), dtype=torch.int64, generator=self.generator).tolist()
yield from torch.randint(high=n, size=(self.num_samples % 32,), dtype=torch.int64, generator=self.generator).tolist()
Reported by Pylint.
Line: 116
Column: 66
n = len(self.data_source)
if self.generator is None:
self.generator = torch.Generator()
self.generator.manual_seed(int(torch.empty((), dtype=torch.int64).random_().item()))
if self.replacement:
for _ in range(self.num_samples // 32):
yield from torch.randint(high=n, size=(32,), dtype=torch.int64, generator=self.generator).tolist()
yield from torch.randint(high=n, size=(self.num_samples % 32,), dtype=torch.int64, generator=self.generator).tolist()
Reported by Pylint.
Line: 120
Column: 28
if self.replacement:
for _ in range(self.num_samples // 32):
yield from torch.randint(high=n, size=(32,), dtype=torch.int64, generator=self.generator).tolist()
yield from torch.randint(high=n, size=(self.num_samples % 32,), dtype=torch.int64, generator=self.generator).tolist()
else:
yield from torch.randperm(n, generator=self.generator).tolist()
def __len__(self) -> int:
Reported by Pylint.
Line: 120
Column: 68
if self.replacement:
for _ in range(self.num_samples // 32):
yield from torch.randint(high=n, size=(32,), dtype=torch.int64, generator=self.generator).tolist()
yield from torch.randint(high=n, size=(self.num_samples % 32,), dtype=torch.int64, generator=self.generator).tolist()
else:
yield from torch.randperm(n, generator=self.generator).tolist()
def __len__(self) -> int:
Reported by Pylint.
Line: 121
Column: 24
if self.replacement:
for _ in range(self.num_samples // 32):
yield from torch.randint(high=n, size=(32,), dtype=torch.int64, generator=self.generator).tolist()
yield from torch.randint(high=n, size=(self.num_samples % 32,), dtype=torch.int64, generator=self.generator).tolist()
else:
yield from torch.randperm(n, generator=self.generator).tolist()
def __len__(self) -> int:
return self.num_samples
Reported by Pylint.
Line: 121
Column: 83
if self.replacement:
for _ in range(self.num_samples // 32):
yield from torch.randint(high=n, size=(32,), dtype=torch.int64, generator=self.generator).tolist()
yield from torch.randint(high=n, size=(self.num_samples % 32,), dtype=torch.int64, generator=self.generator).tolist()
else:
yield from torch.randperm(n, generator=self.generator).tolist()
def __len__(self) -> int:
return self.num_samples
Reported by Pylint.
Line: 123
Column: 24
yield from torch.randint(high=n, size=(32,), dtype=torch.int64, generator=self.generator).tolist()
yield from torch.randint(high=n, size=(self.num_samples % 32,), dtype=torch.int64, generator=self.generator).tolist()
else:
yield from torch.randperm(n, generator=self.generator).tolist()
def __len__(self) -> int:
return self.num_samples
Reported by Pylint.
Line: 143
Column: 42
self.generator = generator
def __iter__(self) -> Iterator[int]:
return (self.indices[i] for i in torch.randperm(len(self.indices), generator=self.generator))
def __len__(self) -> int:
return len(self.indices)
Reported by Pylint.
Line: 179
Column: 55
if not isinstance(replacement, bool):
raise ValueError("replacement should be a boolean value, but got "
"replacement={}".format(replacement))
self.weights = torch.as_tensor(weights, dtype=torch.double)
self.num_samples = num_samples
self.replacement = replacement
self.generator = generator
def __iter__(self) -> Iterator[int]:
Reported by Pylint.
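The __iter__ excerpts above contain the whole with-replacement sampling scheme: indices are drawn in fixed blocks of 32 plus one remainder block, from a generator seeded on first use. A standalone restatement of that logic, outside the class:

import torch

def random_indices(n, num_samples, generator=None):
    # Mirror RandomSampler: seed a fresh generator when none is supplied.
    if generator is None:
        seed = int(torch.empty((), dtype=torch.int64).random_().item())
        generator = torch.Generator()
        generator.manual_seed(seed)
    for _ in range(num_samples // 32):
        yield from torch.randint(high=n, size=(32,), dtype=torch.int64,
                                 generator=generator).tolist()
    yield from torch.randint(high=n, size=(num_samples % 32,), dtype=torch.int64,
                             generator=generator).tolist()

print(list(random_indices(n=10, num_samples=70))[:5])  # 70 indices in [0, 10)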
benchmarks/tensorexpr/normalization.py
30 issues
Line: 1
Column: 1
from . import benchmark
from . import tensor_engine
class NormalizationBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, N, C, H, W):
super().__init__(mode, device, dtype)
self.N = N
self.C = C
Reported by Pylint.
Line: 2
Column: 1
from . import benchmark
from . import tensor_engine
class NormalizationBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, N, C, H, W):
super().__init__(mode, device, dtype)
self.N = N
self.C = C
Reported by Pylint.
Line: 1
Column: 1
from . import benchmark
from . import tensor_engine
class NormalizationBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, N, C, H, W):
super().__init__(mode, device, dtype)
self.N = N
self.C = C
Reported by Pylint.
Line: 5
Column: 1
from . import tensor_engine
class NormalizationBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, N, C, H, W):
super().__init__(mode, device, dtype)
self.N = N
self.C = C
self.H = H
Reported by Pylint.
Line: 5
Column: 1
from . import tensor_engine
class NormalizationBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, N, C, H, W):
super().__init__(mode, device, dtype)
self.N = N
self.C = C
self.H = H
Reported by Pylint.
Line: 6
Column: 5
class NormalizationBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, N, C, H, W):
super().__init__(mode, device, dtype)
self.N = N
self.C = C
self.H = H
self.W = W
Reported by Pylint.
Line: 6
Column: 5
class NormalizationBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, N, C, H, W):
super().__init__(mode, device, dtype)
self.N = N
self.C = C
self.H = H
self.W = W
Reported by Pylint.
Line: 6
Column: 5
class NormalizationBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, N, C, H, W):
super().__init__(mode, device, dtype)
self.N = N
self.C = C
self.H = H
self.W = W
Reported by Pylint.
Line: 6
Column: 5
class NormalizationBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, N, C, H, W):
super().__init__(mode, device, dtype)
self.N = N
self.C = C
self.H = H
self.W = W
Reported by Pylint.
Line: 6
Column: 5
class NormalizationBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, N, C, H, W):
super().__init__(mode, device, dtype)
self.N = N
self.C = C
self.H = H
self.W = W
Reported by Pylint.
caffe2/python/nomnigraph_transformations_test.py
30 issues
Line: 12
Column: 1
from caffe2.python.nomnigraph_transformations import transpose_network
import numpy as np
from hypothesis import given
import hypothesis.strategies as st
class TestNomnigraphTransformations(tu.TestCase):
def test_simple_replace(self):
Reported by Pylint.
Line: 13
Column: 1
import numpy as np
from hypothesis import given
import hypothesis.strategies as st
class TestNomnigraphTransformations(tu.TestCase):
def test_simple_replace(self):
net = core.Net("name")
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core, workspace
from caffe2.python import test_util as tu
import caffe2.python.nomnigraph as ng
from caffe2.python.nomnigraph_transformations import transpose_network
Reported by Pylint.
Line: 16
Column: 1
import hypothesis.strategies as st
class TestNomnigraphTransformations(tu.TestCase):
def test_simple_replace(self):
net = core.Net("name")
net.FC(["X", "W"], ["Y"])
nn = ng.NNModule(net)
fc = nn.controlFlow[0]
Reported by Pylint.
Line: 17
Column: 5
class TestNomnigraphTransformations(tu.TestCase):
def test_simple_replace(self):
net = core.Net("name")
net.FC(["X", "W"], ["Y"])
nn = ng.NNModule(net)
fc = nn.controlFlow[0]
add = nn.createNode(core.CreateOperator("Add", ["X"], ["Y"], engine="CUDNN"))
Reported by Pylint.
Line: 17
Column: 5
class TestNomnigraphTransformations(tu.TestCase):
def test_simple_replace(self):
net = core.Net("name")
net.FC(["X", "W"], ["Y"])
nn = ng.NNModule(net)
fc = nn.controlFlow[0]
add = nn.createNode(core.CreateOperator("Add", ["X"], ["Y"], engine="CUDNN"))
Reported by Pylint.
Line: 20
Column: 9
def test_simple_replace(self):
net = core.Net("name")
net.FC(["X", "W"], ["Y"])
nn = ng.NNModule(net)
fc = nn.controlFlow[0]
add = nn.createNode(core.CreateOperator("Add", ["X"], ["Y"], engine="CUDNN"))
nn.replaceNode(fc, add)
nn.deleteNode(fc)
Reported by Pylint.
Line: 21
Column: 9
net = core.Net("name")
net.FC(["X", "W"], ["Y"])
nn = ng.NNModule(net)
fc = nn.controlFlow[0]
add = nn.createNode(core.CreateOperator("Add", ["X"], ["Y"], engine="CUDNN"))
nn.replaceNode(fc, add)
nn.deleteNode(fc)
# Test it out
Reported by Pylint.
Line: 36
Column: 5
expected_out = np.array([2, 4, 6])
np.testing.assert_almost_equal(out, expected_out)
def test_simple_rewire(self):
net = core.Net("name")
# Rewire this so that we get
# c = Add(a, d)
# e = Mul(c, b)
#
Reported by Pylint.
Line: 36
Column: 5
expected_out = np.array([2, 4, 6])
np.testing.assert_almost_equal(out, expected_out)
def test_simple_rewire(self):
net = core.Net("name")
# Rewire this so that we get
# c = Add(a, d)
# e = Mul(c, b)
#
Reported by Pylint.
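The test body quoted above is itself the recipe for swapping one operator node for another; restated outside the TestCase for readability:

from caffe2.python import core
import caffe2.python.nomnigraph as ng

net = core.Net("name")
net.FC(["X", "W"], ["Y"])

nn = ng.NNModule(net)
fc = nn.controlFlow[0]                 # the single FC operator in the net
add = nn.createNode(core.CreateOperator("Add", ["X"], ["Y"], engine="CUDNN"))
nn.replaceNode(fc, add)                # rewire the FC node's edges onto Add
nn.deleteNode(fc)                      # then drop the orphaned FC node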
torch/utils/hooks.py
30 issues
Line: 40
Column: 24
def __enter__(self) -> 'RemovableHandle':
return self
def __exit__(self, type: Any, value: Any, tb: Any) -> None:
self.remove()
def unserializable_hook(f):
"""
Reported by Pylint.
Line: 55
Column: 8
def warn_if_has_hooks(tensor):
if tensor._backward_hooks:
for k in tensor._backward_hooks:
hook = tensor._backward_hooks[k]
if not hasattr(k, "__torch_unserializable__"):
warnings.warn("backward hook {} on tensor will not be "
"serialized. If this is expected, you can "
Reported by Pylint.
Line: 56
Column: 18
def warn_if_has_hooks(tensor):
if tensor._backward_hooks:
for k in tensor._backward_hooks:
hook = tensor._backward_hooks[k]
if not hasattr(k, "__torch_unserializable__"):
warnings.warn("backward hook {} on tensor will not be "
"serialized. If this is expected, you can "
"decorate the function with @torch.utils.hooks.unserializable_hook "
Reported by Pylint.
Line: 57
Column: 20
def warn_if_has_hooks(tensor):
if tensor._backward_hooks:
for k in tensor._backward_hooks:
hook = tensor._backward_hooks[k]
if not hasattr(k, "__torch_unserializable__"):
warnings.warn("backward hook {} on tensor will not be "
"serialized. If this is expected, you can "
"decorate the function with @torch.utils.hooks.unserializable_hook "
"to suppress this warning".format(repr(hook)))
Reported by Pylint.
Line: 136
Column: 23
if not requires_grad:
return args, None
new_tensors = torch.nn.modules._functions.BackwardHookFunction.apply(*tensors)
if len(new_tensors) == 0:
raise RuntimeError("Cannot set Module backward hook for a Module with no input Tensors.")
grad_fns = [t.grad_fn for t in new_tensors if t.grad_fn is not None and t.grad_fn.name() == "BackwardHookFunctionBackward"]
if len(grad_fns) == 0:
Reported by Pylint.
Line: 1
Column: 1
import torch
from collections import OrderedDict
import weakref
import warnings
import functools
from typing import Any
class RemovableHandle(object):
Reported by Pylint.
Line: 2
Column: 1
import torch
from collections import OrderedDict
import weakref
import warnings
import functools
from typing import Any
class RemovableHandle(object):
Reported by Pylint.
Line: 3
Column: 1
import torch
from collections import OrderedDict
import weakref
import warnings
import functools
from typing import Any
class RemovableHandle(object):
Reported by Pylint.
Line: 4
Column: 1
import torch
from collections import OrderedDict
import weakref
import warnings
import functools
from typing import Any
class RemovableHandle(object):
Reported by Pylint.
Line: 5
Column: 1
from collections import OrderedDict
import weakref
import warnings
import functools
from typing import Any
class RemovableHandle(object):
"""A handle which provides the capability to remove a hook."""
Reported by Pylint.
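Two pieces of this file appear in the excerpts: RemovableHandle's context-manager methods and the warning in warn_if_has_hooks, which points at the @unserializable_hook decorator. A small sketch putting both together (log_grad is illustrative):

import torch
import torch.utils.hooks

@torch.utils.hooks.unserializable_hook   # marks the hook as intentionally non-serializable
def log_grad(grad):
    print("grad norm:", grad.norm().item())

x = torch.randn(3, requires_grad=True)
# register_hook returns a RemovableHandle; used as a context manager, the
# __enter__/__exit__ shown above remove the hook automatically on exit.
with x.register_hook(log_grad):
    x.sum().backward()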
caffe2/python/helpers/normalization.py
30 issues
Line: 24
Column: 5
blobs_out = blob_out
else:
blobs_out = [blob_out, "_" + blob_out + "_scale"]
lrn = model.net.LRN(
blob_in,
blobs_out,
order=order,
**kwargs
)
Reported by Pylint.
Line: 159
Column: 16
def spatial_gn(model, blob_in, blob_out, dim_in,
init_scale=1., init_bias=0.,
ScaleInitializer=None, BiasInitializer=None,
RunningMeanInitializer=None, RunningVarianceInitializer=None,
order="NCHW", **kwargs):
'''
Group normalizes the input, cf. https://arxiv.org/abs/1803.08494.
'''
Reported by Pylint.
Line: 159
Column: 45
def spatial_gn(model, blob_in, blob_out, dim_in,
init_scale=1., init_bias=0.,
ScaleInitializer=None, BiasInitializer=None,
RunningMeanInitializer=None, RunningVarianceInitializer=None,
order="NCHW", **kwargs):
'''
Group normalizes the input, cf. https://arxiv.org/abs/1803.08494.
'''
Reported by Pylint.
Line: 160
Column: 16
init_scale=1., init_bias=0.,
ScaleInitializer=None, BiasInitializer=None,
RunningMeanInitializer=None, RunningVarianceInitializer=None,
order="NCHW", **kwargs):
'''
Group normalizes the input, cf. https://arxiv.org/abs/1803.08494.
'''
blob_out = blob_out or model.net.NextName()
Reported by Pylint.
Line: 296
Column: 5
RunningMeanInitializer = initializers.ExternalInitializer()
RunningVarianceInitializer = initializers.ExternalInitializer()
running_mean = model.create_param(
param_name=blob_out + '_rm',
shape=[dim_in],
initializer=RunningMeanInitializer,
tags=ParameterTags.COMPUTED_PARAM
)
Reported by Pylint.
Line: 304
Column: 5
)
# this is just running variance
running_inv_var = model.create_param(
param_name=blob_out + '_riv',
shape=[dim_in],
initializer=RunningVarianceInitializer,
tags=ParameterTags.COMPUTED_PARAM
)
Reported by Pylint.
Line: 1
Column: 1
## @package normalization
# Module caffe2.python.helpers.normalization
from caffe2.python import scope
from caffe2.python.modeling.parameter_info import ParameterTags
Reported by Pylint.
Line: 31
Column: 5
**kwargs
)
if use_cudnn and (not is_cpu):
return lrn
else:
return lrn[0]
Reported by Pylint.
Line: 41
Column: 5
"""Softmax."""
if use_cudnn:
kwargs['engine'] = 'CUDNN'
if blob_out is not None:
return model.net.Softmax(blob_in, blob_out, **kwargs)
else:
return model.net.Softmax(blob_in, **kwargs)
Reported by Pylint.
Line: 47
Column: 1
return model.net.Softmax(blob_in, **kwargs)
def instance_norm(model, blob_in, blob_out, dim_in, order="NCHW", **kwargs):
blob_out = blob_out or model.net.NextName()
# Input: input, scale, bias
# Output: output, saved_mean, saved_inv_std
# scale: initialize with ones
# bias: initialize with zeros
Reported by Pylint.
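The excerpts show the helpers' signatures but not a call site. A hedged usage sketch, assuming the usual ModelHelper setup; it only builds the net (running it would also need workspace initialization), and the blob names are illustrative:

from caffe2.python import model_helper
from caffe2.python.helpers.normalization import instance_norm, softmax

model = model_helper.ModelHelper(name="norm_example")
# Matches the excerpted signature: instance_norm(model, blob_in, blob_out, dim_in, order="NCHW", **kwargs)
instance_norm(model, "data", "data_in", dim_in=32)
# softmax routes through model.net.Softmax, with engine='CUDNN' when use_cudnn=True
softmax(model, "data_in", "probs")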
torch/optim/__init__.py
30 issues
Line: 8
Column: 1
future.
"""
from .adadelta import Adadelta
from .adagrad import Adagrad
from .adam import Adam
from .adamw import AdamW
from .sparse_adam import SparseAdam
from .adamax import Adamax
Reported by Pylint.
Line: 9
Column: 1
"""
from .adadelta import Adadelta
from .adagrad import Adagrad
from .adam import Adam
from .adamw import AdamW
from .sparse_adam import SparseAdam
from .adamax import Adamax
from .asgd import ASGD
Reported by Pylint.
Line: 10
Column: 1
from .adadelta import Adadelta
from .adagrad import Adagrad
from .adam import Adam
from .adamw import AdamW
from .sparse_adam import SparseAdam
from .adamax import Adamax
from .asgd import ASGD
from .sgd import SGD
Reported by Pylint.
Line: 11
Column: 1
from .adadelta import Adadelta
from .adagrad import Adagrad
from .adam import Adam
from .adamw import AdamW
from .sparse_adam import SparseAdam
from .adamax import Adamax
from .asgd import ASGD
from .sgd import SGD
from .radam import RAdam
Reported by Pylint.
Line: 12
Column: 1
from .adagrad import Adagrad
from .adam import Adam
from .adamw import AdamW
from .sparse_adam import SparseAdam
from .adamax import Adamax
from .asgd import ASGD
from .sgd import SGD
from .radam import RAdam
from .rprop import Rprop
Reported by Pylint.
Line: 13
Column: 1
from .adam import Adam
from .adamw import AdamW
from .sparse_adam import SparseAdam
from .adamax import Adamax
from .asgd import ASGD
from .sgd import SGD
from .radam import RAdam
from .rprop import Rprop
from .rmsprop import RMSprop
Reported by Pylint.
Line: 14
Column: 1
from .adamw import AdamW
from .sparse_adam import SparseAdam
from .adamax import Adamax
from .asgd import ASGD
from .sgd import SGD
from .radam import RAdam
from .rprop import Rprop
from .rmsprop import RMSprop
from .optimizer import Optimizer
Reported by Pylint.
Line: 15
Column: 1
from .sparse_adam import SparseAdam
from .adamax import Adamax
from .asgd import ASGD
from .sgd import SGD
from .radam import RAdam
from .rprop import Rprop
from .rmsprop import RMSprop
from .optimizer import Optimizer
from .nadam import NAdam
Reported by Pylint.
Line: 16
Column: 1
from .adamax import Adamax
from .asgd import ASGD
from .sgd import SGD
from .radam import RAdam
from .rprop import Rprop
from .rmsprop import RMSprop
from .optimizer import Optimizer
from .nadam import NAdam
from .lbfgs import LBFGS
Reported by Pylint.
Line: 17
Column: 1
from .asgd import ASGD
from .sgd import SGD
from .radam import RAdam
from .rprop import Rprop
from .rmsprop import RMSprop
from .optimizer import Optimizer
from .nadam import NAdam
from .lbfgs import LBFGS
from . import lr_scheduler
Reported by Pylint.
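The excerpts here are only the package's re-exports; for context, a minimal use of one of them (SGD) in the standard zero_grad/backward/step loop:

import torch
from torch import nn, optim

model = nn.Linear(4, 1)
opt = optim.SGD(model.parameters(), lr=0.1)

x, y = torch.randn(8, 4), torch.randn(8, 1)
for _ in range(3):
    opt.zero_grad()
    loss = nn.functional.mse_loss(model(x), y)
    loss.backward()
    opt.step()   # apply the SGD update to the model parameters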
caffe2/python/operator_test/rank_loss_operator_test.py
30 issues
Line: 7
Column: 1
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
Reported by Pylint.
Line: 10
Column: 1
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
class TestPairWiseLossOps(serial.SerializedTestCase):
@given(X=hu.arrays(dims=[2, 1],
Reported by Pylint.
Line: 21
Column: 57
elements=st.integers(min_value=0, max_value=1),
dtype=np.float32),
**hu.gcs_cpu_only)
def test_pair_wise_loss_predictions(self, X, label, gc, dc):
workspace.FeedBlob('X', X)
workspace.FeedBlob('label', label)
new_label = np.array([label[1], label[0]])
new_x = np.array([X[1], X[0]])
workspace.FeedBlob('new_x', new_x)
Reported by Pylint.
Line: 21
Column: 61
elements=st.integers(min_value=0, max_value=1),
dtype=np.float32),
**hu.gcs_cpu_only)
def test_pair_wise_loss_predictions(self, X, label, gc, dc):
workspace.FeedBlob('X', X)
workspace.FeedBlob('label', label)
new_label = np.array([label[1], label[0]])
new_x = np.array([X[1], X[0]])
workspace.FeedBlob('new_x', new_x)
Reported by Pylint.
Line: 58
Column: 62
dY=hu.arrays(dims=[1],
elements=hu.floats(min_value=1, max_value=10)),
**hu.gcs_cpu_only)
def test_pair_wise_loss_gradient(self, X, label, dY, gc, dc):
workspace.FeedBlob('X', X)
workspace.FeedBlob('dY', dY)
workspace.FeedBlob('label', label)
net = core.Net('net')
net.PairWiseLossGradient(
Reported by Pylint.
Line: 58
Column: 58
dY=hu.arrays(dims=[1],
elements=hu.floats(min_value=1, max_value=10)),
**hu.gcs_cpu_only)
def test_pair_wise_loss_gradient(self, X, label, dY, gc, dc):
workspace.FeedBlob('X', X)
workspace.FeedBlob('dY', dY)
workspace.FeedBlob('label', label)
net = core.Net('net')
net.PairWiseLossGradient(
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
Reported by Pylint.
Line: 14
Column: 1
import numpy as np
class TestPairWiseLossOps(serial.SerializedTestCase):
@given(X=hu.arrays(dims=[2, 1],
elements=hu.floats(min_value=0.0, max_value=10.0)),
label=hu.arrays(dims=[2, 1],
elements=st.integers(min_value=0, max_value=1),
dtype=np.float32),
Reported by Pylint.
Line: 21
Column: 5
elements=st.integers(min_value=0, max_value=1),
dtype=np.float32),
**hu.gcs_cpu_only)
def test_pair_wise_loss_predictions(self, X, label, gc, dc):
workspace.FeedBlob('X', X)
workspace.FeedBlob('label', label)
new_label = np.array([label[1], label[0]])
new_x = np.array([X[1], X[0]])
workspace.FeedBlob('new_x', new_x)
Reported by Pylint.
Line: 21
Column: 5
elements=st.integers(min_value=0, max_value=1),
dtype=np.float32),
**hu.gcs_cpu_only)
def test_pair_wise_loss_predictions(self, X, label, gc, dc):
workspace.FeedBlob('X', X)
workspace.FeedBlob('label', label)
new_label = np.array([label[1], label[0]])
new_x = np.array([X[1], X[0]])
workspace.FeedBlob('new_x', new_x)
Reported by Pylint.
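The test excerpts feed 'X' and 'label' blobs and build a net around the PairWiseLoss gradient. A hedged sketch of the same FeedBlob / run / FetchBlob cycle for the forward op; the op name is inferred from the PairWiseLossGradient call above, and the output blob name 'Y' is illustrative:

import numpy as np
from caffe2.python import core, workspace

X = np.array([[0.5], [1.5]], dtype=np.float32)    # scores for a pair of items
label = np.array([[0], [1]], dtype=np.float32)    # which item should rank higher

workspace.FeedBlob('X', X)
workspace.FeedBlob('label', label)
op = core.CreateOperator('PairWiseLoss', ['X', 'label'], ['Y'])
workspace.RunOperatorOnce(op)
print(workspace.FetchBlob('Y'))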
torch/utils/benchmark/utils/sparse_fuzzer.py
30 issues
Line: 19
Column: 15
nnz: Optional[str] = None,
density: Optional[str] = None,
coalesced: Optional[str] = None,
dtype=torch.float32,
cuda=False
):
"""
Args:
name:
Reported by Pylint.
Line: 75
Column: 17
assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments'
v_size = [nnz] + list(size[sparse_dim:])
if dtype.is_floating_point:
v = torch.rand(size=v_size, dtype=dtype, device="cpu")
else:
v = torch.randint(1, 127, size=v_size, dtype=dtype, device="cpu")
i = torch.rand(sparse_dim, nnz, device="cpu")
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
Reported by Pylint.
Line: 77
Column: 17
if dtype.is_floating_point:
v = torch.rand(size=v_size, dtype=dtype, device="cpu")
else:
v = torch.randint(1, 127, size=v_size, dtype=dtype, device="cpu")
i = torch.rand(sparse_dim, nnz, device="cpu")
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
Reported by Pylint.
Line: 79
Column: 13
else:
v = torch.randint(1, 127, size=v_size, dtype=dtype, device="cpu")
i = torch.rand(sparse_dim, nnz, device="cpu")
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
if not is_coalesced:
v = torch.cat([v, torch.randn_like(v)], 0)
Reported by Pylint.
Line: 80
Column: 16
v = torch.randint(1, 127, size=v_size, dtype=dtype, device="cpu")
i = torch.rand(sparse_dim, nnz, device="cpu")
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
if not is_coalesced:
v = torch.cat([v, torch.randn_like(v)], 0)
i = torch.cat([i, i], 1)
Reported by Pylint.
Line: 81
Column: 18
i = torch.rand(sparse_dim, nnz, device="cpu")
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
if not is_coalesced:
v = torch.cat([v, torch.randn_like(v)], 0)
i = torch.cat([i, i], 1)
Reported by Pylint.
Line: 84
Column: 17
i = i.to(torch.long)
if not is_coalesced:
v = torch.cat([v, torch.randn_like(v)], 0)
i = torch.cat([i, i], 1)
x = torch.sparse_coo_tensor(i, v, torch.Size(size))
if is_coalesced:
x = x.coalesce()
Reported by Pylint.
Line: 84
Column: 31
i = i.to(torch.long)
if not is_coalesced:
v = torch.cat([v, torch.randn_like(v)], 0)
i = torch.cat([i, i], 1)
x = torch.sparse_coo_tensor(i, v, torch.Size(size))
if is_coalesced:
x = x.coalesce()
Reported by Pylint.
Line: 85
Column: 17
if not is_coalesced:
v = torch.cat([v, torch.randn_like(v)], 0)
i = torch.cat([i, i], 1)
x = torch.sparse_coo_tensor(i, v, torch.Size(size))
if is_coalesced:
x = x.coalesce()
return x
Reported by Pylint.
Line: 87
Column: 13
v = torch.cat([v, torch.randn_like(v)], 0)
i = torch.cat([i, i], 1)
x = torch.sparse_coo_tensor(i, v, torch.Size(size))
if is_coalesced:
x = x.coalesce()
return x
def _make_tensor(self, params, state):
Reported by Pylint.
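The excerpts above contain the whole sparse-tensor construction: random values, uniform indices scaled up to each sparse dimension, and an optional duplication step for the non-coalesced case. A standalone restatement (the example call at the end is illustrative):

import torch

def make_sparse(size, sparse_dim, nnz, dtype=torch.float32, coalesced=True):
    v_size = [nnz] + list(size[sparse_dim:])
    if dtype.is_floating_point:
        v = torch.rand(size=v_size, dtype=dtype)
    else:
        v = torch.randint(1, 127, size=v_size, dtype=dtype)
    # Uniform draws in [0, 1) scaled by each sparse dim, floored to integer indices.
    i = torch.rand(sparse_dim, nnz)
    i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
    i = i.to(torch.long)
    if not coalesced:
        # Duplicate entries so the tensor is genuinely uncoalesced, as the excerpt does.
        v = torch.cat([v, torch.randn_like(v)], 0)
        i = torch.cat([i, i], 1)
    x = torch.sparse_coo_tensor(i, v, torch.Size(size))
    return x.coalesce() if coalesced else x

print(make_sparse(size=(4, 4, 3), sparse_dim=2, nnz=5))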