The following issues were found:
torch/optim/_multi_tensor/sgd.py
31 issues
Line: 2
Column: 1
import torch
from ..optimizer import Optimizer, required
from collections import defaultdict
class SGD(Optimizer):
r"""Implements stochastic gradient descent (optionally with momentum).
Nesterov momentum is based on the formula from
`On the importance of initialization and momentum in deep learning`__.
Reported by Pylint.
Line: 115
Column: 25
return loss
if weight_decay != 0:
grads = torch._foreach_add(grads, params_with_grad, alpha=weight_decay)
if momentum != 0:
bufs = []
all_states_with_momentum_buffer = True
Reported by Pylint.
Line: 129
Column: 21
bufs.append(states[i]['momentum_buffer'])
if all_states_with_momentum_buffer:
torch._foreach_mul_(bufs, momentum)
torch._foreach_add_(bufs, grads, alpha=1 - dampening)
else:
bufs = []
for i in range(len(states)):
if 'momentum_buffer' not in states[i]:
Reported by Pylint.
Line: 130
Column: 21
if all_states_with_momentum_buffer:
torch._foreach_mul_(bufs, momentum)
torch._foreach_add_(bufs, grads, alpha=1 - dampening)
else:
bufs = []
for i in range(len(states)):
if 'momentum_buffer' not in states[i]:
buf = states[i]['momentum_buffer'] = torch.clone(grads[i]).detach()
Reported by Pylint.
Line: 135
Column: 66
bufs = []
for i in range(len(states)):
if 'momentum_buffer' not in states[i]:
buf = states[i]['momentum_buffer'] = torch.clone(grads[i]).detach()
else:
buf = states[i]['momentum_buffer']
buf.mul_(momentum).add_(grads[i], alpha=1 - dampening)
bufs.append(buf)
Reported by Pylint.
Line: 143
Column: 21
bufs.append(buf)
if nesterov:
torch._foreach_add_(grads, bufs, alpha=momentum)
else:
grads = bufs
if not has_sparse_grad:
torch._foreach_add_(params_with_grad, grads, alpha=-group['lr'])
Reported by Pylint.
Line: 148
Column: 17
grads = bufs
if not has_sparse_grad:
torch._foreach_add_(params_with_grad, grads, alpha=-group['lr'])
else:
# foreach APIs dont support sparse
for i in range(len(params_with_grad)):
params_with_grad[i].add_(grads[i], alpha=-group['lr'])
Reported by Pylint.
Line: 177
Column: 21
for _, per_dtype_grads in per_device_and_dtype_grads.items():
for grads in per_dtype_grads.values():
torch._foreach_zero_(grads)
Reported by Pylint.
Line: 115
Column: 25
return loss
if weight_decay != 0:
grads = torch._foreach_add(grads, params_with_grad, alpha=weight_decay)
if momentum != 0:
bufs = []
all_states_with_momentum_buffer = True
Reported by Pylint.
Line: 129
Column: 21
bufs.append(states[i]['momentum_buffer'])
if all_states_with_momentum_buffer:
torch._foreach_mul_(bufs, momentum)
torch._foreach_add_(bufs, grads, alpha=1 - dampening)
else:
bufs = []
for i in range(len(states)):
if 'momentum_buffer' not in states[i]:
Reported by Pylint.
torch/_linalg_utils.py
31 issues
Line: 14
Column: 28
def is_sparse(A):
"""Check if tensor A is a sparse tensor"""
if isinstance(A, torch.Tensor):
return A.layout == torch.sparse_coo
error_str = "expected Tensor"
if not torch.jit.is_scripting():
error_str += " but got {}".format(type(A))
raise TypeError(error_str)
Reported by Pylint.
Line: 27
Column: 33
Integer types map to float32.
"""
dtype = A.dtype
if dtype in (torch.float16, torch.float32, torch.float64):
return dtype
return torch.float32
def matmul(A: Optional[Tensor], B: Tensor) -> Tensor:
Reported by Pylint.
Line: 27
Column: 18
Integer types map to float32.
"""
dtype = A.dtype
if dtype in (torch.float16, torch.float32, torch.float64):
return dtype
return torch.float32
def matmul(A: Optional[Tensor], B: Tensor) -> Tensor:
Reported by Pylint.
Line: 27
Column: 48
Integer types map to float32.
"""
dtype = A.dtype
if dtype in (torch.float16, torch.float32, torch.float64):
return dtype
return torch.float32
def matmul(A: Optional[Tensor], B: Tensor) -> Tensor:
Reported by Pylint.
Line: 29
Column: 12
dtype = A.dtype
if dtype in (torch.float16, torch.float32, torch.float64):
return dtype
return torch.float32
def matmul(A: Optional[Tensor], B: Tensor) -> Tensor:
"""Multiply two matrices.
Reported by Pylint.
Line: 42
Column: 12
return B
if is_sparse(A):
return torch.sparse.mm(A, B)
return torch.matmul(A, B)
def conjugate(A):
"""Return conjugate of tensor A.
Reported by Pylint.
Line: 87
Column: 13
# torch.orgqr is not available in CUDA
Q = torch.linalg.qr(A).Q
else:
Q = torch.orgqr(*torch.geqrf(A))
return Q
def symeig(A: Tensor, largest: Optional[bool] = False) -> Tuple[Tensor, Tensor]:
"""Return eigenpairs of A with specified ordering.
Reported by Pylint.
Line: 87
Column: 26
# torch.orgqr is not available in CUDA
Q = torch.linalg.qr(A).Q
else:
Q = torch.orgqr(*torch.geqrf(A))
return Q
def symeig(A: Tensor, largest: Optional[bool] = False) -> Tuple[Tensor, Tensor]:
"""Return eigenpairs of A with specified ordering.
Reported by Pylint.
Line: 99
Column: 13
E, Z = torch.linalg.eigh(A, UPLO='U')
# assuming that E is ordered
if largest:
E = torch.flip(E, dims=(-1,))
Z = torch.flip(Z, dims=(-1,))
return E, Z
Reported by Pylint.
Line: 100
Column: 13
# assuming that E is ordered
if largest:
E = torch.flip(E, dims=(-1,))
Z = torch.flip(Z, dims=(-1,))
return E, Z
Reported by Pylint.
torch/distributions/lkj_cholesky.py
31 issues
Line: 61
Column: 23
self.dim = dim
self.concentration, = broadcast_all(concentration)
batch_shape = self.concentration.size()
event_shape = torch.Size((dim, dim))
# This is used to draw vectorized samples from the beta distribution in Sec. 3.2 of [1].
marginal_conc = self.concentration + 0.5 * (self.dim - 2)
offset = torch.arange(self.dim - 1, dtype=self.concentration.dtype, device=self.concentration.device)
offset = torch.cat([offset.new_zeros((1,)), offset])
beta_conc1 = offset + 0.5
Reported by Pylint.
Line: 64
Column: 18
event_shape = torch.Size((dim, dim))
# This is used to draw vectorized samples from the beta distribution in Sec. 3.2 of [1].
marginal_conc = self.concentration + 0.5 * (self.dim - 2)
offset = torch.arange(self.dim - 1, dtype=self.concentration.dtype, device=self.concentration.device)
offset = torch.cat([offset.new_zeros((1,)), offset])
beta_conc1 = offset + 0.5
beta_conc0 = marginal_conc.unsqueeze(-1) - 0.5 * offset
self._beta = Beta(beta_conc1, beta_conc0)
super(LKJCholesky, self).__init__(batch_shape, event_shape, validate_args)
Reported by Pylint.
Line: 65
Column: 18
# This is used to draw vectorized samples from the beta distribution in Sec. 3.2 of [1].
marginal_conc = self.concentration + 0.5 * (self.dim - 2)
offset = torch.arange(self.dim - 1, dtype=self.concentration.dtype, device=self.concentration.device)
offset = torch.cat([offset.new_zeros((1,)), offset])
beta_conc1 = offset + 0.5
beta_conc0 = marginal_conc.unsqueeze(-1) - 0.5 * offset
self._beta = Beta(beta_conc1, beta_conc0)
super(LKJCholesky, self).__init__(batch_shape, event_shape, validate_args)
Reported by Pylint.
Line: 73
Column: 23
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(LKJCholesky, _instance)
batch_shape = torch.Size(batch_shape)
new.dim = self.dim
new.concentration = self.concentration.expand(batch_shape)
new._beta = self._beta.expand(batch_shape + (self.dim,))
super(LKJCholesky, new).__init__(batch_shape, self.event_shape, validate_args=False)
new._validate_args = self._validate_args
Reported by Pylint.
Line: 81
Column: 35
new._validate_args = self._validate_args
return new
def sample(self, sample_shape=torch.Size()):
# This uses the Onion method, but there are a few differences from [1] Sec. 3.2:
# - This vectorizes the for loop and also works for heterogeneous eta.
# - Same algorithm generalizes to n=1.
# - The procedure is simplified since we are sampling the cholesky factor of
# the correlation matrix instead of the correlation matrix itself. As such,
Reported by Pylint.
Line: 89
Column: 20
# the correlation matrix instead of the correlation matrix itself. As such,
# we only need to generate `w`.
y = self._beta.sample(sample_shape).unsqueeze(-1)
u_normal = torch.randn(self._extended_shape(sample_shape),
dtype=y.dtype,
device=y.device).tril(-1)
u_hypersphere = u_normal / u_normal.norm(dim=-1, keepdim=True)
# Replace NaNs in first row
u_hypersphere[..., 0, :].fill_(0.)
Reported by Pylint.
Line: 95
Column: 13
u_hypersphere = u_normal / u_normal.norm(dim=-1, keepdim=True)
# Replace NaNs in first row
u_hypersphere[..., 0, :].fill_(0.)
w = torch.sqrt(y) * u_hypersphere
# Fill diagonal elements; clamp for numerical stability
eps = torch.finfo(w.dtype).tiny
diag_elems = torch.clamp(1 - torch.sum(w**2, dim=-1), min=eps).sqrt()
w += torch.diag_embed(diag_elems)
return w
Reported by Pylint.
Line: 97
Column: 15
u_hypersphere[..., 0, :].fill_(0.)
w = torch.sqrt(y) * u_hypersphere
# Fill diagonal elements; clamp for numerical stability
eps = torch.finfo(w.dtype).tiny
diag_elems = torch.clamp(1 - torch.sum(w**2, dim=-1), min=eps).sqrt()
w += torch.diag_embed(diag_elems)
return w
def log_prob(self, value):
Reported by Pylint.
Line: 98
Column: 22
w = torch.sqrt(y) * u_hypersphere
# Fill diagonal elements; clamp for numerical stability
eps = torch.finfo(w.dtype).tiny
diag_elems = torch.clamp(1 - torch.sum(w**2, dim=-1), min=eps).sqrt()
w += torch.diag_embed(diag_elems)
return w
def log_prob(self, value):
# See: https://mc-stan.org/docs/2_25/functions-reference/cholesky-lkj-correlation-distribution.html
Reported by Pylint.
Line: 98
Column: 38
w = torch.sqrt(y) * u_hypersphere
# Fill diagonal elements; clamp for numerical stability
eps = torch.finfo(w.dtype).tiny
diag_elems = torch.clamp(1 - torch.sum(w**2, dim=-1), min=eps).sqrt()
w += torch.diag_embed(diag_elems)
return w
def log_prob(self, value):
# See: https://mc-stan.org/docs/2_25/functions-reference/cholesky-lkj-correlation-distribution.html
Reported by Pylint.
scripts/release_notes/commitlist.py
31 issues
Line: 11
Column: 1
import re
"""
Example Usages
Create a new commitlist for consumption by categorize.py.
Said commitlist contains commits between v1.5.0 and f5bc91f851.
Reported by Pylint.
Line: 1
Column: 1
import argparse
from common import run, topics
from collections import defaultdict
import os
import csv
import pprint
from common import CommitDataCache
import re
Reported by Pylint.
Line: 3
Column: 1
import argparse
from common import run, topics
from collections import defaultdict
import os
import csv
import pprint
from common import CommitDataCache
import re
Reported by Pylint.
Line: 4
Column: 1
import argparse
from common import run, topics
from collections import defaultdict
import os
import csv
import pprint
from common import CommitDataCache
import re
Reported by Pylint.
Line: 5
Column: 1
from common import run, topics
from collections import defaultdict
import os
import csv
import pprint
from common import CommitDataCache
import re
Reported by Pylint.
Line: 6
Column: 1
from collections import defaultdict
import os
import csv
import pprint
from common import CommitDataCache
import re
"""
Reported by Pylint.
Line: 8
Column: 1
import csv
import pprint
from common import CommitDataCache
import re
"""
Example Usages
Reported by Pylint.
Line: 25
Column: 1
"""
class Commit:
def __init__(self, commit_hash, category, topic, title):
self.commit_hash = commit_hash
self.category = category
self.topic = topic
self.title = title
Reported by Pylint.
Line: 43
Column: 1
def __repr__(self):
return f'Commit({self.commit_hash}, {self.category}, {self.topic}, {self.title})'
class CommitList:
# NB: Private ctor. Use `from_existing` or `create_new`.
def __init__(self, path, commits):
self.path = path
self.commits = commits
Reported by Pylint.
Line: 50
Column: 5
self.commits = commits
@staticmethod
def from_existing(path):
commits = CommitList.read_from_disk(path)
return CommitList(path, commits)
@staticmethod
def create_new(path, base_version, new_version):
Reported by Pylint.
caffe2/python/ideep/shape_op_test.py
31 issues
Line: 7
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
Reported by Pylint.
Line: 8
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
Reported by Pylint.
Line: 15
Column: 22
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class ShapeTest(hu.HypothesisTestCase):
@given(n=st.integers(1, 128),
c=st.integers(1, 128),
h=st.integers(1, 128),
w=st.integers(1, 128),
Reported by Pylint.
Line: 23
Column: 38
w=st.integers(1, 128),
**mu.gcs)
@settings(max_examples=10, deadline=None)
def test_shape(self, n, c, h, w, gc, dc):
op0 = core.CreateOperator(
"Shape",
["X0"],
["Y0"],
device_option=dc[0]
Reported by Pylint.
Line: 48
Column: 13
print(Y1.flatten())
print(Y0.flatten())
print(np.max(np.abs(Y1 - Y0)))
self.assertTrue(False)
@given(n=st.integers(1, 128),
c=st.integers(1, 128),
h=st.integers(1, 128),
w=st.integers(1, 128),
Reported by Pylint.
Line: 57
Column: 54
axes=st.lists(st.integers(0, 3), min_size=1, max_size=3),
**mu.gcs)
@settings(max_examples=10, deadline=None)
def test_shape_with_axes(self, n, c, h, w, axes, gc, dc):
axes = list(set(axes)).sort()
op0 = core.CreateOperator(
"Shape",
["X0"],
["Y0"],
Reported by Pylint.
Line: 85
Column: 13
print(Y1.flatten())
print(Y0.flatten())
print(np.max(np.abs(Y1 - Y0)))
self.assertTrue(False)
if __name__ == "__main__":
unittest.main()
Reported by Pylint.
Line: 1
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
Reported by Pylint.
Line: 16
Column: 1
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class ShapeTest(hu.HypothesisTestCase):
@given(n=st.integers(1, 128),
c=st.integers(1, 128),
h=st.integers(1, 128),
w=st.integers(1, 128),
**mu.gcs)
Reported by Pylint.
Line: 23
Column: 5
w=st.integers(1, 128),
**mu.gcs)
@settings(max_examples=10, deadline=None)
def test_shape(self, n, c, h, w, gc, dc):
op0 = core.CreateOperator(
"Shape",
["X0"],
["Y0"],
device_option=dc[0]
Reported by Pylint.
caffe2/python/ideep/concat_split_op_test.py
31 issues
Line: 7
Column: 1
import numpy as np
import hypothesis.strategies as st
import unittest
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, workspace
from hypothesis import given, settings
import caffe2.python.ideep_test_util as mu
Reported by Pylint.
Line: 11
Column: 1
import unittest
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, workspace
from hypothesis import given, settings
import caffe2.python.ideep_test_util as mu
@st.composite
def _tensor_splits(draw, add_axis=False):
"""Generates (axis, split_info, tensor_splits) tuples."""
Reported by Pylint.
Line: 48
Column: 22
)
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestConcatSplitOps(hu.HypothesisTestCase):
@given(tensor_splits=_tensor_splits(),
**mu.gcs)
@settings(deadline=10000)
def test_concat(self, tensor_splits, gc, dc):
Reported by Pylint.
Line: 91
Column: 23
**kwargs
)
def split_ref(input, split=split_info):
s = np.cumsum([0] + list(split))
return [
np.array(input.take(np.arange(s[i], s[i + 1]), axis=axis))
for i in range(len(split))
]
Reported by Pylint.
Line: 91
Column: 9
**kwargs
)
def split_ref(input, split=split_info):
s = np.cumsum([0] + list(split))
return [
np.array(input.take(np.arange(s[i], s[i + 1]), axis=axis))
for i in range(len(split))
]
Reported by Pylint.
Line: 120
Column: 57
@given(tensor_splits=_tensor_splits(add_axis=True), **mu.gcs)
def test_concat_with_TensorCPU(self, tensor_splits, gc, dc):
axis, _, splits = tensor_splits
op0 = core.CreateOperator(
"Concat",
['X_{}'.format(i) for i in range(len(splits))],
['concat_result0', 'split_info0'],
Reported by Pylint.
Line: 154
Column: 13
print(res1.flatten())
print(res0.flatten())
print(np.max(np.abs(res1 - res0)))
self.assertTrue(False)
if not np.allclose(inf0, inf1, atol=0.0, rtol=0.0):
print(inf1.flatten())
print(inf0.flatten())
print(np.max(np.abs(inf1 - inf0)))
Reported by Pylint.
Line: 160
Column: 13
print(inf1.flatten())
print(inf0.flatten())
print(np.max(np.abs(inf1 - inf0)))
self.assertTrue(False)
if __name__ == "__main__":
unittest.main()
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
import hypothesis.strategies as st
import unittest
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 8
Column: 1
import numpy as np
import hypothesis.strategies as st
import unittest
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, workspace
from hypothesis import given, settings
import caffe2.python.ideep_test_util as mu
Reported by Pylint.
caffe2/contrib/fakelowp/test/test_sls_4bit_nnpi_fp16.py
30 issues
Line: 5
Column: 1
import unittest
# Must happen before importing caffe2.python.*
import caffe2.python.fakelowp.init_shared_libs # noqa
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
Reported by Pylint.
Line: 7
Column: 1
# Must happen before importing caffe2.python.*
import caffe2.python.fakelowp.init_shared_libs # noqa
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
Reported by Pylint.
Line: 8
Column: 1
import caffe2.python.fakelowp.init_shared_libs # noqa
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial
Reported by Pylint.
Line: 9
Column: 1
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial
import datetime
Reported by Pylint.
Line: 10
Column: 1
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial
import datetime
Reported by Pylint.
Line: 11
Column: 1
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial
import datetime
workspace.GlobalInit(["caffe2", "--glow_global_fp16=1",
Reported by Pylint.
Line: 12
Column: 1
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial
import datetime
workspace.GlobalInit(["caffe2", "--glow_global_fp16=1",
"--glow_global_fused_scale_offset_fp16=1",
Reported by Pylint.
Line: 13
Column: 1
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial
import datetime
workspace.GlobalInit(["caffe2", "--glow_global_fp16=1",
"--glow_global_fused_scale_offset_fp16=1",
"--glow_global_force_sls_fp16_accum=1"])
Reported by Pylint.
Line: 5
Column: 1
import unittest
# Must happen before importing caffe2.python.*
import caffe2.python.fakelowp.init_shared_libs # noqa
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
import unittest
# Must happen before importing caffe2.python.*
import caffe2.python.fakelowp.init_shared_libs # noqa
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
Reported by Pylint.
torch/optim/__init__.py
30 issues
Line: 8
Column: 1
future.
"""
from .adadelta import Adadelta
from .adagrad import Adagrad
from .adam import Adam
from .adamw import AdamW
from .sparse_adam import SparseAdam
from .adamax import Adamax
Reported by Pylint.
Line: 9
Column: 1
"""
from .adadelta import Adadelta
from .adagrad import Adagrad
from .adam import Adam
from .adamw import AdamW
from .sparse_adam import SparseAdam
from .adamax import Adamax
from .asgd import ASGD
Reported by Pylint.
Line: 10
Column: 1
from .adadelta import Adadelta
from .adagrad import Adagrad
from .adam import Adam
from .adamw import AdamW
from .sparse_adam import SparseAdam
from .adamax import Adamax
from .asgd import ASGD
from .sgd import SGD
Reported by Pylint.
Line: 11
Column: 1
from .adadelta import Adadelta
from .adagrad import Adagrad
from .adam import Adam
from .adamw import AdamW
from .sparse_adam import SparseAdam
from .adamax import Adamax
from .asgd import ASGD
from .sgd import SGD
from .radam import RAdam
Reported by Pylint.
Line: 12
Column: 1
from .adagrad import Adagrad
from .adam import Adam
from .adamw import AdamW
from .sparse_adam import SparseAdam
from .adamax import Adamax
from .asgd import ASGD
from .sgd import SGD
from .radam import RAdam
from .rprop import Rprop
Reported by Pylint.
Line: 13
Column: 1
from .adam import Adam
from .adamw import AdamW
from .sparse_adam import SparseAdam
from .adamax import Adamax
from .asgd import ASGD
from .sgd import SGD
from .radam import RAdam
from .rprop import Rprop
from .rmsprop import RMSprop
Reported by Pylint.
Line: 14
Column: 1
from .adamw import AdamW
from .sparse_adam import SparseAdam
from .adamax import Adamax
from .asgd import ASGD
from .sgd import SGD
from .radam import RAdam
from .rprop import Rprop
from .rmsprop import RMSprop
from .optimizer import Optimizer
Reported by Pylint.
Line: 15
Column: 1
from .sparse_adam import SparseAdam
from .adamax import Adamax
from .asgd import ASGD
from .sgd import SGD
from .radam import RAdam
from .rprop import Rprop
from .rmsprop import RMSprop
from .optimizer import Optimizer
from .nadam import NAdam
Reported by Pylint.
Line: 16
Column: 1
from .adamax import Adamax
from .asgd import ASGD
from .sgd import SGD
from .radam import RAdam
from .rprop import Rprop
from .rmsprop import RMSprop
from .optimizer import Optimizer
from .nadam import NAdam
from .lbfgs import LBFGS
Reported by Pylint.
Line: 17
Column: 1
from .asgd import ASGD
from .sgd import SGD
from .radam import RAdam
from .rprop import Rprop
from .rmsprop import RMSprop
from .optimizer import Optimizer
from .nadam import NAdam
from .lbfgs import LBFGS
from . import lr_scheduler
Reported by Pylint.
torch/utils/benchmark/utils/sparse_fuzzer.py
30 issues
Line: 19
Column: 15
nnz: Optional[str] = None,
density: Optional[str] = None,
coalesced: Optional[str] = None,
dtype=torch.float32,
cuda=False
):
"""
Args:
name:
Reported by Pylint.
Line: 75
Column: 17
assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments'
v_size = [nnz] + list(size[sparse_dim:])
if dtype.is_floating_point:
v = torch.rand(size=v_size, dtype=dtype, device="cpu")
else:
v = torch.randint(1, 127, size=v_size, dtype=dtype, device="cpu")
i = torch.rand(sparse_dim, nnz, device="cpu")
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
Reported by Pylint.
Line: 77
Column: 17
if dtype.is_floating_point:
v = torch.rand(size=v_size, dtype=dtype, device="cpu")
else:
v = torch.randint(1, 127, size=v_size, dtype=dtype, device="cpu")
i = torch.rand(sparse_dim, nnz, device="cpu")
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
Reported by Pylint.
Line: 79
Column: 13
else:
v = torch.randint(1, 127, size=v_size, dtype=dtype, device="cpu")
i = torch.rand(sparse_dim, nnz, device="cpu")
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
if not is_coalesced:
v = torch.cat([v, torch.randn_like(v)], 0)
Reported by Pylint.
Line: 80
Column: 16
v = torch.randint(1, 127, size=v_size, dtype=dtype, device="cpu")
i = torch.rand(sparse_dim, nnz, device="cpu")
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
if not is_coalesced:
v = torch.cat([v, torch.randn_like(v)], 0)
i = torch.cat([i, i], 1)
Reported by Pylint.
Line: 81
Column: 18
i = torch.rand(sparse_dim, nnz, device="cpu")
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
if not is_coalesced:
v = torch.cat([v, torch.randn_like(v)], 0)
i = torch.cat([i, i], 1)
Reported by Pylint.
Line: 84
Column: 17
i = i.to(torch.long)
if not is_coalesced:
v = torch.cat([v, torch.randn_like(v)], 0)
i = torch.cat([i, i], 1)
x = torch.sparse_coo_tensor(i, v, torch.Size(size))
if is_coalesced:
x = x.coalesce()
Reported by Pylint.
Line: 84
Column: 31
i = i.to(torch.long)
if not is_coalesced:
v = torch.cat([v, torch.randn_like(v)], 0)
i = torch.cat([i, i], 1)
x = torch.sparse_coo_tensor(i, v, torch.Size(size))
if is_coalesced:
x = x.coalesce()
Reported by Pylint.
Line: 85
Column: 17
if not is_coalesced:
v = torch.cat([v, torch.randn_like(v)], 0)
i = torch.cat([i, i], 1)
x = torch.sparse_coo_tensor(i, v, torch.Size(size))
if is_coalesced:
x = x.coalesce()
return x
Reported by Pylint.
Line: 87
Column: 13
v = torch.cat([v, torch.randn_like(v)], 0)
i = torch.cat([i, i], 1)
x = torch.sparse_coo_tensor(i, v, torch.Size(size))
if is_coalesced:
x = x.coalesce()
return x
def _make_tensor(self, params, state):
Reported by Pylint.
caffe2/python/operator_test/channel_backprop_stats_op_test.py
30 issues
Line: 8
Column: 1
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
from hypothesis import given, settings
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
import unittest
Reported by Pylint.
Line: 10
Column: 1
import caffe2.python.hypothesis_test_util as hu
from hypothesis import given, settings
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
import unittest
class TestChannelBackpropStats(serial.SerializedTestCase):
Reported by Pylint.
Line: 23
Column: 76
**hu.gcs
)
@settings(deadline=10000)
def testChannelBackpropStats(self, size, inputChannels, batchSize, gc, dc):
op = core.CreateOperator(
"ChannelBackpropStats",
["X", "mean", "invStdDev", "outputGrad"],
["scaleGrad", "biasGrad"],
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
from hypothesis import given, settings
import caffe2.python.serialized_test.serialized_test_util as serial
Reported by Pylint.
Line: 9
Column: 1
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
from hypothesis import given, settings
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
import unittest
Reported by Pylint.
Line: 12
Column: 1
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
import unittest
class TestChannelBackpropStats(serial.SerializedTestCase):
@given(
size=st.integers(7, 10),
Reported by Pylint.
Line: 15
Column: 1
import unittest
class TestChannelBackpropStats(serial.SerializedTestCase):
@given(
size=st.integers(7, 10),
inputChannels=st.integers(1, 10),
batchSize=st.integers(1, 3),
**hu.gcs
Reported by Pylint.
Line: 22
Column: 5
batchSize=st.integers(1, 3),
**hu.gcs
)
@settings(deadline=10000)
def testChannelBackpropStats(self, size, inputChannels, batchSize, gc, dc):
op = core.CreateOperator(
"ChannelBackpropStats",
["X", "mean", "invStdDev", "outputGrad"],
Reported by Pylint.
Line: 22
Column: 5
batchSize=st.integers(1, 3),
**hu.gcs
)
@settings(deadline=10000)
def testChannelBackpropStats(self, size, inputChannels, batchSize, gc, dc):
op = core.CreateOperator(
"ChannelBackpropStats",
["X", "mean", "invStdDev", "outputGrad"],
Reported by Pylint.
Line: 22
Column: 5
batchSize=st.integers(1, 3),
**hu.gcs
)
@settings(deadline=10000)
def testChannelBackpropStats(self, size, inputChannels, batchSize, gc, dc):
op = core.CreateOperator(
"ChannelBackpropStats",
["X", "mean", "invStdDev", "outputGrad"],
Reported by Pylint.