The following issues were found:
caffe2/python/operator_test/concat_split_op_test.py
27 issues
Line: 9
Column: 1
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
Reported by Pylint.
Line: 10
Column: 1
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
@st.composite
Reported by Pylint.
Line: 128
Column: 23
**kwargs
)
def split_ref(input, split=split_info):
s = np.cumsum([0] + list(split))
return [
np.array(input.take(np.arange(s[i], s[i + 1]), axis=axis))
for i in range(len(split))
]
Reported by Pylint.
Line: 156
Column: 71
**hu.gcs
)
@settings(deadline=10000)
def test_split_by_lengths(self, inputs, split_by_scaling_lengths, gc, dc):
data, lengths = inputs
len_len = len(lengths)
def _find_factor_simple(x):
for i in [2, 3, 5, 7, 9, 11]:
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
Reported by Pylint.
Line: 12
Column: 1
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
@st.composite
def _tensor_splits(draw, add_axis=False):
"""Generates (axis, split_info, tensor_splits) tuples."""
Reported by Pylint.
Line: 20
Column: 5
"""Generates (axis, split_info, tensor_splits) tuples."""
tensor = draw(hu.tensor(min_value=4)) # Each dim has at least 4 elements.
axis = draw(st.integers(-len(tensor.shape), len(tensor.shape) - 1))
if add_axis:
# Simple case: get individual slices along one axis, where each of them
# is (N-1)-dimensional. The axis will be added back upon concatenation.
return (
axis,
np.ones(tensor.shape[axis], dtype=np.int32),
Reported by Pylint.
Line: 47
Column: 1
)
class TestConcatSplitOps(serial.SerializedTestCase):
@serial.given(tensor_splits=_tensor_splits(),
**hu.gcs)
def test_concat(self, tensor_splits, gc, dc):
axis, _, splits = tensor_splits
Reported by Pylint.
Line: 50
Column: 5
class TestConcatSplitOps(serial.SerializedTestCase):
@serial.given(tensor_splits=_tensor_splits(),
**hu.gcs)
def test_concat(self, tensor_splits, gc, dc):
axis, _, splits = tensor_splits
op = core.CreateOperator(
"Concat",
['X_{}'.format(i) for i in range(len(splits))],
Reported by Pylint.
Line: 50
Column: 5
class TestConcatSplitOps(serial.SerializedTestCase):
@serial.given(tensor_splits=_tensor_splits(),
**hu.gcs)
def test_concat(self, tensor_splits, gc, dc):
axis, _, splits = tensor_splits
op = core.CreateOperator(
"Concat",
['X_{}'.format(i) for i in range(len(splits))],
Reported by Pylint.
torch/optim/rmsprop.py
27 issues
Line: 2
Column: 1
import torch
from . import _functional as F
from .optimizer import Optimizer
class RMSprop(Optimizer):
r"""Implements RMSprop algorithm.
Proposed by G. Hinton in his
Reported by Pylint.
Line: 3
Column: 1
import torch
from . import _functional as F
from .optimizer import Optimizer
class RMSprop(Optimizer):
r"""Implements RMSprop algorithm.
Proposed by G. Hinton in his
Reported by Pylint.
Line: 90
Column: 77
# State initialization
if len(state) == 0:
state['step'] = 0
state['square_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['momentum'] > 0:
state['momentum_buffer'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['centered']:
state['grad_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
Reported by Pylint.
Line: 90
Column: 43
# State initialization
if len(state) == 0:
state['step'] = 0
state['square_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['momentum'] > 0:
state['momentum_buffer'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['centered']:
state['grad_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
Reported by Pylint.
Line: 92
Column: 52
state['step'] = 0
state['square_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['momentum'] > 0:
state['momentum_buffer'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['centered']:
state['grad_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
square_avgs.append(state['square_avg'])
Reported by Pylint.
Line: 92
Column: 86
state['step'] = 0
state['square_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['momentum'] > 0:
state['momentum_buffer'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['centered']:
state['grad_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
square_avgs.append(state['square_avg'])
Reported by Pylint.
Line: 94
Column: 45
if group['momentum'] > 0:
state['momentum_buffer'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['centered']:
state['grad_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
square_avgs.append(state['square_avg'])
if group['momentum'] > 0:
momentum_buffer_list.append(state['momentum_buffer'])
Reported by Pylint.
Line: 94
Column: 79
if group['momentum'] > 0:
state['momentum_buffer'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['centered']:
state['grad_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
square_avgs.append(state['square_avg'])
if group['momentum'] > 0:
momentum_buffer_list.append(state['momentum_buffer'])
Reported by Pylint.
Line: 1
Column: 1
import torch
from . import _functional as F
from .optimizer import Optimizer
class RMSprop(Optimizer):
r"""Implements RMSprop algorithm.
Proposed by G. Hinton in his
Reported by Pylint.
Line: 35
Column: 1
"""
def __init__(self, params, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= momentum:
Reported by Pylint.
benchmarks/operator_benchmark/benchmark_caffe2.py
27 issues
Line: 1
Column: 1
from caffe2.python import workspace
from caffe2.python import core
from caffe2.proto import caffe2_pb2
import benchmark_utils
from collections import namedtuple
from benchmark_test_generator import _register_test
"""Caffe2 performance microbenchmarks.
Reported by Pylint.
Line: 2
Column: 1
from caffe2.python import workspace
from caffe2.python import core
from caffe2.proto import caffe2_pb2
import benchmark_utils
from collections import namedtuple
from benchmark_test_generator import _register_test
"""Caffe2 performance microbenchmarks.
Reported by Pylint.
Line: 3
Column: 1
from caffe2.python import workspace
from caffe2.python import core
from caffe2.proto import caffe2_pb2
import benchmark_utils
from collections import namedtuple
from benchmark_test_generator import _register_test
"""Caffe2 performance microbenchmarks.
Reported by Pylint.
Line: 4
Column: 1
from caffe2.python import workspace
from caffe2.python import core
from caffe2.proto import caffe2_pb2
import benchmark_utils
from collections import namedtuple
from benchmark_test_generator import _register_test
"""Caffe2 performance microbenchmarks.
Reported by Pylint.
Line: 6
Column: 1
from caffe2.proto import caffe2_pb2
import benchmark_utils
from collections import namedtuple
from benchmark_test_generator import _register_test
"""Caffe2 performance microbenchmarks.
This module contains Caffe2-specific functionalities for performance
microbenchmarks.
Reported by Pylint.
Line: 125
Column: 76
with core.DeviceScope(self.op_bench.dev):
op = self.op_bench.forward()
if not workspace.RunOperatorMultiple(op, num_runs):
raise ValueError("Unable to run operator test case: {}".format(self.test_name))
def run_backward(self, num_runs, print_per_iter=False):
""" Run the backward path of an operator in a loop
"""
with core.DeviceScope(self.op_bench.dev):
Reported by Pylint.
Line: 133
Column: 85
with core.DeviceScope(self.op_bench.dev):
op = self.op_bench.backward()
if not workspace.RunOperatorMultiple(op, num_runs):
raise ValueError("Unable to run operator gradient test case: {}".format(self.test_name))
def _print_per_iter(self):
pass
Reported by Pylint.
Line: 187
Column: 23
op_metadata.input_dims,
op_metadata.input_types,
str(op_metadata.args))
test_config = TestConfig(test_name, input_config, tags, run_backward=False)
if op is not None:
create_caffe2_op_test_case(
op,
test_config)
Reported by Pylint.
Line: 8
Column: 1
from collections import namedtuple
from benchmark_test_generator import _register_test
"""Caffe2 performance microbenchmarks.
This module contains Caffe2-specific functionalities for performance
microbenchmarks.
"""
Reported by Pylint.
Line: 37
Column: 13
raise ValueError("Missing attrs in configs")
if 'cuda' in device:
self.dev = core.DeviceOption(caffe2_pb2.CUDA, 0)
else:
self.dev = core.DeviceOption(caffe2_pb2.CPU)
return self.dev
def tensor(self, shapes, dtype='float32', device='cpu'):
Reported by Pylint.
torch/distributions/binomial.py
27 issues
Line: 55
Column: 23
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Binomial, _instance)
batch_shape = torch.Size(batch_shape)
new.total_count = self.total_count.expand(batch_shape)
if 'probs' in self.__dict__:
new.probs = self.probs.expand(batch_shape)
new._param = new.probs
if 'logits' in self.__dict__:
Reported by Pylint.
Line: 83
Column: 5
return self.total_count * self.probs * (1 - self.probs)
@lazy_property
def logits(self):
return probs_to_logits(self.probs, is_binary=True)
@lazy_property
def probs(self):
return logits_to_probs(self.logits, is_binary=True)
Reported by Pylint.
Line: 87
Column: 5
return probs_to_logits(self.probs, is_binary=True)
@lazy_property
def probs(self):
return logits_to_probs(self.logits, is_binary=True)
@property
def param_shape(self):
return self._param.size()
Reported by Pylint.
Line: 94
Column: 35
def param_shape(self):
return self._param.size()
def sample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
with torch.no_grad():
return torch.binomial(self.total_count.expand(shape), self.probs.expand(shape))
def log_prob(self, value):
Reported by Pylint.
Line: 97
Column: 20
def sample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
with torch.no_grad():
return torch.binomial(self.total_count.expand(shape), self.probs.expand(shape))
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
log_factorial_n = torch.lgamma(self.total_count + 1)
Reported by Pylint.
Line: 102
Column: 27
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
log_factorial_n = torch.lgamma(self.total_count + 1)
log_factorial_k = torch.lgamma(value + 1)
log_factorial_nmk = torch.lgamma(self.total_count - value + 1)
# k * log(p) + (n - k) * log(1 - p) = k * (log(p) - log(1 - p)) + n * log(1 - p)
# (case logit < 0) = k * logit - n * log1p(e^logit)
# (case logit > 0) = k * logit - n * (log(p) - log(1 - p)) + n * log(p)
Reported by Pylint.
Line: 103
Column: 27
if self._validate_args:
self._validate_sample(value)
log_factorial_n = torch.lgamma(self.total_count + 1)
log_factorial_k = torch.lgamma(value + 1)
log_factorial_nmk = torch.lgamma(self.total_count - value + 1)
# k * log(p) + (n - k) * log(1 - p) = k * (log(p) - log(1 - p)) + n * log(1 - p)
# (case logit < 0) = k * logit - n * log1p(e^logit)
# (case logit > 0) = k * logit - n * (log(p) - log(1 - p)) + n * log(p)
# = k * logit - n * logit - n * log1p(e^-logit)
Reported by Pylint.
Line: 104
Column: 29
self._validate_sample(value)
log_factorial_n = torch.lgamma(self.total_count + 1)
log_factorial_k = torch.lgamma(value + 1)
log_factorial_nmk = torch.lgamma(self.total_count - value + 1)
# k * log(p) + (n - k) * log(1 - p) = k * (log(p) - log(1 - p)) + n * log(1 - p)
# (case logit < 0) = k * logit - n * log1p(e^logit)
# (case logit > 0) = k * logit - n * (log(p) - log(1 - p)) + n * log(p)
# = k * logit - n * logit - n * log1p(e^-logit)
# (merge two cases) = k * logit - n * max(logit, 0) - n * log1p(e^-|logit|)
Reported by Pylint.
Line: 111
Column: 71
# = k * logit - n * logit - n * log1p(e^-logit)
# (merge two cases) = k * logit - n * max(logit, 0) - n * log1p(e^-|logit|)
normalize_term = (self.total_count * _clamp_by_zero(self.logits)
+ self.total_count * torch.log1p(torch.exp(-torch.abs(self.logits)))
- log_factorial_n)
return value * self.logits - log_factorial_k - log_factorial_nmk - normalize_term
def enumerate_support(self, expand=True):
total_count = int(self.total_count.max())
Reported by Pylint.
Line: 111
Column: 48
# = k * logit - n * logit - n * log1p(e^-logit)
# (merge two cases) = k * logit - n * max(logit, 0) - n * log1p(e^-|logit|)
normalize_term = (self.total_count * _clamp_by_zero(self.logits)
+ self.total_count * torch.log1p(torch.exp(-torch.abs(self.logits)))
- log_factorial_n)
return value * self.logits - log_factorial_k - log_factorial_nmk - normalize_term
def enumerate_support(self, expand=True):
total_count = int(self.total_count.max())
Reported by Pylint.
torch/fx/experimental/normalize.py
27 issues
Line: 15
Column: 1
create_type_hint,
)
from .schema_type_annotation import AnnotateTypesWithSchema
class NormalizeArgs(Transformer):
"""
Normalize arguments to Python targets. This means that
Reported by Pylint.
Line: 128
Column: 9
binary_magic_method_remap: Dict[
Callable[[Any, Any], Any], Callable[[Any, Any], Any]
] = {
torch.add: operator.add,
torch.mul: operator.mul,
torch.sub: operator.sub,
torch.div: operator.truediv,
torch.floor_divide: operator.floordiv,
torch.remainder: operator.mod,
Reported by Pylint.
Line: 129
Column: 9
Callable[[Any, Any], Any], Callable[[Any, Any], Any]
] = {
torch.add: operator.add,
torch.mul: operator.mul,
torch.sub: operator.sub,
torch.div: operator.truediv,
torch.floor_divide: operator.floordiv,
torch.remainder: operator.mod,
torch.eq: operator.eq,
Reported by Pylint.
Line: 130
Column: 9
] = {
torch.add: operator.add,
torch.mul: operator.mul,
torch.sub: operator.sub,
torch.div: operator.truediv,
torch.floor_divide: operator.floordiv,
torch.remainder: operator.mod,
torch.eq: operator.eq,
torch.ne: operator.ne,
Reported by Pylint.
Line: 131
Column: 9
torch.add: operator.add,
torch.mul: operator.mul,
torch.sub: operator.sub,
torch.div: operator.truediv,
torch.floor_divide: operator.floordiv,
torch.remainder: operator.mod,
torch.eq: operator.eq,
torch.ne: operator.ne,
torch.lt: operator.lt,
Reported by Pylint.
Line: 132
Column: 9
torch.mul: operator.mul,
torch.sub: operator.sub,
torch.div: operator.truediv,
torch.floor_divide: operator.floordiv,
torch.remainder: operator.mod,
torch.eq: operator.eq,
torch.ne: operator.ne,
torch.lt: operator.lt,
torch.le: operator.le,
Reported by Pylint.
Line: 133
Column: 9
torch.sub: operator.sub,
torch.div: operator.truediv,
torch.floor_divide: operator.floordiv,
torch.remainder: operator.mod,
torch.eq: operator.eq,
torch.ne: operator.ne,
torch.lt: operator.lt,
torch.le: operator.le,
torch.gt: operator.gt,
Reported by Pylint.
Line: 134
Column: 9
torch.div: operator.truediv,
torch.floor_divide: operator.floordiv,
torch.remainder: operator.mod,
torch.eq: operator.eq,
torch.ne: operator.ne,
torch.lt: operator.lt,
torch.le: operator.le,
torch.gt: operator.gt,
torch.ge: operator.ge,
Reported by Pylint.
Line: 135
Column: 9
torch.floor_divide: operator.floordiv,
torch.remainder: operator.mod,
torch.eq: operator.eq,
torch.ne: operator.ne,
torch.lt: operator.lt,
torch.le: operator.le,
torch.gt: operator.gt,
torch.ge: operator.ge,
}
Reported by Pylint.
Line: 136
Column: 9
torch.remainder: operator.mod,
torch.eq: operator.eq,
torch.ne: operator.ne,
torch.lt: operator.lt,
torch.le: operator.le,
torch.gt: operator.gt,
torch.ge: operator.ge,
}
Reported by Pylint.
caffe2/experiments/python/sparse_reshape_op_test.py
27 issues
Line: 22
Column: 1
import numpy as np
from scipy.sparse import coo_matrix
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
Reported by Pylint.
Line: 1
Column: 1
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
Reported by Pylint.
Line: 28
Column: 1
from caffe2.python.test_util import TestCase
def test_reshape(old_shape, new_shape, stride_only=False):
blob_in0 = 'col'
blob_out0 = 'col_out'
blob_in1 = 'row'
blob_out1 = 'row_out'
Reported by Pylint.
Line: 28
Column: 1
from caffe2.python.test_util import TestCase
def test_reshape(old_shape, new_shape, stride_only=False):
blob_in0 = 'col'
blob_out0 = 'col_out'
blob_in1 = 'row'
blob_out1 = 'row_out'
Reported by Pylint.
Line: 37
Column: 5
old_shape_for_op = (-1, old_shape[1]) if stride_only else old_shape
op = core.CreateOperator('SparseMatrixReshape',
[blob_in0, blob_in1],
[blob_out0, blob_out1],
old_shape=old_shape_for_op,
new_shape=new_shape)
Reported by Pylint.
Line: 43
Column: 5
old_shape=old_shape_for_op,
new_shape=new_shape)
A = np.random.random_sample(old_shape)
A[np.random.random_sample(old_shape) > .5] = 0
A_coo = coo_matrix(A)
old_row, old_col = A_coo.row, A_coo.col
workspace.FeedBlob(blob_in0, old_col.astype(np.int64))
Reported by Pylint.
Line: 45
Column: 5
A = np.random.random_sample(old_shape)
A[np.random.random_sample(old_shape) > .5] = 0
A_coo = coo_matrix(A)
old_row, old_col = A_coo.row, A_coo.col
workspace.FeedBlob(blob_in0, old_col.astype(np.int64))
workspace.FeedBlob(blob_in1, old_row.astype(np.int32))
Reported by Pylint.
Line: 53
Column: 5
workspace.RunOperatorOnce(op)
A_new_coo = coo_matrix(A.reshape(new_shape))
new_row, new_col = A_new_coo.row, A_new_coo.col
col_out = workspace.FetchBlob(blob_out0)
row_out = workspace.FetchBlob(blob_out1)
Reported by Pylint.
Line: 63
Column: 1
np.testing.assert_array_equal(row_out, new_row)
class TestSparseMatrixReshapeOp(TestCase):
def test_basic_reshape(self):
test_reshape(old_shape=(3, 4), new_shape=(4, 3))
def test_missing_dim(self):
test_reshape(old_shape=(2, 8), new_shape=(-1, 4))
Reported by Pylint.
Line: 64
Column: 5
class TestSparseMatrixReshapeOp(TestCase):
def test_basic_reshape(self):
test_reshape(old_shape=(3, 4), new_shape=(4, 3))
def test_missing_dim(self):
test_reshape(old_shape=(2, 8), new_shape=(-1, 4))
Reported by Pylint.
torch/nn/quantized/modules/__init__.py
27 issues
Line: 4
Column: 1
import torch
from torch.nn.modules.pooling import MaxPool2d
from .activation import ReLU6, Hardswish, ELU, LeakyReLU, Sigmoid
from .batchnorm import BatchNorm2d, BatchNorm3d
from .normalization import LayerNorm, GroupNorm, InstanceNorm1d, \
InstanceNorm2d, InstanceNorm3d
from .conv import _ConvNd, Conv1d, Conv2d, Conv3d
from .conv import ConvTranspose1d, ConvTranspose2d, ConvTranspose3d
Reported by Pylint.
Line: 5
Column: 1
from torch.nn.modules.pooling import MaxPool2d
from .activation import ReLU6, Hardswish, ELU, LeakyReLU, Sigmoid
from .batchnorm import BatchNorm2d, BatchNorm3d
from .normalization import LayerNorm, GroupNorm, InstanceNorm1d, \
InstanceNorm2d, InstanceNorm3d
from .conv import _ConvNd, Conv1d, Conv2d, Conv3d
from .conv import ConvTranspose1d, ConvTranspose2d, ConvTranspose3d
from .linear import Linear
Reported by Pylint.
Line: 6
Column: 1
from .activation import ReLU6, Hardswish, ELU, LeakyReLU, Sigmoid
from .batchnorm import BatchNorm2d, BatchNorm3d
from .normalization import LayerNorm, GroupNorm, InstanceNorm1d, \
InstanceNorm2d, InstanceNorm3d
from .conv import _ConvNd, Conv1d, Conv2d, Conv3d
from .conv import ConvTranspose1d, ConvTranspose2d, ConvTranspose3d
from .linear import Linear
from .embedding_ops import Embedding, EmbeddingBag
Reported by Pylint.
Line: 8
Column: 1
from .batchnorm import BatchNorm2d, BatchNorm3d
from .normalization import LayerNorm, GroupNorm, InstanceNorm1d, \
InstanceNorm2d, InstanceNorm3d
from .conv import _ConvNd, Conv1d, Conv2d, Conv3d
from .conv import ConvTranspose1d, ConvTranspose2d, ConvTranspose3d
from .linear import Linear
from .embedding_ops import Embedding, EmbeddingBag
from .functional_modules import FloatFunctional, FXFloatFunctional, QFunctional
Reported by Pylint.
Line: 9
Column: 1
from .normalization import LayerNorm, GroupNorm, InstanceNorm1d, \
InstanceNorm2d, InstanceNorm3d
from .conv import _ConvNd, Conv1d, Conv2d, Conv3d
from .conv import ConvTranspose1d, ConvTranspose2d, ConvTranspose3d
from .linear import Linear
from .embedding_ops import Embedding, EmbeddingBag
from .functional_modules import FloatFunctional, FXFloatFunctional, QFunctional
Reported by Pylint.
Line: 10
Column: 1
InstanceNorm2d, InstanceNorm3d
from .conv import _ConvNd, Conv1d, Conv2d, Conv3d
from .conv import ConvTranspose1d, ConvTranspose2d, ConvTranspose3d
from .linear import Linear
from .embedding_ops import Embedding, EmbeddingBag
from .functional_modules import FloatFunctional, FXFloatFunctional, QFunctional
Reported by Pylint.
Line: 11
Column: 1
from .conv import _ConvNd, Conv1d, Conv2d, Conv3d
from .conv import ConvTranspose1d, ConvTranspose2d, ConvTranspose3d
from .linear import Linear
from .embedding_ops import Embedding, EmbeddingBag
from .functional_modules import FloatFunctional, FXFloatFunctional, QFunctional
class Quantize(torch.nn.Module):
Reported by Pylint.
Line: 13
Column: 1
from .linear import Linear
from .embedding_ops import Embedding, EmbeddingBag
from .functional_modules import FloatFunctional, FXFloatFunctional, QFunctional
class Quantize(torch.nn.Module):
r"""Quantizes an incoming tensor
Reported by Pylint.
Line: 45
Column: 39
def __init__(self, scale, zero_point, dtype, factory_kwargs=None):
factory_kwargs = torch.nn.factory_kwargs(factory_kwargs)
super(Quantize, self).__init__()
self.register_buffer('scale', torch.tensor([scale], **factory_kwargs))
self.register_buffer('zero_point',
torch.tensor([zero_point], dtype=torch.long,
**{k: v for k, v in factory_kwargs.items() if k != 'dtype'}))
self.dtype = dtype
Reported by Pylint.
Line: 47
Column: 30
super(Quantize, self).__init__()
self.register_buffer('scale', torch.tensor([scale], **factory_kwargs))
self.register_buffer('zero_point',
torch.tensor([zero_point], dtype=torch.long,
**{k: v for k, v in factory_kwargs.items() if k != 'dtype'}))
self.dtype = dtype
def forward(self, X):
return torch.quantize_per_tensor(X, float(self.scale),
Reported by Pylint.
torch/optim/_multi_tensor/nadam.py
27 issues
Line: 2
Column: 1
import torch
from . import _functional as F
from ..optimizer import Optimizer
from collections import defaultdict
class NAdam(Optimizer):
r"""Implements NAdam algorithm with multi tensor APIs.
It has been proposed in `Incorporating Nesterov Momentum into Adam`_.
Reported by Pylint.
Line: 3
Column: 1
import torch
from . import _functional as F
from ..optimizer import Optimizer
from collections import defaultdict
class NAdam(Optimizer):
r"""Implements NAdam algorithm with multi tensor APIs.
It has been proposed in `Incorporating Nesterov Momentum into Adam`_.
Reported by Pylint.
Line: 80
Column: 40
state['step'] = 0
state['mu_product'] = 1.
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avg.append(state['exp_avg'])
exp_avg_sq.append(state['exp_avg_sq'])
Reported by Pylint.
Line: 80
Column: 74
state['step'] = 0
state['mu_product'] = 1.
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avg.append(state['exp_avg'])
exp_avg_sq.append(state['exp_avg_sq'])
Reported by Pylint.
Line: 82
Column: 43
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avg.append(state['exp_avg'])
exp_avg_sq.append(state['exp_avg_sq'])
state['step'] += 1
Reported by Pylint.
Line: 82
Column: 77
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avg.append(state['exp_avg'])
exp_avg_sq.append(state['exp_avg_sq'])
state['step'] += 1
Reported by Pylint.
Line: 130
Column: 21
for _, per_dtype_grads in per_device_and_dtype_grads.items():
for grads in per_dtype_grads.values():
torch._foreach_zero_(grads)
Reported by Pylint.
Line: 109
Column: 3
return loss
# TODO: refactor to a base class once foreach ops are in a good shape.
def zero_grad(self, set_to_none: bool = False):
per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list))
for group in self.param_groups:
for p in group['params']:
if p.grad is not None:
Reported by Pylint.
Line: 130
Column: 21
for _, per_dtype_grads in per_device_and_dtype_grads.items():
for grads in per_dtype_grads.values():
torch._foreach_zero_(grads)
Reported by Pylint.
Line: 1
Column: 1
import torch
from . import _functional as F
from ..optimizer import Optimizer
from collections import defaultdict
class NAdam(Optimizer):
r"""Implements NAdam algorithm with multi tensor APIs.
It has been proposed in `Incorporating Nesterov Momentum into Adam`_.
Reported by Pylint.
caffe2/python/parallelize_bmuf_distributed_test.py
26 issues
Line: 13
Column: 1
import shutil
import logging
from hypothesis import given, settings
import hypothesis.strategies as st
from caffe2.python import workspace
log = logging.getLogger("parallelize_bmuf_distributed_test")
Reported by Pylint.
Line: 14
Column: 1
import logging
from hypothesis import given, settings
import hypothesis.strategies as st
from caffe2.python import workspace
log = logging.getLogger("parallelize_bmuf_distributed_test")
log.setLevel(logging.INFO)
Reported by Pylint.
Line: 64
Column: 28
model.param_init_net.UniformFill([], ["sync_num"], shape=[1])
return [loss]
def _input_builder_fun(model):
return None
def _param_update_fun(model):
ITER = model.Iter("ITER")
LR = model.net.LearningRate(
Reported by Pylint.
Line: 92
Column: 17
batch_per_device = batch_size // len(devices)
for (j, g) in enumerate(devices):
st = j * batch_per_device
en = st + batch_per_device
data = full_data[st:en, :].astype(np.float32)
labels = full_labels[st:en].astype(np.float32)
with core.DeviceScope(core.DeviceOption(device_type, g)):
workspace.FeedBlob("{}_{}/data".format(device_prefix, g), data)
Reported by Pylint.
Line: 184
Column: 26
"{}_{}/fc_w_g".format(device_prefix, _device_pid(0, process_id)))
results['b_g_'] = b_g_
results['w_g_'] = w_g_
workspace.RunNetOnce(model._global_model_param_updates_net)
# g_b = (b_0_ + b_1_) / 2 - b_g_
# g_w = (w_0_ + w_1_) / 2 - w_g_
v_b = workspace.FetchBlob(
"{}_{}/fc_b_v".format(device_prefix, _device_pid(0, process_id)))
Reported by Pylint.
Line: 1
Column: 1
from multiprocessing import Process, Manager
import numpy as np
import unittest
import tempfile
Reported by Pylint.
Line: 8
Column: 1
from multiprocessing import Process, Manager
import numpy as np
import unittest
import tempfile
import shutil
import logging
from hypothesis import given, settings
Reported by Pylint.
Line: 9
Column: 1
import numpy as np
import unittest
import tempfile
import shutil
import logging
from hypothesis import given, settings
import hypothesis.strategies as st
Reported by Pylint.
Line: 10
Column: 1
import numpy as np
import unittest
import tempfile
import shutil
import logging
from hypothesis import given, settings
import hypothesis.strategies as st
Reported by Pylint.
Line: 11
Column: 1
import unittest
import tempfile
import shutil
import logging
from hypothesis import given, settings
import hypothesis.strategies as st
from caffe2.python import workspace
Reported by Pylint.
caffe2/python/operator_test/lengths_top_k_ops_test.py
26 issues
Line: 7
Column: 1
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
Reported by Pylint.
Line: 10
Column: 1
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
class TestLengthsTopKOps(serial.SerializedTestCase):
@serial.given(N=st.integers(min_value=0, max_value=10),
Reported by Pylint.
Line: 54
Column: 27
X = np.array([], dtype=np.float32)
op = core.CreateOperator("LengthsTopK", ["X", "Y"], ["values", "indices"], k=K)
def lengths_top_k(X, lens):
return (np.zeros((N, K), dtype=np.float32),
-1 * np.ones((N, K), dtype=np.int32))
self.assertDeviceChecks(dc, op, [X, lens], [0, 1])
self.assertReferenceChecks(gc, op, [X, lens], lengths_top_k)
Reported by Pylint.
Line: 54
Column: 30
X = np.array([], dtype=np.float32)
op = core.CreateOperator("LengthsTopK", ["X", "Y"], ["values", "indices"], k=K)
def lengths_top_k(X, lens):
return (np.zeros((N, K), dtype=np.float32),
-1 * np.ones((N, K), dtype=np.int32))
self.assertDeviceChecks(dc, op, [X, lens], [0, 1])
self.assertReferenceChecks(gc, op, [X, lens], lengths_top_k)
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
Reported by Pylint.
Line: 14
Column: 1
import numpy as np
class TestLengthsTopKOps(serial.SerializedTestCase):
@serial.given(N=st.integers(min_value=0, max_value=10),
K=st.integers(min_value=1, max_value=10),
**hu.gcs_cpu_only)
def test_lengths_top_k_op(self, N, K, gc, dc):
lens = np.random.randint(low=1, high=2 * K + 1, size=N).astype(np.int32)
Reported by Pylint.
Line: 18
Column: 5
@serial.given(N=st.integers(min_value=0, max_value=10),
K=st.integers(min_value=1, max_value=10),
**hu.gcs_cpu_only)
def test_lengths_top_k_op(self, N, K, gc, dc):
lens = np.random.randint(low=1, high=2 * K + 1, size=N).astype(np.int32)
X = []
for i in lens:
X.extend(x / 100.0 for x in range(0, 6 * i, 6))
X = np.array(X, dtype=np.float32)
Reported by Pylint.
Line: 18
Column: 5
@serial.given(N=st.integers(min_value=0, max_value=10),
K=st.integers(min_value=1, max_value=10),
**hu.gcs_cpu_only)
def test_lengths_top_k_op(self, N, K, gc, dc):
lens = np.random.randint(low=1, high=2 * K + 1, size=N).astype(np.int32)
X = []
for i in lens:
X.extend(x / 100.0 for x in range(0, 6 * i, 6))
X = np.array(X, dtype=np.float32)
Reported by Pylint.
Line: 18
Column: 5
@serial.given(N=st.integers(min_value=0, max_value=10),
K=st.integers(min_value=1, max_value=10),
**hu.gcs_cpu_only)
def test_lengths_top_k_op(self, N, K, gc, dc):
lens = np.random.randint(low=1, high=2 * K + 1, size=N).astype(np.int32)
X = []
for i in lens:
X.extend(x / 100.0 for x in range(0, 6 * i, 6))
X = np.array(X, dtype=np.float32)
Reported by Pylint.
Line: 18
Column: 5
@serial.given(N=st.integers(min_value=0, max_value=10),
K=st.integers(min_value=1, max_value=10),
**hu.gcs_cpu_only)
def test_lengths_top_k_op(self, N, K, gc, dc):
lens = np.random.randint(low=1, high=2 * K + 1, size=N).astype(np.int32)
X = []
for i in lens:
X.extend(x / 100.0 for x in range(0, 6 * i, 6))
X = np.array(X, dtype=np.float32)
Reported by Pylint.