The following issues were found:
caffe2/python/operator_test/trigonometric_op_test.py
27 issues
Line: 7
Column: 1
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import numpy as np
import unittest
Reported by Pylint.
Line: 44
Column: 50
def test_tan(self, X, gc, dc):
self.assertTrigonometricChecks("Tan", X, lambda x: (np.tan(X),), gc, dc)
def assertTrigonometricChecks(self, op_name, input, reference, gc, dc):
op = core.CreateOperator(op_name, ["X"], ["Y"])
self.assertReferenceChecks(gc, op, [input], reference)
self.assertDeviceChecks(dc, op, [input], [0])
self.assertGradientChecks(gc, op, [input], 0, [0])
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
Reported by Pylint.
Line: 12
Column: 1
import caffe2.python.serialized_test.serialized_test_util as serial
import numpy as np
import unittest
class TestTrigonometricOp(serial.SerializedTestCase):
@given(
X=hu.tensor(elements=hu.floats(min_value=-0.7, max_value=0.7)),
Reported by Pylint.
Line: 15
Column: 1
import unittest
class TestTrigonometricOp(serial.SerializedTestCase):
@given(
X=hu.tensor(elements=hu.floats(min_value=-0.7, max_value=0.7)),
**hu.gcs)
@settings(deadline=None, max_examples=50)
def test_acos(self, X, gc, dc):
Reported by Pylint.
Line: 20
Column: 5
X=hu.tensor(elements=hu.floats(min_value=-0.7, max_value=0.7)),
**hu.gcs)
@settings(deadline=None, max_examples=50)
def test_acos(self, X, gc, dc):
self.assertTrigonometricChecks("Acos", X, lambda x: (np.arccos(X),), gc, dc)
@given(
X=hu.tensor(elements=hu.floats(min_value=-0.7, max_value=0.7)),
**hu.gcs)
Reported by Pylint.
Line: 20
Column: 5
X=hu.tensor(elements=hu.floats(min_value=-0.7, max_value=0.7)),
**hu.gcs)
@settings(deadline=None, max_examples=50)
def test_acos(self, X, gc, dc):
self.assertTrigonometricChecks("Acos", X, lambda x: (np.arccos(X),), gc, dc)
@given(
X=hu.tensor(elements=hu.floats(min_value=-0.7, max_value=0.7)),
**hu.gcs)
Reported by Pylint.
Line: 20
Column: 5
X=hu.tensor(elements=hu.floats(min_value=-0.7, max_value=0.7)),
**hu.gcs)
@settings(deadline=None, max_examples=50)
def test_acos(self, X, gc, dc):
self.assertTrigonometricChecks("Acos", X, lambda x: (np.arccos(X),), gc, dc)
@given(
X=hu.tensor(elements=hu.floats(min_value=-0.7, max_value=0.7)),
**hu.gcs)
Reported by Pylint.
Line: 20
Column: 5
X=hu.tensor(elements=hu.floats(min_value=-0.7, max_value=0.7)),
**hu.gcs)
@settings(deadline=None, max_examples=50)
def test_acos(self, X, gc, dc):
self.assertTrigonometricChecks("Acos", X, lambda x: (np.arccos(X),), gc, dc)
@given(
X=hu.tensor(elements=hu.floats(min_value=-0.7, max_value=0.7)),
**hu.gcs)
Reported by Pylint.
Line: 27
Column: 5
X=hu.tensor(elements=hu.floats(min_value=-0.7, max_value=0.7)),
**hu.gcs)
@settings(deadline=None, max_examples=50)
def test_asin(self, X, gc, dc):
self.assertTrigonometricChecks("Asin", X, lambda x: (np.arcsin(X),), gc, dc)
@given(
X=hu.tensor(elements=hu.floats(min_value=-100, max_value=100)),
**hu.gcs)
Reported by Pylint.
torch/distributions/binomial.py
27 issues
Line: 55
Column: 23
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Binomial, _instance)
batch_shape = torch.Size(batch_shape)
new.total_count = self.total_count.expand(batch_shape)
if 'probs' in self.__dict__:
new.probs = self.probs.expand(batch_shape)
new._param = new.probs
if 'logits' in self.__dict__:
Reported by Pylint.
Line: 83
Column: 5
return self.total_count * self.probs * (1 - self.probs)
@lazy_property
def logits(self):
return probs_to_logits(self.probs, is_binary=True)
@lazy_property
def probs(self):
return logits_to_probs(self.logits, is_binary=True)
Reported by Pylint.
Line: 87
Column: 5
return probs_to_logits(self.probs, is_binary=True)
@lazy_property
def probs(self):
return logits_to_probs(self.logits, is_binary=True)
@property
def param_shape(self):
return self._param.size()
Reported by Pylint.
Line: 94
Column: 35
def param_shape(self):
return self._param.size()
def sample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
with torch.no_grad():
return torch.binomial(self.total_count.expand(shape), self.probs.expand(shape))
def log_prob(self, value):
Reported by Pylint.
Line: 97
Column: 20
def sample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
with torch.no_grad():
return torch.binomial(self.total_count.expand(shape), self.probs.expand(shape))
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
log_factorial_n = torch.lgamma(self.total_count + 1)
Reported by Pylint.
Line: 102
Column: 27
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
log_factorial_n = torch.lgamma(self.total_count + 1)
log_factorial_k = torch.lgamma(value + 1)
log_factorial_nmk = torch.lgamma(self.total_count - value + 1)
# k * log(p) + (n - k) * log(1 - p) = k * (log(p) - log(1 - p)) + n * log(1 - p)
# (case logit < 0) = k * logit - n * log1p(e^logit)
# (case logit > 0) = k * logit - n * (log(p) - log(1 - p)) + n * log(p)
Reported by Pylint.
Line: 103
Column: 27
if self._validate_args:
self._validate_sample(value)
log_factorial_n = torch.lgamma(self.total_count + 1)
log_factorial_k = torch.lgamma(value + 1)
log_factorial_nmk = torch.lgamma(self.total_count - value + 1)
# k * log(p) + (n - k) * log(1 - p) = k * (log(p) - log(1 - p)) + n * log(1 - p)
# (case logit < 0) = k * logit - n * log1p(e^logit)
# (case logit > 0) = k * logit - n * (log(p) - log(1 - p)) + n * log(p)
# = k * logit - n * logit - n * log1p(e^-logit)
Reported by Pylint.
Line: 104
Column: 29
self._validate_sample(value)
log_factorial_n = torch.lgamma(self.total_count + 1)
log_factorial_k = torch.lgamma(value + 1)
log_factorial_nmk = torch.lgamma(self.total_count - value + 1)
# k * log(p) + (n - k) * log(1 - p) = k * (log(p) - log(1 - p)) + n * log(1 - p)
# (case logit < 0) = k * logit - n * log1p(e^logit)
# (case logit > 0) = k * logit - n * (log(p) - log(1 - p)) + n * log(p)
# = k * logit - n * logit - n * log1p(e^-logit)
# (merge two cases) = k * logit - n * max(logit, 0) - n * log1p(e^-|logit|)
Reported by Pylint.
Line: 111
Column: 71
# = k * logit - n * logit - n * log1p(e^-logit)
# (merge two cases) = k * logit - n * max(logit, 0) - n * log1p(e^-|logit|)
normalize_term = (self.total_count * _clamp_by_zero(self.logits)
+ self.total_count * torch.log1p(torch.exp(-torch.abs(self.logits)))
- log_factorial_n)
return value * self.logits - log_factorial_k - log_factorial_nmk - normalize_term
def enumerate_support(self, expand=True):
total_count = int(self.total_count.max())
Reported by Pylint.
Line: 111
Column: 48
# = k * logit - n * logit - n * log1p(e^-logit)
# (merge two cases) = k * logit - n * max(logit, 0) - n * log1p(e^-|logit|)
normalize_term = (self.total_count * _clamp_by_zero(self.logits)
+ self.total_count * torch.log1p(torch.exp(-torch.abs(self.logits)))
- log_factorial_n)
return value * self.logits - log_factorial_k - log_factorial_nmk - normalize_term
def enumerate_support(self, expand=True):
total_count = int(self.total_count.max())
Reported by Pylint.
torch/fx/experimental/normalize.py
27 issues
Line: 15
Column: 1
create_type_hint,
)
from .schema_type_annotation import AnnotateTypesWithSchema
class NormalizeArgs(Transformer):
"""
Normalize arguments to Python targets. This means that
Reported by Pylint.
Line: 128
Column: 9
binary_magic_method_remap: Dict[
Callable[[Any, Any], Any], Callable[[Any, Any], Any]
] = {
torch.add: operator.add,
torch.mul: operator.mul,
torch.sub: operator.sub,
torch.div: operator.truediv,
torch.floor_divide: operator.floordiv,
torch.remainder: operator.mod,
Reported by Pylint.
Line: 129
Column: 9
Callable[[Any, Any], Any], Callable[[Any, Any], Any]
] = {
torch.add: operator.add,
torch.mul: operator.mul,
torch.sub: operator.sub,
torch.div: operator.truediv,
torch.floor_divide: operator.floordiv,
torch.remainder: operator.mod,
torch.eq: operator.eq,
Reported by Pylint.
Line: 130
Column: 9
] = {
torch.add: operator.add,
torch.mul: operator.mul,
torch.sub: operator.sub,
torch.div: operator.truediv,
torch.floor_divide: operator.floordiv,
torch.remainder: operator.mod,
torch.eq: operator.eq,
torch.ne: operator.ne,
Reported by Pylint.
Line: 131
Column: 9
torch.add: operator.add,
torch.mul: operator.mul,
torch.sub: operator.sub,
torch.div: operator.truediv,
torch.floor_divide: operator.floordiv,
torch.remainder: operator.mod,
torch.eq: operator.eq,
torch.ne: operator.ne,
torch.lt: operator.lt,
Reported by Pylint.
Line: 132
Column: 9
torch.mul: operator.mul,
torch.sub: operator.sub,
torch.div: operator.truediv,
torch.floor_divide: operator.floordiv,
torch.remainder: operator.mod,
torch.eq: operator.eq,
torch.ne: operator.ne,
torch.lt: operator.lt,
torch.le: operator.le,
Reported by Pylint.
Line: 133
Column: 9
torch.sub: operator.sub,
torch.div: operator.truediv,
torch.floor_divide: operator.floordiv,
torch.remainder: operator.mod,
torch.eq: operator.eq,
torch.ne: operator.ne,
torch.lt: operator.lt,
torch.le: operator.le,
torch.gt: operator.gt,
Reported by Pylint.
Line: 134
Column: 9
torch.div: operator.truediv,
torch.floor_divide: operator.floordiv,
torch.remainder: operator.mod,
torch.eq: operator.eq,
torch.ne: operator.ne,
torch.lt: operator.lt,
torch.le: operator.le,
torch.gt: operator.gt,
torch.ge: operator.ge,
Reported by Pylint.
Line: 135
Column: 9
torch.floor_divide: operator.floordiv,
torch.remainder: operator.mod,
torch.eq: operator.eq,
torch.ne: operator.ne,
torch.lt: operator.lt,
torch.le: operator.le,
torch.gt: operator.gt,
torch.ge: operator.ge,
}
Reported by Pylint.
Line: 136
Column: 9
torch.remainder: operator.mod,
torch.eq: operator.eq,
torch.ne: operator.ne,
torch.lt: operator.lt,
torch.le: operator.le,
torch.gt: operator.gt,
torch.ge: operator.ge,
}
Reported by Pylint.
caffe2/python/layers/fc_with_bootstrap.py
27 issues
Line: 128
Column: 9
preds_b_blob
)
"""
bootstrapped_FCs = []
output_schema = schema.Struct()
for i in range(num_bootstrap):
output_schema += schema.Struct(
(
"bootstrap_iteration_{}/indices".format(i),
Reported by Pylint.
Line: 263
Column: 5
else:
raise Exception("unsupported FC type version {}".format(version))
def _add_ops(self, net, features, iteration, params, version):
"""
Args:
params: the weight and bias, passed by either add_ops or
add_train_ops function
Reported by Pylint.
Line: 307
Column: 13
for i in range(self.num_bootstrap):
# these are dummy indices, not to be used anywhere
indices = self._generate_bootstrapped_indices(
net=net,
copied_cur_layer=self.input_record.field_blobs()[0],
iteration=i,
)
Reported by Pylint.
Line: 1
Column: 1
## @package fc_with_bootstrap
# Module caffe2.python.layers.fc_with_bootstrap
import math
import numpy as np
from caffe2.python import core, schema
from caffe2.python.helpers.arg_scope import get_current_scope
Reported by Pylint.
Line: 14
Column: 1
from caffe2.python.layers.sampling_trainable_mixin import SamplingTrainableMixin
def get_fc_predictor_version(fc_version):
assert fc_version in ["fp32"], (
"Only support fp32 for the fully connected layer "
"in the predictor net, the provided FC precision is {}".format(fc_version)
)
return fc_version
Reported by Pylint.
Line: 15
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
def get_fc_predictor_version(fc_version):
assert fc_version in ["fp32"], (
"Only support fp32 for the fully connected layer "
"in the predictor net, the provided FC precision is {}".format(fc_version)
)
return fc_version
Reported by Bandit.
Line: 22
Column: 1
return fc_version
class FCWithBootstrap(SamplingTrainableMixin, ModelLayer):
def __init__(
self,
model,
input_record,
output_dims,
Reported by Pylint.
Line: 22
Column: 1
return fc_version
class FCWithBootstrap(SamplingTrainableMixin, ModelLayer):
def __init__(
self,
model,
input_record,
output_dims,
Reported by Pylint.
Line: 23
Column: 5
class FCWithBootstrap(SamplingTrainableMixin, ModelLayer):
def __init__(
self,
model,
input_record,
output_dims,
num_bootstrap,
Reported by Pylint.
Line: 23
Column: 5
class FCWithBootstrap(SamplingTrainableMixin, ModelLayer):
def __init__(
self,
model,
input_record,
output_dims,
num_bootstrap,
Reported by Pylint.
benchmarks/operator_benchmark/pt/qunary_test.py
27 issues
Line: 3
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for quantized unary operators (point-wise and reduction)."""
# Configs for pointwise and reduction unary ops
Reported by Pylint.
Line: 10
Column: 28
# Configs for pointwise and reduction unary ops
qunary_ops_configs_short = op_bench.config_list(
attr_names=['M', 'N'],
attrs=[
[512, 512],
],
cross_product_configs={
Reported by Pylint.
Line: 21
Column: 27
tags=['short']
)
qunary_ops_configs_long = op_bench.cross_product_configs(
M=[256, 1024],
N=[256, 1024],
dtype=[torch.quint8, torch.qint8, torch.qint32],
tags=['long']
)
Reported by Pylint.
Line: 28
Column: 25
tags=['long']
)
class QUnaryOpBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, dtype, op_func):
f_input = torch.rand(M, N)
scale = 1.0
zero_point = 0
self.inputs = {
Reported by Pylint.
Line: 45
Column: 19
# TODO: Uncomment the ops whenever they are implemented for quantized tensor.
qunary_ops_list = op_bench.op_list(
attr_names=['op_name', 'op_func'],
attrs=[
# ['q_abs', torch.abs],
# ['q_abs_', torch.abs_],
# ['q_acos', torch.acos],
Reported by Pylint.
Line: 129
Column: 1
)
op_bench.generate_pt_tests_from_op_list(qunary_ops_list,
qunary_ops_configs_short + qunary_ops_configs_long,
QUnaryOpBenchmark)
# === Other unary ops (i.e. the ones that need parameters as args) ===
Reported by Pylint.
Line: 137
Column: 33
# === Other unary ops (i.e. the ones that need parameters as args) ===
# Configs for pointwise and reduction unary ops
qunary_ops_topk_configs_short = op_bench.config_list(
attr_names=['M', 'N', 'k'],
attrs=[
[512, 512, 5],
],
cross_product_configs={
Reported by Pylint.
Line: 148
Column: 32
tags=['short']
)
qunary_ops_topk_configs_long = op_bench.cross_product_configs(
M=[256, 1024],
N=[256, 1024],
k=[1, 3, 5],
dtype=[torch.quint8, torch.qint8, torch.qint32],
tags=['long']
Reported by Pylint.
Line: 156
Column: 24
tags=['long']
)
class QTopkOpBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, dtype, k):
f_input = torch.rand(M, N)
scale = 1.0
zero_point = 0
self.inputs = {
Reported by Pylint.
Line: 172
Column: 1
def forward(self, q_input, k: int):
return torch.topk(q_input, k)
op_bench.generate_pt_test(qunary_ops_topk_configs_short + qunary_ops_topk_configs_long,
QTopkOpBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
caffe2/python/operator_test/sparse_to_dense_mask_op_test.py
27 issues
Line: 7
Column: 1
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
Reported by Pylint.
Line: 9
Column: 1
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
class TestFcOperator(hu.HypothesisTestCase):
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
Reported by Pylint.
Line: 13
Column: 1
import numpy as np
class TestFcOperator(hu.HypothesisTestCase):
@given(n=st.integers(1, 10), k=st.integers(1, 5),
use_length=st.booleans(), **hu.gcs_cpu_only)
@settings(deadline=10000)
def test_sparse_to_dense_mask(self, n, k, use_length, gc, dc):
Reported by Pylint.
Line: 18
Column: 5
@given(n=st.integers(1, 10), k=st.integers(1, 5),
use_length=st.booleans(), **hu.gcs_cpu_only)
@settings(deadline=10000)
def test_sparse_to_dense_mask(self, n, k, use_length, gc, dc):
lengths = np.random.randint(k, size=n).astype(np.int32) + 1
N = sum(lengths)
indices = np.random.randint(5, size=N)
values = np.random.rand(N, 2).astype(np.float32)
default = np.random.rand(2).astype(np.float32)
Reported by Pylint.
Line: 18
Column: 5
@given(n=st.integers(1, 10), k=st.integers(1, 5),
use_length=st.booleans(), **hu.gcs_cpu_only)
@settings(deadline=10000)
def test_sparse_to_dense_mask(self, n, k, use_length, gc, dc):
lengths = np.random.randint(k, size=n).astype(np.int32) + 1
N = sum(lengths)
indices = np.random.randint(5, size=N)
values = np.random.rand(N, 2).astype(np.float32)
default = np.random.rand(2).astype(np.float32)
Reported by Pylint.
Line: 18
Column: 5
@given(n=st.integers(1, 10), k=st.integers(1, 5),
use_length=st.booleans(), **hu.gcs_cpu_only)
@settings(deadline=10000)
def test_sparse_to_dense_mask(self, n, k, use_length, gc, dc):
lengths = np.random.randint(k, size=n).astype(np.int32) + 1
N = sum(lengths)
indices = np.random.randint(5, size=N)
values = np.random.rand(N, 2).astype(np.float32)
default = np.random.rand(2).astype(np.float32)
Reported by Pylint.
Line: 18
Column: 5
@given(n=st.integers(1, 10), k=st.integers(1, 5),
use_length=st.booleans(), **hu.gcs_cpu_only)
@settings(deadline=10000)
def test_sparse_to_dense_mask(self, n, k, use_length, gc, dc):
lengths = np.random.randint(k, size=n).astype(np.int32) + 1
N = sum(lengths)
indices = np.random.randint(5, size=N)
values = np.random.rand(N, 2).astype(np.float32)
default = np.random.rand(2).astype(np.float32)
Reported by Pylint.
Line: 18
Column: 5
@given(n=st.integers(1, 10), k=st.integers(1, 5),
use_length=st.booleans(), **hu.gcs_cpu_only)
@settings(deadline=10000)
def test_sparse_to_dense_mask(self, n, k, use_length, gc, dc):
lengths = np.random.randint(k, size=n).astype(np.int32) + 1
N = sum(lengths)
indices = np.random.randint(5, size=N)
values = np.random.rand(N, 2).astype(np.float32)
default = np.random.rand(2).astype(np.float32)
Reported by Pylint.
Line: 18
Column: 5
@given(n=st.integers(1, 10), k=st.integers(1, 5),
use_length=st.booleans(), **hu.gcs_cpu_only)
@settings(deadline=10000)
def test_sparse_to_dense_mask(self, n, k, use_length, gc, dc):
lengths = np.random.randint(k, size=n).astype(np.int32) + 1
N = sum(lengths)
indices = np.random.randint(5, size=N)
values = np.random.rand(N, 2).astype(np.float32)
default = np.random.rand(2).astype(np.float32)
Reported by Pylint.
benchmarks/operator_benchmark/pt/qrnn_test.py
27 issues
Line: 3
Column: 1
import operator_benchmark as op_bench
import torch
from torch import nn
"""
Microbenchmarks for RNNs.
"""
Reported by Pylint.
Line: 4
Column: 1
import operator_benchmark as op_bench
import torch
from torch import nn
"""
Microbenchmarks for RNNs.
"""
Reported by Pylint.
Line: 10
Column: 16
Microbenchmarks for RNNs.
"""
qrnn_configs = op_bench.config_list(
attrs=[
[1, 3, 1],
[5, 7, 4],
],
# names: input_size, hidden_size, num_layers
Reported by Pylint.
Line: 25
Column: 21
tags=["short"]
)
class LSTMBenchmark(op_bench.TorchBenchmarkBase):
def init(self, I, H, NL, B, D, dtype):
sequence_len = 128
batch_size = 16
# The quantized.dynamic.LSTM has a bug. That's why we create a regular
Reported by Pylint.
Line: 68
Column: 1
def forward(self, x, h, c):
return self.cell(x, (h, c))[0]
op_bench.generate_pt_test(qrnn_configs, LSTMBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 6
Column: 1
import torch
from torch import nn
"""
Microbenchmarks for RNNs.
"""
qrnn_configs = op_bench.config_list(
attrs=[
Reported by Pylint.
Line: 32
Column: 9
# The quantized.dynamic.LSTM has a bug. That's why we create a regular
# LSTM, and quantize it later. See issue #31192.
scale = 1.0 / 256
zero_point = 0
cell_nn = nn.LSTM(
input_size=I,
hidden_size=H,
num_layers=NL,
Reported by Pylint.
Line: 33
Column: 9
# The quantized.dynamic.LSTM has a bug. That's why we create a regular
# LSTM, and quantize it later. See issue #31192.
scale = 1.0 / 256
zero_point = 0
cell_nn = nn.LSTM(
input_size=I,
hidden_size=H,
num_layers=NL,
bias=B,
Reported by Pylint.
Line: 44
Column: 9
bidirectional=D,
)
cell_temp = nn.Sequential(cell_nn)
self.cell = torch.quantization.quantize_dynamic(cell_temp,
{nn.LSTM, nn.Linear},
dtype=dtype)[0]
x = torch.randn(sequence_len, # sequence length
batch_size, # batch size
Reported by Pylint.
Line: 58
Column: 9
batch_size, # batch size
H) # hidden size
self.inputs = {
"x": x,
"h": h,
"c": c
}
self.set_module_name("QLSTM")
Reported by Pylint.
benchmarks/operator_benchmark/pt/qcat_test.py
27 issues
Line: 3
Column: 1
import operator_benchmark as op_bench
import torch
import torch.nn.quantized as nnq
from typing import List
"""Microbenchmarks for quantized Cat operator"""
Reported by Pylint.
Line: 4
Column: 1
import operator_benchmark as op_bench
import torch
import torch.nn.quantized as nnq
from typing import List
"""Microbenchmarks for quantized Cat operator"""
Reported by Pylint.
Line: 11
Column: 22
"""Microbenchmarks for quantized Cat operator"""
# Configs for PT Cat operator
qcat_configs_short = op_bench.config_list(
attr_names=['M', 'N', 'K', 'L', 'dim'],
attrs=[
[256, 512, 1, 2, 0],
[512, 512, 2, 1, 1],
],
Reported by Pylint.
Line: 24
Column: 21
tags=['short'],
)
qcat_configs_long = op_bench.cross_product_configs(
M=[128, 1024],
N=[128, 1024],
K=[1, 2],
L=[5, 7],
dim=[0, 1, 2],
Reported by Pylint.
Line: 36
Column: 21
)
class QCatBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, L, dim, contig, dtype):
f_input = (torch.rand(M, N, K) - 0.5) * 256
self.qf = nnq.QFunctional()
scale = 1.0
zero_point = 0
Reported by Pylint.
Line: 67
Column: 1
return self.qf.cat(input, dim=dim)
op_bench.generate_pt_test(qcat_configs_short + qcat_configs_long,
QCatBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 8
Column: 1
from typing import List
"""Microbenchmarks for quantized Cat operator"""
# Configs for PT Cat operator
qcat_configs_short = op_bench.config_list(
attr_names=['M', 'N', 'K', 'L', 'dim'],
attrs=[
Reported by Pylint.
Line: 37
Column: 29
class QCatBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, L, dim, contig, dtype):
f_input = (torch.rand(M, N, K) - 0.5) * 256
self.qf = nnq.QFunctional()
scale = 1.0
zero_point = 0
self.qf.scale = scale
Reported by Pylint.
Line: 39
Column: 9
class QCatBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, L, dim, contig, dtype):
f_input = (torch.rand(M, N, K) - 0.5) * 256
self.qf = nnq.QFunctional()
scale = 1.0
zero_point = 0
self.qf.scale = scale
self.qf.zero_point = zero_point
Reported by Pylint.
Line: 51
Column: 13
q_input_non_contig = q_input.permute(permute_dims).contiguous()
q_input_non_contig = q_input_non_contig.permute(permute_dims)
if contig == 'all':
self.input = (q_input, q_input)
elif contig == 'one':
self.input = (q_input, q_input_non_contig)
elif contig == 'none':
self.input = (q_input_non_contig, q_input_non_contig)
Reported by Pylint.
torch/optim/rmsprop.py
27 issues
Line: 2
Column: 1
import torch
from . import _functional as F
from .optimizer import Optimizer
class RMSprop(Optimizer):
r"""Implements RMSprop algorithm.
Proposed by G. Hinton in his
Reported by Pylint.
Line: 3
Column: 1
import torch
from . import _functional as F
from .optimizer import Optimizer
class RMSprop(Optimizer):
r"""Implements RMSprop algorithm.
Proposed by G. Hinton in his
Reported by Pylint.
Line: 90
Column: 77
# State initialization
if len(state) == 0:
state['step'] = 0
state['square_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['momentum'] > 0:
state['momentum_buffer'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['centered']:
state['grad_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
Reported by Pylint.
Line: 90
Column: 43
# State initialization
if len(state) == 0:
state['step'] = 0
state['square_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['momentum'] > 0:
state['momentum_buffer'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['centered']:
state['grad_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
Reported by Pylint.
Line: 92
Column: 52
state['step'] = 0
state['square_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['momentum'] > 0:
state['momentum_buffer'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['centered']:
state['grad_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
square_avgs.append(state['square_avg'])
Reported by Pylint.
Line: 92
Column: 86
state['step'] = 0
state['square_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['momentum'] > 0:
state['momentum_buffer'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['centered']:
state['grad_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
square_avgs.append(state['square_avg'])
Reported by Pylint.
Line: 94
Column: 45
if group['momentum'] > 0:
state['momentum_buffer'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['centered']:
state['grad_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
square_avgs.append(state['square_avg'])
if group['momentum'] > 0:
momentum_buffer_list.append(state['momentum_buffer'])
Reported by Pylint.
Line: 94
Column: 79
if group['momentum'] > 0:
state['momentum_buffer'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if group['centered']:
state['grad_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
square_avgs.append(state['square_avg'])
if group['momentum'] > 0:
momentum_buffer_list.append(state['momentum_buffer'])
Reported by Pylint.
Line: 1
Column: 1
import torch
from . import _functional as F
from .optimizer import Optimizer
class RMSprop(Optimizer):
r"""Implements RMSprop algorithm.
Proposed by G. Hinton in his
Reported by Pylint.
Line: 35
Column: 1
"""
def __init__(self, params, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= momentum:
Reported by Pylint.
test/jit/test_jit_utils.py
27 issues
Line: 6
Column: 1
from textwrap import dedent
import unittest
import torch
from torch.testing._internal import jit_utils
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
Reported by Pylint.
Line: 8
Column: 1
import torch
from torch.testing._internal import jit_utils
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
Reported by Pylint.
Line: 13
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
Reported by Pylint.
Line: 28
Column: 13
return x + y
self.assertEqual(
["x", "y"],
torch._jit_internal.get_callable_argument_names(fn_positional_or_keyword_args_only))
# Tests that POSITIONAL_ONLY arguments are ignored.
@unittest.skipIf(sys.version_info < (3, 8), 'POSITIONAL_ONLY arguments are not supported before 3.8')
def test_get_callable_argument_names_positional_only(self):
code = dedent('''
Reported by Pylint.
Line: 38
Column: 34
return x + y
''')
fn_positional_only_arg = jit_utils._get_py3_code(code, 'fn_positional_only_arg')
self.assertEqual(
[],
torch._jit_internal.get_callable_argument_names(fn_positional_only_arg))
# Tests that VAR_POSITIONAL arguments are ignored.
Reported by Pylint.
Line: 41
Column: 13
fn_positional_only_arg = jit_utils._get_py3_code(code, 'fn_positional_only_arg')
self.assertEqual(
[],
torch._jit_internal.get_callable_argument_names(fn_positional_only_arg))
# Tests that VAR_POSITIONAL arguments are ignored.
def test_get_callable_argument_names_var_positional(self):
# Tests that VAR_POSITIONAL arguments are ignored.
def fn_var_positional_arg(x, *arg):
Reported by Pylint.
Line: 50
Column: 13
return x + arg[0]
self.assertEqual(
[],
torch._jit_internal.get_callable_argument_names(fn_var_positional_arg))
# Tests that KEYWORD_ONLY arguments are ignored.
def test_get_callable_argument_names_keyword_only(self):
def fn_keyword_only_arg(x, *, y):
return x + y
Reported by Pylint.
Line: 58
Column: 13
return x + y
self.assertEqual(
[],
torch._jit_internal.get_callable_argument_names(fn_keyword_only_arg))
# Tests that VAR_KEYWORD arguments are ignored.
def test_get_callable_argument_names_var_keyword(self):
def fn_var_keyword_arg(**args):
return args['x'] + args['y']
Reported by Pylint.
Line: 66
Column: 13
return args['x'] + args['y']
self.assertEqual(
[],
torch._jit_internal.get_callable_argument_names(fn_var_keyword_arg))
# Tests that a function signature containing various different types of
# arguments are ignored.
@unittest.skipIf(sys.version_info < (3, 8), 'POSITIONAL_ONLY arguments are not supported before 3.8')
def test_get_callable_argument_names_hybrid(self):
Reported by Pylint.
Line: 76
Column: 26
def fn_hybrid_args(x, /, y, *args, **kwargs):
return x + y + args[0] + kwargs['z']
''')
fn_hybrid_args = jit_utils._get_py3_code(code, 'fn_hybrid_args')
self.assertEqual(
[],
torch._jit_internal.get_callable_argument_names(fn_hybrid_args))
Reported by Pylint.