The following issues were found:
caffe2/python/attention.py
17 issues
Line: 109
Column: 5
# \textbf{W}^\alpha used in the context of \alpha_{sum}(a,b)
def _apply_fc_weight_for_sum_match(
model,
input,
dim_in,
dim_out,
scope,
name,
):
Reported by Pylint.
Line: 1
Column: 1
## @package attention
# Module caffe2.python.attention
from caffe2.python import brew
Reported by Pylint.
Line: 11
Column: 1
from caffe2.python import brew
class AttentionType:
Regular, Recurrent, Dot, SoftCoverage = tuple(range(4))
def s(scope, name):
# We have to manually scope due to our internal/external blob
Reported by Pylint.
Line: 11
Column: 1
from caffe2.python import brew
class AttentionType:
Regular, Recurrent, Dot, SoftCoverage = tuple(range(4))
def s(scope, name):
# We have to manually scope due to our internal/external blob
Reported by Pylint.
Line: 15
Column: 1
Regular, Recurrent, Dot, SoftCoverage = tuple(range(4))
def s(scope, name):
# We have to manually scope due to our internal/external blob
# relationships.
return "{}/{}".format(str(scope), str(name))
Reported by Pylint.
Line: 15
Column: 1
Regular, Recurrent, Dot, SoftCoverage = tuple(range(4))
def s(scope, name):
# We have to manually scope due to our internal/external blob
# relationships.
return "{}/{}".format(str(scope), str(name))
Reported by Pylint.
Line: 107
Column: 1
# \textbf{W}^\alpha used in the context of \alpha_{sum}(a,b)
def _apply_fc_weight_for_sum_match(
model,
input,
dim_in,
dim_out,
scope,
Reported by Pylint.
Line: 132
Column: 1
# Implement RecAtt due to section 4.1 in http://arxiv.org/abs/1601.03317
def apply_recurrent_attention(
model,
encoder_output_dim,
encoder_outputs_transposed,
weighted_encoder_outputs,
decoder_hidden_state_t,
Reported by Pylint.
Line: 132
Column: 1
# Implement RecAtt due to section 4.1 in http://arxiv.org/abs/1601.03317
def apply_recurrent_attention(
model,
encoder_output_dim,
encoder_outputs_transposed,
weighted_encoder_outputs,
decoder_hidden_state_t,
Reported by Pylint.
Line: 132
Column: 1
# Implement RecAtt due to section 4.1 in http://arxiv.org/abs/1601.03317
def apply_recurrent_attention(
model,
encoder_output_dim,
encoder_outputs_transposed,
weighted_encoder_outputs,
decoder_hidden_state_t,
Reported by Pylint.
benchmarks/operator_benchmark/pt/qcomparators_test.py
16 issues
Line: 1
Column: 1
import torch
import operator_benchmark as op_bench
qcomparators_configs = op_bench.cross_product_configs(
N=(8, 64),
dtype=(torch.quint8, torch.qint8, torch.qint32),
contig=(False, True),
other_scalar=(False, True),
Reported by Pylint.
Line: 5
Column: 24
import operator_benchmark as op_bench
qcomparators_configs = op_bench.cross_product_configs(
N=(8, 64),
dtype=(torch.quint8, torch.qint8, torch.qint32),
contig=(False, True),
other_scalar=(False, True),
out_variant=(False, True),
Reported by Pylint.
Line: 14
Column: 20
tags=('short',)
)
qcomparators_ops = op_bench.op_list(
attrs=(
('eq', torch.eq),
('ne', torch.ne),
('lt', torch.lt),
('gt', torch.gt),
Reported by Pylint.
Line: 27
Column: 28
)
class QComparatorBenchmark(op_bench.TorchBenchmarkBase):
def init(self, N, dtype, contig, other_scalar, out_variant, op_func):
# TODO: Consider more diverse shapes
f_input = (torch.rand(N, N) - 0.5) * 256
scale = 1.0
zero_point = 0
Reported by Pylint.
Line: 65
Column: 1
op_bench.generate_pt_tests_from_op_list(qcomparators_ops,
qcomparators_configs,
QComparatorBenchmark)
if __name__ == '__main__':
Reported by Pylint.
Line: 29
Column: 3
class QComparatorBenchmark(op_bench.TorchBenchmarkBase):
def init(self, N, dtype, contig, other_scalar, out_variant, op_func):
# TODO: Consider more diverse shapes
f_input = (torch.rand(N, N) - 0.5) * 256
scale = 1.0
zero_point = 0
q_input_a = torch.quantize_per_tensor(f_input, scale=scale,
Reported by Pylint.
Line: 43
Column: 9
permute_dims = list(range(f_input.ndim))[::-1]
q_input_a = q_input_a.permute(permute_dims)
self.qop = op_func
self.inputs = {
"q_input_a": q_input_a,
"q_input_b": q_input_b,
"out_variant": out_variant,
"other_scalar": other_scalar,
Reported by Pylint.
Line: 44
Column: 9
q_input_a = q_input_a.permute(permute_dims)
self.qop = op_func
self.inputs = {
"q_input_a": q_input_a,
"q_input_b": q_input_b,
"out_variant": out_variant,
"other_scalar": other_scalar,
}
Reported by Pylint.
Line: 1
Column: 1
import torch
import operator_benchmark as op_bench
qcomparators_configs = op_bench.cross_product_configs(
N=(8, 64),
dtype=(torch.quint8, torch.qint8, torch.qint32),
contig=(False, True),
other_scalar=(False, True),
Reported by Pylint.
Line: 27
Column: 1
)
class QComparatorBenchmark(op_bench.TorchBenchmarkBase):
def init(self, N, dtype, contig, other_scalar, out_variant, op_func):
# TODO: Consider more diverse shapes
f_input = (torch.rand(N, N) - 0.5) * 256
scale = 1.0
zero_point = 0
Reported by Pylint.
benchmarks/operator_benchmark/pt/bmm_test.py
16 issues
Line: 2
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for add_ operator. Supports both Caffe2/PyTorch."""
class BmmBenchmark(op_bench.TorchBenchmarkBase):
def init(self, B, M, N, K, device):
self.inputs = {
"batch1": torch.rand((B, M, K), device=device, requires_grad=self.auto_set()),
Reported by Pylint.
Line: 6
Column: 20
"""Microbenchmarks for add_ operator. Supports both Caffe2/PyTorch."""
class BmmBenchmark(op_bench.TorchBenchmarkBase):
def init(self, B, M, N, K, device):
self.inputs = {
"batch1": torch.rand((B, M, K), device=device, requires_grad=self.auto_set()),
"batch2": torch.rand((B, K, N,), device=device, requires_grad=self.auto_set())
}
Reported by Pylint.
Line: 17
Column: 15
def forward(self, batch1, batch2):
return torch.bmm(batch1, batch2)
bmm_configs = op_bench.cross_product_configs(
B=[2, 100],
M=[8, 256],
N=[256, 16],
K=[16, 32],
device=['cpu'],
Reported by Pylint.
Line: 26
Column: 1
tags=["short"],
)
op_bench.generate_pt_test(bmm_configs, BmmBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 4
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for add_ operator. Supports both Caffe2/PyTorch."""
class BmmBenchmark(op_bench.TorchBenchmarkBase):
def init(self, B, M, N, K, device):
self.inputs = {
"batch1": torch.rand((B, M, K), device=device, requires_grad=self.auto_set()),
Reported by Pylint.
Line: 8
Column: 9
class BmmBenchmark(op_bench.TorchBenchmarkBase):
def init(self, B, M, N, K, device):
self.inputs = {
"batch1": torch.rand((B, M, K), device=device, requires_grad=self.auto_set()),
"batch2": torch.rand((B, K, N,), device=device, requires_grad=self.auto_set())
}
self.set_module_name("bmm")
Reported by Pylint.
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for add_ operator. Supports both Caffe2/PyTorch."""
class BmmBenchmark(op_bench.TorchBenchmarkBase):
def init(self, B, M, N, K, device):
self.inputs = {
"batch1": torch.rand((B, M, K), device=device, requires_grad=self.auto_set()),
Reported by Pylint.
Line: 6
Column: 1
"""Microbenchmarks for add_ operator. Supports both Caffe2/PyTorch."""
class BmmBenchmark(op_bench.TorchBenchmarkBase):
def init(self, B, M, N, K, device):
self.inputs = {
"batch1": torch.rand((B, M, K), device=device, requires_grad=self.auto_set()),
"batch2": torch.rand((B, K, N,), device=device, requires_grad=self.auto_set())
}
Reported by Pylint.
Line: 7
Column: 5
"""Microbenchmarks for add_ operator. Supports both Caffe2/PyTorch."""
class BmmBenchmark(op_bench.TorchBenchmarkBase):
def init(self, B, M, N, K, device):
self.inputs = {
"batch1": torch.rand((B, M, K), device=device, requires_grad=self.auto_set()),
"batch2": torch.rand((B, K, N,), device=device, requires_grad=self.auto_set())
}
self.set_module_name("bmm")
Reported by Pylint.
Line: 7
Column: 5
"""Microbenchmarks for add_ operator. Supports both Caffe2/PyTorch."""
class BmmBenchmark(op_bench.TorchBenchmarkBase):
def init(self, B, M, N, K, device):
self.inputs = {
"batch1": torch.rand((B, M, K), device=device, requires_grad=self.auto_set()),
"batch2": torch.rand((B, K, N,), device=device, requires_grad=self.auto_set())
}
self.set_module_name("bmm")
Reported by Pylint.
caffe2/python/modeling/compute_histogram_for_blobs_test.py
16 issues
Line: 65
Column: 19
fc1_w = workspace.FetchBlob('fc1_w')
fc1_w_curr_normalized_hist = workspace.FetchBlob('fc1_w_curr_normalized_hist')
cur_hist, acc_hist = self.histogram(fc1_w,
lower_bound=lower_bound,
upper_bound=upper_bound,
num_buckets=num_buckets)
self.assertEqual(fc1_w_curr_normalized_hist.size, num_buckets + 2)
Reported by Pylint.
Line: 104
Column: 19
fc1_w = workspace.FetchBlob('fc1_w')
fc1_w_curr_normalized_hist = workspace.FetchBlob('fc1_w_curr_normalized_hist')
cur_hist, acc_hist = self.histogram(fc1_w,
lower_bound=lower_bound,
upper_bound=upper_bound,
num_buckets=num_buckets)
self.assertEqual(fc1_w_curr_normalized_hist.size, num_buckets + 2)
Reported by Pylint.
Line: 1
Column: 1
import unittest
from caffe2.python import workspace, brew, model_helper
from caffe2.python.modeling.compute_histogram_for_blobs import (
ComputeHistogramForBlobs
Reported by Pylint.
Line: 15
Column: 1
import numpy as np
class ComputeHistogramForBlobsTest(unittest.TestCase):
def histogram(self, X, lower_bound=0.0, upper_bound=1.0, num_buckets=20):
assert X.ndim == 2, ('this test assume 2d array, but X.ndim is {0}'.
format(X.ndim))
N, M = X.shape
Reported by Pylint.
Line: 17
Column: 5
class ComputeHistogramForBlobsTest(unittest.TestCase):
def histogram(self, X, lower_bound=0.0, upper_bound=1.0, num_buckets=20):
assert X.ndim == 2, ('this test assume 2d array, but X.ndim is {0}'.
format(X.ndim))
N, M = X.shape
hist = np.zeros((num_buckets + 2, ), dtype=np.int32)
segment = (upper_bound - lower_bound) / num_buckets
Reported by Pylint.
Line: 17
Column: 5
class ComputeHistogramForBlobsTest(unittest.TestCase):
def histogram(self, X, lower_bound=0.0, upper_bound=1.0, num_buckets=20):
assert X.ndim == 2, ('this test assume 2d array, but X.ndim is {0}'.
format(X.ndim))
N, M = X.shape
hist = np.zeros((num_buckets + 2, ), dtype=np.int32)
segment = (upper_bound - lower_bound) / num_buckets
Reported by Pylint.
Line: 17
Column: 5
class ComputeHistogramForBlobsTest(unittest.TestCase):
def histogram(self, X, lower_bound=0.0, upper_bound=1.0, num_buckets=20):
assert X.ndim == 2, ('this test assume 2d array, but X.ndim is {0}'.
format(X.ndim))
N, M = X.shape
hist = np.zeros((num_buckets + 2, ), dtype=np.int32)
segment = (upper_bound - lower_bound) / num_buckets
Reported by Pylint.
Line: 18
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
class ComputeHistogramForBlobsTest(unittest.TestCase):
def histogram(self, X, lower_bound=0.0, upper_bound=1.0, num_buckets=20):
assert X.ndim == 2, ('this test assume 2d array, but X.ndim is {0}'.
format(X.ndim))
N, M = X.shape
hist = np.zeros((num_buckets + 2, ), dtype=np.int32)
segment = (upper_bound - lower_bound) / num_buckets
Y = np.zeros((N, M), dtype=np.int32)
Reported by Bandit.
Line: 20
Column: 12
def histogram(self, X, lower_bound=0.0, upper_bound=1.0, num_buckets=20):
assert X.ndim == 2, ('this test assume 2d array, but X.ndim is {0}'.
format(X.ndim))
N, M = X.shape
hist = np.zeros((num_buckets + 2, ), dtype=np.int32)
segment = (upper_bound - lower_bound) / num_buckets
Y = np.zeros((N, M), dtype=np.int32)
Y[X < lower_bound] = 0
Y[X >= upper_bound] = num_buckets + 1
Reported by Pylint.
Line: 20
Column: 9
def histogram(self, X, lower_bound=0.0, upper_bound=1.0, num_buckets=20):
assert X.ndim == 2, ('this test assume 2d array, but X.ndim is {0}'.
format(X.ndim))
N, M = X.shape
hist = np.zeros((num_buckets + 2, ), dtype=np.int32)
segment = (upper_bound - lower_bound) / num_buckets
Y = np.zeros((N, M), dtype=np.int32)
Y[X < lower_bound] = 0
Y[X >= upper_bound] = num_buckets + 1
Reported by Pylint.
benchmarks/operator_benchmark/pt/diag_test.py
16 issues
Line: 2
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for diag operator"""
# Configs for PT diag operator
diag_configs_short = op_bench.config_list(
Reported by Pylint.
Line: 9
Column: 22
# Configs for PT diag operator
diag_configs_short = op_bench.config_list(
attr_names=['dim', 'M', 'N', 'diagonal', 'out'],
attrs=[
[1, 64, 64, 0, True],
[2, 128, 128, -10, False],
[1, 256, 256, 20, True],
Reported by Pylint.
Line: 23
Column: 21
)
class DiagBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dim, M, N, diagonal, out, device):
self.inputs = {
"input": torch.rand(M, N, device=device) if dim == 2 else torch.rand(M, device=device),
"diagonal": diagonal,
"out": out,
Reported by Pylint.
Line: 40
Column: 1
return torch.diag(input, diagonal=diagonal)
op_bench.generate_pt_test(diag_configs_short, DiagBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 5
Column: 1
import torch
"""Microbenchmarks for diag operator"""
# Configs for PT diag operator
diag_configs_short = op_bench.config_list(
attr_names=['dim', 'M', 'N', 'diagonal', 'out'],
Reported by Pylint.
Line: 25
Column: 9
class DiagBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dim, M, N, diagonal, out, device):
self.inputs = {
"input": torch.rand(M, N, device=device) if dim == 2 else torch.rand(M, device=device),
"diagonal": diagonal,
"out": out,
"out_tensor": torch.tensor((),)
}
Reported by Pylint.
Line: 33
Column: 23
}
self.set_module_name('diag')
def forward(self, input, diagonal: int, out: bool, out_tensor):
if out:
return torch.diag(input, diagonal=diagonal, out=out_tensor)
else:
return torch.diag(input, diagonal=diagonal)
Reported by Pylint.
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for diag operator"""
# Configs for PT diag operator
diag_configs_short = op_bench.config_list(
Reported by Pylint.
Line: 23
Column: 1
)
class DiagBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dim, M, N, diagonal, out, device):
self.inputs = {
"input": torch.rand(M, N, device=device) if dim == 2 else torch.rand(M, device=device),
"diagonal": diagonal,
"out": out,
Reported by Pylint.
Line: 24
Column: 5
class DiagBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dim, M, N, diagonal, out, device):
self.inputs = {
"input": torch.rand(M, N, device=device) if dim == 2 else torch.rand(M, device=device),
"diagonal": diagonal,
"out": out,
"out_tensor": torch.tensor((),)
Reported by Pylint.
benchmarks/operator_benchmark/pt/qtensor_method_test.py
16 issues
Line: 2
Column: 1
import operator_benchmark as op_bench
import torch
# Configs for pointwise and reduction unary ops
qmethods_configs_short = op_bench.config_list(
attr_names=['M', 'N'],
attrs=[
[32, 32],
],
Reported by Pylint.
Line: 5
Column: 26
import torch
# Configs for pointwise and reduction unary ops
qmethods_configs_short = op_bench.config_list(
attr_names=['M', 'N'],
attrs=[
[32, 32],
],
cross_product_configs={
Reported by Pylint.
Line: 17
Column: 25
tags=['short']
)
qmethods_configs_long = op_bench.cross_product_configs(
M=[256, 1024],
N=[256, 1024],
dtype=[torch.qint8, torch.qint32],
contig=[False, True],
tags=['long']
Reported by Pylint.
Line: 26
Column: 29
)
class _QMethodBenchmarkBase(op_bench.TorchBenchmarkBase):
def init(self, M, N, dtype, contig):
f_input = torch.rand(M, N)
scale = 1.0
zero_point = 0
self.q_input = torch.quantize_per_tensor(f_input, scale=scale,
Reported by Pylint.
Line: 48
Column: 1
return q_input.copy_(q_input)
op_bench.generate_pt_test(
qmethods_configs_short + qmethods_configs_long,
QMethodTensorInputCopyBenchmark
)
if __name__ == "__main__":
Reported by Pylint.
Line: 31
Column: 9
f_input = torch.rand(M, N)
scale = 1.0
zero_point = 0
self.q_input = torch.quantize_per_tensor(f_input, scale=scale,
zero_point=zero_point,
dtype=dtype)
if not contig:
permute_dims = list(range(self.q_input.ndim))[::-1]
self.q_input = self.q_input.permute(permute_dims)
Reported by Pylint.
Line: 36
Column: 13
dtype=dtype)
if not contig:
permute_dims = list(range(self.q_input.ndim))[::-1]
self.q_input = self.q_input.permute(permute_dims)
self.inputs = {
"q_input": self.q_input,
}
Reported by Pylint.
Line: 38
Column: 9
permute_dims = list(range(self.q_input.ndim))[::-1]
self.q_input = self.q_input.permute(permute_dims)
self.inputs = {
"q_input": self.q_input,
}
class QMethodTensorInputCopyBenchmark(_QMethodBenchmarkBase):
Reported by Pylint.
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
# Configs for pointwise and reduction unary ops
qmethods_configs_short = op_bench.config_list(
attr_names=['M', 'N'],
attrs=[
[32, 32],
],
Reported by Pylint.
Line: 26
Column: 1
)
class _QMethodBenchmarkBase(op_bench.TorchBenchmarkBase):
def init(self, M, N, dtype, contig):
f_input = torch.rand(M, N)
scale = 1.0
zero_point = 0
self.q_input = torch.quantize_per_tensor(f_input, scale=scale,
Reported by Pylint.
caffe2/python/layers/arc_cosine_feature_map.py
16 issues
Line: 1
Column: 1
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
import numpy as np
Reported by Pylint.
Line: 11
Column: 1
import numpy as np
class ArcCosineFeatureMap(ModelLayer):
"""
A general version of the arc-cosine kernel feature map (s = 1 restores
the original arc-cosine kernel feature map).
Applies H(x) * x^s, where H is the Heaviside step function and x is the
Reported by Pylint.
Line: 36
Column: 5
initialize_output_schema -- if True, initialize output schema as Scalar
from Arc Cosine; else output schema is None
"""
def __init__(
self,
model,
input_record,
output_dims,
s=1,
Reported by Pylint.
Line: 36
Column: 5
initialize_output_schema -- if True, initialize output schema as Scalar
from Arc Cosine; else output schema is None
"""
def __init__(
self,
model,
input_record,
output_dims,
s=1,
Reported by Pylint.
Line: 52
Column: 9
name='arc_cosine_feature_map',
**kwargs):
super(ArcCosineFeatureMap, self).__init__(model, name, input_record,
**kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
self.params = []
self.model = model
self.set_weight_as_global_constant = set_weight_as_global_constant
Reported by Pylint.
Line: 54
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
super(ArcCosineFeatureMap, self).__init__(model, name, input_record,
**kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
self.params = []
self.model = model
self.set_weight_as_global_constant = set_weight_as_global_constant
self.input_dims = input_record.field_type().shape[0]
Reported by Bandit.
Line: 60
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
self.set_weight_as_global_constant = set_weight_as_global_constant
self.input_dims = input_record.field_type().shape[0]
assert self.input_dims >= 1, "Expected input dimensions >= 1, got %s" \
% self.input_dims
if initialize_output_schema:
self.output_schema = schema.Scalar(
(np.float32, (output_dims, )),
Reported by Bandit.
Line: 70
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
)
self.output_dims = output_dims
assert self.output_dims >= 1, "Expected output dimensions >= 1, got %s" \
% self.output_dims
self.s = s
assert (self.s >= 0), "Expected s >= 0, got %s" % self.s
assert isinstance(self.s, int), "Expected s to be type int, got type %s" \
% type(self.s)
Reported by Bandit.
Line: 72
Column: 9
self.output_dims = output_dims
assert self.output_dims >= 1, "Expected output dimensions >= 1, got %s" \
% self.output_dims
self.s = s
assert (self.s >= 0), "Expected s >= 0, got %s" % self.s
assert isinstance(self.s, int), "Expected s to be type int, got type %s" \
% type(self.s)
assert (scale > 0.0), "Expected scale > 0, got %s" % scale
Reported by Pylint.
Line: 73
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
assert self.output_dims >= 1, "Expected output dimensions >= 1, got %s" \
% self.output_dims
self.s = s
assert (self.s >= 0), "Expected s >= 0, got %s" % self.s
assert isinstance(self.s, int), "Expected s to be type int, got type %s" \
% type(self.s)
assert (scale > 0.0), "Expected scale > 0, got %s" % scale
self.stddev = scale * np.sqrt(1.0 / self.input_dims)
Reported by Bandit.
benchmarks/operator_benchmark/pt/index_select_test.py
16 issues
Line: 2
Column: 1
import operator_benchmark as op_bench
import torch
import numpy
"""Microbenchmarks for index_select operator."""
# An example input from this configuration is M=4, N=4, dim=0.
index_select_configs_short = op_bench.config_list(
Reported by Pylint.
Line: 9
Column: 30
"""Microbenchmarks for index_select operator."""
# An example input from this configuration is M=4, N=4, dim=0.
index_select_configs_short = op_bench.config_list(
attr_names=["M", "N", "K", "dim"],
attrs=[
[8, 8, 1, 1],
[256, 512, 1, 1],
[512, 512, 1, 1],
Reported by Pylint.
Line: 26
Column: 29
)
index_select_configs_long = op_bench.cross_product_configs(
M=[128, 1024],
N=[128, 1024],
K=[1, 2],
dim=[1],
device=['cpu', 'cuda'],
Reported by Pylint.
Line: 36
Column: 28
)
class IndexSelectBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, dim, device):
max_val = N
numpy.random.seed((1 << 32) - 1)
index_dim = numpy.random.randint(0, N)
self.inputs = {
Reported by Pylint.
Line: 52
Column: 1
return torch.index_select(input_one, dim, index)
op_bench.generate_pt_test(index_select_configs_short + index_select_configs_long,
IndexSelectBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 6
Column: 1
import numpy
"""Microbenchmarks for index_select operator."""
# An example input from this configuration is M=4, N=4, dim=0.
index_select_configs_short = op_bench.config_list(
attr_names=["M", "N", "K", "dim"],
attrs=[
Reported by Pylint.
Line: 41
Column: 9
max_val = N
numpy.random.seed((1 << 32) - 1)
index_dim = numpy.random.randint(0, N)
self.inputs = {
"input_one": torch.rand(M, N, K, device=device),
"dim" : dim,
"index" : torch.tensor(numpy.random.randint(0, max_val, index_dim), device=device),
}
self.set_module_name("index_select")
Reported by Pylint.
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
import numpy
"""Microbenchmarks for index_select operator."""
# An example input from this configuration is M=4, N=4, dim=0.
index_select_configs_short = op_bench.config_list(
Reported by Pylint.
Line: 36
Column: 1
)
class IndexSelectBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, dim, device):
max_val = N
numpy.random.seed((1 << 32) - 1)
index_dim = numpy.random.randint(0, N)
self.inputs = {
Reported by Pylint.
Line: 37
Column: 5
class IndexSelectBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, dim, device):
max_val = N
numpy.random.seed((1 << 32) - 1)
index_dim = numpy.random.randint(0, N)
self.inputs = {
"input_one": torch.rand(M, N, K, device=device),
Reported by Pylint.
benchmarks/operator_benchmark/pt/interpolate_test.py
16 issues
Line: 2
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for interpolate operator."""
class InterpolateBenchmark(op_bench.TorchBenchmarkBase):
def init(self, input_size, output_size, channels_last=False, mode='linear', dtype=torch.float):
Reported by Pylint.
Line: 7
Column: 28
"""Microbenchmarks for interpolate operator."""
class InterpolateBenchmark(op_bench.TorchBenchmarkBase):
def init(self, input_size, output_size, channels_last=False, mode='linear', dtype=torch.float):
input_image = torch.randint(0, 256, size=input_size, dtype=dtype, device='cpu',
requires_grad=self.auto_set())
if channels_last:
Reported by Pylint.
Line: 46
Column: 16
align_corners=align_corners)
config_short = op_bench.config_list(
attr_names=["input_size", "output_size"],
attrs=[
[(1, 3, 60, 40), (24, 24)],
[(1, 3, 600, 400), (240, 240)],
[(1, 3, 320, 320), (256, 256)],
Reported by Pylint.
Line: 60
Column: 17
tags=["short"],
)
config_short += op_bench.config_list(
attr_names=["input_size", "output_size"],
attrs=[
[(1, 3, 60, 40), (24, 24)],
[(1, 3, 600, 400), (240, 240)],
[(1, 3, 320, 320), (256, 256)],
Reported by Pylint.
Line: 76
Column: 15
)
config_long = op_bench.config_list(
attr_names=["input_size", "output_size"],
attrs=[
[(1, 3, 320, 320), (512, 512)],
[(1, 3, 500, 500), (256, 256)],
[(1, 3, 500, 500), (800, 800)],
Reported by Pylint.
Line: 95
Column: 13
)
config_3d = op_bench.config_list(
# no channels_last for 3D tensors
attr_names=["input_size", "output_size"],
attrs=[
[(4, 512, 320), (256,)],
[(4, 512, 320), (512,)],
Reported by Pylint.
Line: 109
Column: 13
)
config_5d = op_bench.config_list(
attr_names=["input_size", "output_size"],
attrs=[
[(1, 3, 16, 320, 320), (8, 256, 256)],
[(1, 3, 16, 320, 320), (32, 512, 512)],
Reported by Pylint.
Line: 128
Column: 5
for config in (config_short, config_long, config_3d, config_5d):
op_bench.generate_pt_test(config, InterpolateBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 4
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for interpolate operator."""
class InterpolateBenchmark(op_bench.TorchBenchmarkBase):
def init(self, input_size, output_size, channels_last=False, mode='linear', dtype=torch.float):
Reported by Pylint.
Line: 32
Column: 9
5: 'trilinear',
}[input_image.ndim]
self.inputs = {
"input_image": input_image,
"output_size": output_size,
"mode": mode,
"align_corners": align_corners,
}
Reported by Pylint.
caffe2/python/mkl/mkl_fc_speed_test.py
16 issues
Line: 12
Column: 22
from caffe2.python import core, workspace, test_util
@unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.")
class TestMKLBasic(test_util.TestCase):
def testFCSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
Reported by Pylint.
Line: 92
Column: 9
workspace.FetchBlob("Y_mkl"),
atol=1e-2,
rtol=1e-2)
runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
if __name__ == '__main__':
unittest.main()
Reported by Pylint.
Line: 1
Column: 1
import unittest
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, test_util
Reported by Pylint.
Line: 13
Column: 1
@unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.")
class TestMKLBasic(test_util.TestCase):
def testFCSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 256, 6, 6).astype(np.float32) - 0.5
Reported by Pylint.
Line: 14
Column: 5
@unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.")
class TestMKLBasic(test_util.TestCase):
def testFCSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 256, 6, 6).astype(np.float32) - 0.5
#X = np.random.rand(32, 256*6*6).astype(np.float32) - 0.5
Reported by Pylint.
Line: 14
Column: 5
@unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.")
class TestMKLBasic(test_util.TestCase):
def testFCSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 256, 6, 6).astype(np.float32) - 0.5
#X = np.random.rand(32, 256*6*6).astype(np.float32) - 0.5
Reported by Pylint.
Line: 14
Column: 5
@unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.")
class TestMKLBasic(test_util.TestCase):
def testFCSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 256, 6, 6).astype(np.float32) - 0.5
#X = np.random.rand(32, 256*6*6).astype(np.float32) - 0.5
Reported by Pylint.
Line: 18
Column: 9
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 256, 6, 6).astype(np.float32) - 0.5
#X = np.random.rand(32, 256*6*6).astype(np.float32) - 0.5
W = np.random.rand(4096, 9216).astype(np.float32) - 0.5
b = np.random.rand(4096).astype(np.float32) - 0.5
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
# Makes sure that feed works.
Reported by Pylint.
Line: 20
Column: 9
# case for MKL during deployment time.
X = np.random.rand(1, 256, 6, 6).astype(np.float32) - 0.5
#X = np.random.rand(32, 256*6*6).astype(np.float32) - 0.5
W = np.random.rand(4096, 9216).astype(np.float32) - 0.5
b = np.random.rand(4096).astype(np.float32) - 0.5
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
# Makes sure that feed works.
workspace.FeedBlob("X", X)
workspace.FeedBlob("W", W)
Reported by Pylint.
Line: 21
Column: 9
X = np.random.rand(1, 256, 6, 6).astype(np.float32) - 0.5
#X = np.random.rand(32, 256*6*6).astype(np.float32) - 0.5
W = np.random.rand(4096, 9216).astype(np.float32) - 0.5
b = np.random.rand(4096).astype(np.float32) - 0.5
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
# Makes sure that feed works.
workspace.FeedBlob("X", X)
workspace.FeedBlob("W", W)
workspace.FeedBlob("b", b)
Reported by Pylint.