The following issues were found:
caffe2/python/ideep/weightedsum_op_test.py
17 issues
Line: 7
Column: 1
import numpy as np
import hypothesis.strategies as st
import unittest
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.ideep_test_util as mu
Reported by Pylint.
Line: 11
Column: 1
import unittest
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestWeightedSumOp(hu.HypothesisTestCase):
Reported by Pylint.
Line: 15
Column: 22
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestWeightedSumOp(hu.HypothesisTestCase):
@given(n=st.integers(5, 8), m=st.integers(1, 1),
d=st.integers(2, 4), grad_on_w=st.booleans(),
**mu.gcs_ideep_only)
def test_weighted_sum(self, n, m, d, grad_on_w, gc, dc):
Reported by Pylint.
Line: 20
Column: 57
@given(n=st.integers(5, 8), m=st.integers(1, 1),
d=st.integers(2, 4), grad_on_w=st.booleans(),
**mu.gcs_ideep_only)
def test_weighted_sum(self, n, m, d, grad_on_w, gc, dc):
input_names = []
input_vars = []
for i in range(m):
X_name = 'X' + str(i)
w_name = 'w' + str(i)
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
import hypothesis.strategies as st
import unittest
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 8
Column: 1
import numpy as np
import hypothesis.strategies as st
import unittest
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.ideep_test_util as mu
Reported by Pylint.
Line: 12
Column: 1
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestWeightedSumOp(hu.HypothesisTestCase):
@given(n=st.integers(5, 8), m=st.integers(1, 1),
Reported by Pylint.
Line: 16
Column: 1
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestWeightedSumOp(hu.HypothesisTestCase):
@given(n=st.integers(5, 8), m=st.integers(1, 1),
d=st.integers(2, 4), grad_on_w=st.booleans(),
**mu.gcs_ideep_only)
def test_weighted_sum(self, n, m, d, grad_on_w, gc, dc):
input_names = []
Reported by Pylint.
Line: 20
Column: 5
@given(n=st.integers(5, 8), m=st.integers(1, 1),
d=st.integers(2, 4), grad_on_w=st.booleans(),
**mu.gcs_ideep_only)
def test_weighted_sum(self, n, m, d, grad_on_w, gc, dc):
input_names = []
input_vars = []
for i in range(m):
X_name = 'X' + str(i)
w_name = 'w' + str(i)
Reported by Pylint.
Line: 20
Column: 5
@given(n=st.integers(5, 8), m=st.integers(1, 1),
d=st.integers(2, 4), grad_on_w=st.booleans(),
**mu.gcs_ideep_only)
def test_weighted_sum(self, n, m, d, grad_on_w, gc, dc):
input_names = []
input_vars = []
for i in range(m):
X_name = 'X' + str(i)
w_name = 'w' + str(i)
Reported by Pylint.
benchmarks/operator_benchmark/pt/linear_unpack_fp16_test.py
17 issues
Line: 2
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for linear_unpack_fp16_ operator. Supports both Caffe2/PyTorch."""
# Configs for PT linear_unpack_fp16 operator
linear_unpack_fp16_long_configs = op_bench.cross_product_configs(
M=[8, 128],
N=[32, 64],
Reported by Pylint.
Line: 7
Column: 35
"""Microbenchmarks for linear_unpack_fp16_ operator. Supports both Caffe2/PyTorch."""
# Configs for PT linear_unpack_fp16 operator
linear_unpack_fp16_long_configs = op_bench.cross_product_configs(
M=[8, 128],
N=[32, 64],
K=[256, 512],
device=['cpu'],
tags=["long"]
Reported by Pylint.
Line: 15
Column: 36
tags=["long"]
)
linear_unpack_fp16_short_configs = op_bench.config_list(
attr_names=["M", "N", "K"],
attrs=[
[1, 1, 1],
[64, 64, 64],
[64, 64, 128],
Reported by Pylint.
Line: 28
Column: 33
tags=["short"],
)
class LinearUnpackFP16Benchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device):
# input to unpack operator must be what the output is for prepack operator
self.inputs = {
"input_one": torch.ops.quantized.linear_prepack_fp16(torch.rand(M, N, K, device=device,
requires_grad=False,
Reported by Pylint.
Line: 44
Column: 1
# The generated test names based on linear_unpack_fp16_short_configs will be in the following pattern:
# linear_unpack_fp16_M8_N16_K32_devicecpu
op_bench.generate_pt_test(linear_unpack_fp16_long_configs + linear_unpack_fp16_short_configs, LinearUnpackFP16Benchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 4
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for linear_unpack_fp16_ operator. Supports both Caffe2/PyTorch."""
# Configs for PT linear_unpack_fp16 operator
linear_unpack_fp16_long_configs = op_bench.cross_product_configs(
M=[8, 128],
N=[32, 64],
Reported by Pylint.
Line: 31
Column: 9
class LinearUnpackFP16Benchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device):
# input to unpack operator must be what the output is for prepack operator
self.inputs = {
"input_one": torch.ops.quantized.linear_prepack_fp16(torch.rand(M, N, K, device=device,
requires_grad=False,
dtype=torch.float32))
}
self.set_module_name("linear_unpack_fp16")
Reported by Pylint.
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for linear_unpack_fp16_ operator. Supports both Caffe2/PyTorch."""
# Configs for PT linear_unpack_fp16 operator
linear_unpack_fp16_long_configs = op_bench.cross_product_configs(
M=[8, 128],
N=[32, 64],
Reported by Pylint.
Line: 28
Column: 1
tags=["short"],
)
class LinearUnpackFP16Benchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device):
# input to unpack operator must be what the output is for prepack operator
self.inputs = {
"input_one": torch.ops.quantized.linear_prepack_fp16(torch.rand(M, N, K, device=device,
requires_grad=False,
Reported by Pylint.
Line: 29
Column: 5
)
class LinearUnpackFP16Benchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device):
# input to unpack operator must be what the output is for prepack operator
self.inputs = {
"input_one": torch.ops.quantized.linear_prepack_fp16(torch.rand(M, N, K, device=device,
requires_grad=False,
dtype=torch.float32))
Reported by Pylint.
benchmarks/operator_benchmark/pt/sum_test.py
17 issues
Line: 2
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for sum reduction operator."""
# Configs for PT add operator
sum_configs = op_bench.cross_product_configs(
R=[64, 256], # Length of reduced dimension
V=[32, 512], # Length of other dimension
Reported by Pylint.
Line: 7
Column: 15
"""Microbenchmarks for sum reduction operator."""
# Configs for PT add operator
sum_configs = op_bench.cross_product_configs(
R=[64, 256], # Length of reduced dimension
V=[32, 512], # Length of other dimension
dim=[0, 1],
contiguous=[True, False],
device=['cpu', 'cuda'],
Reported by Pylint.
Line: 14
Column: 5
contiguous=[True, False],
device=['cpu', 'cuda'],
tags=['short']
) + op_bench.cross_product_configs(
R=[1024, 8192],
V=[512, 1024],
dim=[0, 1],
contiguous=[True, False],
device=['cpu', 'cuda'],
Reported by Pylint.
Line: 24
Column: 20
)
class SumBenchmark(op_bench.TorchBenchmarkBase):
def init(self, R, V, dim, contiguous, device):
shape = (R, V) if dim == 0 else (V, R)
tensor = torch.rand(shape, device=device)
if not contiguous:
Reported by Pylint.
Line: 45
Column: 1
def forward(self, input_tensor, dim: int):
return input_tensor.sum(dim=dim)
op_bench.generate_pt_test(sum_configs, SumBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 4
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for sum reduction operator."""
# Configs for PT add operator
sum_configs = op_bench.cross_product_configs(
R=[64, 256], # Length of reduced dimension
V=[32, 512], # Length of other dimension
Reported by Pylint.
Line: 32
Column: 13
if not contiguous:
storage = torch.empty([s * 2 for s in shape], device=device)
storage[::2, ::2] = tensor
self.input_tensor = storage[::2, ::2]
else:
self.input_tensor = tensor
self.inputs = {
"input_tensor": self.input_tensor,
Reported by Pylint.
Line: 34
Column: 13
storage[::2, ::2] = tensor
self.input_tensor = storage[::2, ::2]
else:
self.input_tensor = tensor
self.inputs = {
"input_tensor": self.input_tensor,
"dim": dim
}
Reported by Pylint.
Line: 36
Column: 9
else:
self.input_tensor = tensor
self.inputs = {
"input_tensor": self.input_tensor,
"dim": dim
}
self.set_module_name("sum")
Reported by Pylint.
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for sum reduction operator."""
# Configs for PT add operator
sum_configs = op_bench.cross_product_configs(
R=[64, 256], # Length of reduced dimension
V=[32, 512], # Length of other dimension
Reported by Pylint.
caffe2/python/mkl/mkl_copy_op_test.py
17 issues
Line: 7
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
Reported by Pylint.
Line: 8
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
import caffe2.proto.caffe2_pb2 as pb2
Reported by Pylint.
Line: 13
Column: 1
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
import caffe2.proto.caffe2_pb2 as pb2
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKCopyTest(hu.HypothesisTestCase):
@given(width=st.integers(7, 9),
Reported by Pylint.
Line: 15
Column: 22
import caffe2.python.mkl_test_util as mu
import caffe2.proto.caffe2_pb2 as pb2
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKCopyTest(hu.HypothesisTestCase):
@given(width=st.integers(7, 9),
height=st.integers(7, 9),
input_channels=st.integers(1, 3),
Reported by Pylint.
Line: 28
Column: 27
height,
input_channels,
batch_size,
gc, dc):
X = np.random.rand(
batch_size, input_channels, width, height).astype(np.float32)
self.ws.create_blob("X").feed(X, pb2.DeviceOption())
self.ws.run(core.CreateOperator(
"CopyCPUToMKL",
Reported by Pylint.
Line: 28
Column: 23
height,
input_channels,
batch_size,
gc, dc):
X = np.random.rand(
batch_size, input_channels, width, height).astype(np.float32)
self.ws.create_blob("X").feed(X, pb2.DeviceOption())
self.ws.run(core.CreateOperator(
"CopyCPUToMKL",
Reported by Pylint.
Line: 67
Column: 5
if __name__ == "__main__":
import unittest
unittest.main()
Reported by Pylint.
Line: 1
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
Reported by Pylint.
Line: 17
Column: 1
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKCopyTest(hu.HypothesisTestCase):
@given(width=st.integers(7, 9),
height=st.integers(7, 9),
input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
**mu.gcs)
Reported by Pylint.
Line: 23
Column: 5
input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
**mu.gcs)
def test_mkl_copy(self,
width,
height,
input_channels,
batch_size,
gc, dc):
Reported by Pylint.
benchmarks/operator_benchmark/pt/gelu_test.py
17 issues
Line: 3
Column: 1
import operator_benchmark as op_bench
import torch
"""
Microbenchmarks for the gelu operators.
"""
Reported by Pylint.
Line: 10
Column: 21
Microbenchmarks for the gelu operators.
"""
gelu_configs_long = op_bench.cross_product_configs(
N=[1, 4],
C=[3],
H=[16, 256],
W=[16, 256],
device=['cpu'],
Reported by Pylint.
Line: 20
Column: 21
)
class GeluBenchmark(op_bench.TorchBenchmarkBase):
def init(self, N, C, H, W, device):
self.inputs = {
"input": torch.rand(N, C, H, W, device=device)
}
Reported by Pylint.
Line: 30
Column: 1
return torch.nn.functional.gelu(input)
op_bench.generate_pt_test(gelu_configs_long, GeluBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 6
Column: 1
import torch
"""
Microbenchmarks for the gelu operators.
"""
gelu_configs_long = op_bench.cross_product_configs(
N=[1, 4],
Reported by Pylint.
Line: 22
Column: 9
class GeluBenchmark(op_bench.TorchBenchmarkBase):
def init(self, N, C, H, W, device):
self.inputs = {
"input": torch.rand(N, C, H, W, device=device)
}
def forward(self, input):
return torch.nn.functional.gelu(input)
Reported by Pylint.
Line: 26
Column: 23
"input": torch.rand(N, C, H, W, device=device)
}
def forward(self, input):
return torch.nn.functional.gelu(input)
op_bench.generate_pt_test(gelu_configs_long, GeluBenchmark)
Reported by Pylint.
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
"""
Microbenchmarks for the gelu operators.
"""
Reported by Pylint.
Line: 20
Column: 1
)
class GeluBenchmark(op_bench.TorchBenchmarkBase):
def init(self, N, C, H, W, device):
self.inputs = {
"input": torch.rand(N, C, H, W, device=device)
}
Reported by Pylint.
Line: 21
Column: 5
class GeluBenchmark(op_bench.TorchBenchmarkBase):
def init(self, N, C, H, W, device):
self.inputs = {
"input": torch.rand(N, C, H, W, device=device)
}
def forward(self, input):
Reported by Pylint.
benchmarks/operator_benchmark/pt/qgroupnorm_test.py
17 issues
Line: 3
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for quantized groupnorm operator."""
groupnorm_configs_short = op_bench.cross_product_configs(
dims=(
Reported by Pylint.
Line: 8
Column: 27
"""Microbenchmarks for quantized groupnorm operator."""
groupnorm_configs_short = op_bench.cross_product_configs(
dims=(
(32, 8, 16),
(32, 8, 56, 56),
),
num_groups=(2, 4),
Reported by Pylint.
Line: 19
Column: 27
)
class QGroupNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dims, num_groups, dtype):
X = (torch.rand(*dims) - 0.5) * 256
num_channels = dims[1]
scale = 1.0
Reported by Pylint.
Line: 45
Column: 1
output_zero_point=Y_zero_point)
op_bench.generate_pt_test(groupnorm_configs_short, QGroupNormBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 6
Column: 1
import torch
"""Microbenchmarks for quantized groupnorm operator."""
groupnorm_configs_short = op_bench.cross_product_configs(
dims=(
(32, 8, 16),
(32, 8, 56, 56),
Reported by Pylint.
Line: 27
Column: 9
scale = 1.0
zero_point = 0
self.inputs = {
"qX": torch.quantize_per_tensor(
X, scale=scale, zero_point=zero_point, dtype=dtype),
"num_groups": num_groups,
"weight": torch.rand(num_channels, dtype=torch.float),
"bias": torch.rand(num_channels, dtype=torch.float),
Reported by Pylint.
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for quantized groupnorm operator."""
groupnorm_configs_short = op_bench.cross_product_configs(
dims=(
Reported by Pylint.
Line: 19
Column: 1
)
class QGroupNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dims, num_groups, dtype):
X = (torch.rand(*dims) - 0.5) * 256
num_channels = dims[1]
scale = 1.0
Reported by Pylint.
Line: 21
Column: 5
class QGroupNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dims, num_groups, dtype):
X = (torch.rand(*dims) - 0.5) * 256
num_channels = dims[1]
scale = 1.0
zero_point = 0
Reported by Pylint.
Line: 22
Column: 9
class QGroupNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dims, num_groups, dtype):
X = (torch.rand(*dims) - 0.5) * 256
num_channels = dims[1]
scale = 1.0
zero_point = 0
self.inputs = {
Reported by Pylint.
caffe2/python/mkl/mkl_fill_op_test.py
17 issues
Line: 7
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
Reported by Pylint.
Line: 8
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
Reported by Pylint.
Line: 14
Column: 22
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLFillTest(hu.HypothesisTestCase):
@given(n=st.integers(1, 4), c=st.integers(1, 4),
h=st.integers(1, 4), w=st.integers(1, 4),
filler=st.sampled_from(
Reported by Pylint.
Line: 24
Column: 55
),
seed=st.integers(5, 10),
**mu.gcs_cpu_mkl)
def test_mkl_fill(self, n, c, h, w, filler, seed, gc, dc):
op = core.CreateOperator(
filler,
[],
["Y"],
shape=[n, c, h, w],
Reported by Pylint.
Line: 36
Column: 5
self.assertDeviceChecks(dc, op, [], [0])
if __name__ == "__main__":
import unittest
unittest.main()
Reported by Pylint.
Line: 1
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
from caffe2.python import core, workspace
Reported by Pylint.
Line: 16
Column: 1
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLFillTest(hu.HypothesisTestCase):
@given(n=st.integers(1, 4), c=st.integers(1, 4),
h=st.integers(1, 4), w=st.integers(1, 4),
filler=st.sampled_from(
["XavierFill", "ConstantFill", "GaussianFill", "MSRAFill"]
),
Reported by Pylint.
Line: 24
Column: 5
),
seed=st.integers(5, 10),
**mu.gcs_cpu_mkl)
def test_mkl_fill(self, n, c, h, w, filler, seed, gc, dc):
op = core.CreateOperator(
filler,
[],
["Y"],
shape=[n, c, h, w],
Reported by Pylint.
Line: 24
Column: 5
),
seed=st.integers(5, 10),
**mu.gcs_cpu_mkl)
def test_mkl_fill(self, n, c, h, w, filler, seed, gc, dc):
op = core.CreateOperator(
filler,
[],
["Y"],
shape=[n, c, h, w],
Reported by Pylint.
Line: 24
Column: 5
),
seed=st.integers(5, 10),
**mu.gcs_cpu_mkl)
def test_mkl_fill(self, n, c, h, w, filler, seed, gc, dc):
op = core.CreateOperator(
filler,
[],
["Y"],
shape=[n, c, h, w],
Reported by Pylint.
benchmarks/operator_benchmark/pt/as_strided_test.py
17 issues
Line: 2
Column: 1
import operator_benchmark as op_bench
import torch
from typing import List
"""Microbenchmarks for as_strided operator"""
# Configs for PT as_strided operator
Reported by Pylint.
Line: 10
Column: 28
# Configs for PT as_strided operator
as_strided_configs_short = op_bench.config_list(
attr_names=["M", "N", "size", "stride", "storage_offset"],
attrs=[
[8, 8, (2, 2), (1, 1), 0],
[256, 256, (32, 32), (1, 1), 0],
[512, 512, (64, 64), (2, 2), 1],
Reported by Pylint.
Line: 23
Column: 27
tags=["short"],
)
as_strided_configs_long = op_bench.cross_product_configs(
M=[512],
N=[1024],
size=[(16, 16), (128, 128)],
stride=[(1, 1)],
storage_offset=[0, 1],
Reported by Pylint.
Line: 34
Column: 27
)
class As_stridedBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, size, stride, storage_offset, device):
self.inputs = {
"input_one": torch.rand(M, N, device=device),
"size": size,
"stride": stride,
Reported by Pylint.
Line: 51
Column: 1
input_one, size, stride, storage_offset)
op_bench.generate_pt_test(as_strided_configs_short + as_strided_configs_long,
As_stridedBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 6
Column: 1
from typing import List
"""Microbenchmarks for as_strided operator"""
# Configs for PT as_strided operator
as_strided_configs_short = op_bench.config_list(
attr_names=["M", "N", "size", "stride", "storage_offset"],
Reported by Pylint.
Line: 36
Column: 9
class As_stridedBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, size, stride, storage_offset, device):
self.inputs = {
"input_one": torch.rand(M, N, device=device),
"size": size,
"stride": stride,
"storage_offset": storage_offset
}
Reported by Pylint.
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
from typing import List
"""Microbenchmarks for as_strided operator"""
# Configs for PT as_strided operator
Reported by Pylint.
Line: 3
Column: 1
import operator_benchmark as op_bench
import torch
from typing import List
"""Microbenchmarks for as_strided operator"""
# Configs for PT as_strided operator
Reported by Pylint.
Line: 34
Column: 1
)
class As_stridedBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, size, stride, storage_offset, device):
self.inputs = {
"input_one": torch.rand(M, N, device=device),
"size": size,
"stride": stride,
Reported by Pylint.
benchmarks/operator_benchmark/c2/quantile_op_test.py
17 issues
Line: 1
Column: 1
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for QuantileOp operator."""
# Configs for C2 QuantileOp operator
Reported by Pylint.
Line: 3
Column: 1
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for QuantileOp operator."""
# Configs for C2 QuantileOp operator
Reported by Pylint.
Line: 4
Column: 1
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for QuantileOp operator."""
# Configs for C2 QuantileOp operator
Reported by Pylint.
Line: 10
Column: 28
"""Microbenchmarks for QuantileOp operator."""
# Configs for C2 QuantileOp operator
quantile_op_long_configs = op_bench.cross_product_configs(
M=[32, 64, 128], N=range(32, 128, 32), dtype=["float", "double"], tags=["long"]
)
quantile_op_short_configs = op_bench.config_list(
Reported by Pylint.
Line: 15
Column: 29
)
quantile_op_short_configs = op_bench.config_list(
attrs=[
[16, 16, "float"],
[16, 16, "double"],
[64, 64, "float"],
[64, 64, "double"],
Reported by Pylint.
Line: 3
Column: 1
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for QuantileOp operator."""
# Configs for C2 QuantileOp operator
Reported by Pylint.
Line: 7
Column: 1
from caffe2.python import core
"""Microbenchmarks for QuantileOp operator."""
# Configs for C2 QuantileOp operator
quantile_op_long_configs = op_bench.cross_product_configs(
M=[32, 64, 128], N=range(32, 128, 32), dtype=["float", "double"], tags=["long"]
)
Reported by Pylint.
Line: 29
Column: 9
class QuantileOpBenchmark(op_bench_c2.Caffe2BenchmarkBase):
def init(self, M, N, dtype):
self.data = [self.tensor([N], dtype) for _ in range(M)]
self.quantile = 0.3
self.output = self.tensor([1], dtype)
self.set_module_name("quantile_op")
def forward(self):
Reported by Pylint.
Line: 30
Column: 9
class QuantileOpBenchmark(op_bench_c2.Caffe2BenchmarkBase):
def init(self, M, N, dtype):
self.data = [self.tensor([N], dtype) for _ in range(M)]
self.quantile = 0.3
self.output = self.tensor([1], dtype)
self.set_module_name("quantile_op")
def forward(self):
op = core.CreateOperator(
Reported by Pylint.
Line: 31
Column: 9
def init(self, M, N, dtype):
self.data = [self.tensor([N], dtype) for _ in range(M)]
self.quantile = 0.3
self.output = self.tensor([1], dtype)
self.set_module_name("quantile_op")
def forward(self):
op = core.CreateOperator(
"Quantile", inputs=self.data, outputs=self.output, quantile=self.quantile
Reported by Pylint.
benchmarks/instruction_counts/core/utils.py
17 issues
Line: 7
Column: 1
import textwrap
from typing import List, Optional, Tuple
from torch.utils.benchmark import _make_temp_dir
from core.api import GroupedBenchmark, TimerArgs
from core.types import Definition, FlatIntermediateDefinition, Label
Reported by Pylint.
Line: 15
Column: 5
_TEMPDIR: Optional[str] = None
def get_temp_dir() -> str:
global _TEMPDIR
if _TEMPDIR is None:
_TEMPDIR = _make_temp_dir(prefix="instruction_count_microbenchmarks", gc_dev_shm=True)
atexit.register(shutil.rmtree, path=_TEMPDIR)
return _TEMPDIR
Reported by Pylint.
Line: 74
Column: 5
assert len(lines) >= 3, f"Invalid string:\n{stmts}"
column_header_pattern = r"^Python\s{35}\| C\+\+(\s*)$"
signature_pattern = r"^: f\((.*)\)( -> (.+))?\s*$"
separation_pattern = r"^[-]{40} | [-]{40}$"
code_pattern = r"^(.{40}) \|($| (.*)$)"
column_match = re.search(column_header_pattern, lines[0])
if column_match is None:
Reported by Pylint.
Line: 1
Column: 1
import atexit
import shutil
import re
import textwrap
from typing import List, Optional, Tuple
from torch.utils.benchmark import _make_temp_dir
from core.api import GroupedBenchmark, TimerArgs
Reported by Pylint.
Line: 14
Column: 1
_TEMPDIR: Optional[str] = None
def get_temp_dir() -> str:
global _TEMPDIR
if _TEMPDIR is None:
_TEMPDIR = _make_temp_dir(prefix="instruction_count_microbenchmarks", gc_dev_shm=True)
atexit.register(shutil.rmtree, path=_TEMPDIR)
return _TEMPDIR
Reported by Pylint.
Line: 29
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
) -> None:
for k, value in sub_schema.items():
if isinstance(k, tuple):
assert all(isinstance(ki, str) for ki in k)
key_suffix: Label = k
elif k is None:
key_suffix = ()
else:
assert isinstance(k, str)
Reported by Bandit.
Line: 34
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
elif k is None:
key_suffix = ()
else:
assert isinstance(k, str)
key_suffix = (k,)
key: Label = key_prefix + key_suffix
if isinstance(value, (TimerArgs, GroupedBenchmark)):
assert key not in result, f"duplicate key: {key}"
Reported by Bandit.
Line: 39
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
key: Label = key_prefix + key_suffix
if isinstance(value, (TimerArgs, GroupedBenchmark)):
assert key not in result, f"duplicate key: {key}"
result[key] = value
else:
assert isinstance(value, dict)
_flatten(key_prefix=key, sub_schema=value, result=result)
Reported by Bandit.
Line: 42
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
assert key not in result, f"duplicate key: {key}"
result[key] = value
else:
assert isinstance(value, dict)
_flatten(key_prefix=key, sub_schema=value, result=result)
def flatten(schema: Definition) -> FlatIntermediateDefinition:
"""See types.py for an explanation of nested vs. flat definitions."""
Reported by Bandit.
Line: 52
Column: 12
_flatten(key_prefix=(), sub_schema=schema, result=result)
# Ensure that we produced a valid flat definition.
for k, v in result.items():
assert isinstance(k, tuple)
assert all(isinstance(ki, str) for ki in k)
assert isinstance(v, (TimerArgs, GroupedBenchmark))
return result
Reported by Pylint.