The following issues were found:
benchmarks/instruction_counts/execution/runner.py
20 issues
Line: 249
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b602_subprocess_popen_with_shell_equals_true.html
cmd = f'{source_cmd}{PYTHON_CMD} -c "import torch"'
proc = subprocess.run(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
encoding="utf-8",
executable=SHELL,
)
Reported by Bandit.
Line: 169
Column: 13
def _enqueue_new_jobs(self) -> None:
work_queue: List[WorkOrder] = []
for i, work_order in enumerate(self._work_queue):
self._currently_processed = work_order
cpu_list = self._core_pool.reserve(work_order.timer_args.num_threads)
if cpu_list is None:
work_queue.append(work_order)
Reported by Pylint.
Line: 191
Column: 53
eta = "Unknown"
else:
remaining = len(self._work_items) - len(self._results)
iters_remaining = math.ceil(remaining / self._core_pool._num_cores)
mean_time = sum(self._durations.values()) / len(self._durations)
eta_minutes = math.ceil(iters_remaining * mean_time / 60)
eta = f"~{eta_minutes:.0f} minute{'s' if eta_minutes > 1 else ''}"
print(f"\r{fraction} ({elapsed}), ETA: {eta}", end="")
Reported by Pylint.
Line: 247
Column: 20
for source_cmd in (source_cmds or {""}):
cmd = f'{source_cmd}{PYTHON_CMD} -c "import torch"'
proc = subprocess.run(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
encoding="utf-8",
Reported by Pylint.
Line: 4
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b404-import-subprocess
"""Run benchmarks while handling parallelism, isolation, and fault tolerance."""
import math
import multiprocessing
import subprocess
import textwrap
import threading
import time
from typing import Dict, List, Optional, Set, Tuple, Union
Reported by Bandit.
Line: 11
Column: 1
from typing import Dict, List, Optional, Set, Tuple, Union
from execution.work import PYTHON_CMD, SHELL, InProgress, WorkOrder
from worker.main import WorkerFailure, WorkerOutput
CPU_COUNT: int = multiprocessing.cpu_count()
Reported by Pylint.
Line: 39
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
a balance between rigor and engineering complexity.
"""
def __init__(self, min_core_id: int, max_core_id: int) -> None:
assert min_core_id >= 0
assert max_core_id >= min_core_id
assert max_core_id < CPU_COUNT
self._min_core_id: int = min_core_id
self._max_core_id: int = max_core_id
Reported by Bandit.
Line: 40
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
"""
def __init__(self, min_core_id: int, max_core_id: int) -> None:
assert min_core_id >= 0
assert max_core_id >= min_core_id
assert max_core_id < CPU_COUNT
self._min_core_id: int = min_core_id
self._max_core_id: int = max_core_id
self._num_cores = max_core_id - min_core_id + 1
Reported by Bandit.
Line: 41
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
def __init__(self, min_core_id: int, max_core_id: int) -> None:
assert min_core_id >= 0
assert max_core_id >= min_core_id
assert max_core_id < CPU_COUNT
self._min_core_id: int = min_core_id
self._max_core_id: int = max_core_id
self._num_cores = max_core_id - min_core_id + 1
print(f"Core pool created: cores {self._min_core_id}-{self._max_core_id}")
Reported by Bandit.
Line: 54
Column: 5
self._reservations: Dict[str, Tuple[int, ...]] = {}
self._lock = threading.Lock()
def reserve(self, n: int) -> Optional[str]:
"""Simple first-fit policy.
If successful, return a string for `taskset`. Otherwise, return None.
"""
with self._lock:
Reported by Pylint.
benchmarks/tensorexpr/attention.py
20 issues
Line: 5
Column: 1
# for benchmarking and some control flow stripped out.
# https://github.com/mlperf/training/blob/master/rnn_translator/pytorch/seq2seq/models/attention.py
from . import benchmark
import torch
class BahdanauAttention(benchmark.Benchmark):
def __init__(self, mode, device, dtype, b, t_q, t_k, n):
Reported by Pylint.
Line: 6
Column: 1
# https://github.com/mlperf/training/blob/master/rnn_translator/pytorch/seq2seq/models/attention.py
from . import benchmark
import torch
class BahdanauAttention(benchmark.Benchmark):
def __init__(self, mode, device, dtype, b, t_q, t_k, n):
super().__init__(mode, device, dtype)
Reported by Pylint.
Line: 1
Column: 1
# This is a copy of rnn_attention from MLPerf, with some common sizes hardcoded
# for benchmarking and some control flow stripped out.
# https://github.com/mlperf/training/blob/master/rnn_translator/pytorch/seq2seq/models/attention.py
from . import benchmark
import torch
class BahdanauAttention(benchmark.Benchmark):
Reported by Pylint.
Line: 6
Column: 1
# https://github.com/mlperf/training/blob/master/rnn_translator/pytorch/seq2seq/models/attention.py
from . import benchmark
import torch
class BahdanauAttention(benchmark.Benchmark):
def __init__(self, mode, device, dtype, b, t_q, t_k, n):
super().__init__(mode, device, dtype)
Reported by Pylint.
Line: 9
Column: 1
import torch
class BahdanauAttention(benchmark.Benchmark):
def __init__(self, mode, device, dtype, b, t_q, t_k, n):
super().__init__(mode, device, dtype)
self.b = b
self.t_q = t_q
self.t_k = t_k
Reported by Pylint.
Line: 9
Column: 1
import torch
class BahdanauAttention(benchmark.Benchmark):
def __init__(self, mode, device, dtype, b, t_q, t_k, n):
super().__init__(mode, device, dtype)
self.b = b
self.t_q = t_q
self.t_k = t_k
Reported by Pylint.
Line: 10
Column: 5
class BahdanauAttention(benchmark.Benchmark):
def __init__(self, mode, device, dtype, b, t_q, t_k, n):
super().__init__(mode, device, dtype)
self.b = b
self.t_q = t_q
self.t_k = t_k
self.n = n
Reported by Pylint.
Line: 10
Column: 5
class BahdanauAttention(benchmark.Benchmark):
def __init__(self, mode, device, dtype, b, t_q, t_k, n):
super().__init__(mode, device, dtype)
self.b = b
self.t_q = t_q
self.t_k = t_k
self.n = n
Reported by Pylint.
Line: 10
Column: 5
class BahdanauAttention(benchmark.Benchmark):
def __init__(self, mode, device, dtype, b, t_q, t_k, n):
super().__init__(mode, device, dtype)
self.b = b
self.t_q = t_q
self.t_k = t_k
self.n = n
Reported by Pylint.
Line: 12
Column: 9
class BahdanauAttention(benchmark.Benchmark):
def __init__(self, mode, device, dtype, b, t_q, t_k, n):
super().__init__(mode, device, dtype)
self.b = b
self.t_q = t_q
self.t_k = t_k
self.n = n
self.att_query = self.rand(
[b, t_q, n], device=device, dtype=dtype, requires_grad=self.requires_grad
Reported by Pylint.
benchmarks/operator_benchmark/pt/qinterpolate_test.py
20 issues
Line: 2
Column: 1
import operator_benchmark as op_bench
import torch
'''Microbenchmarks for the quantized interpolate op.
Note: We are not benchmarking `upsample` as it is being depricated, and calls
the `interpolate` anyway.
'''
Reported by Pylint.
Line: 10
Column: 29
the `interpolate` anyway.
'''
qinterpolate_long_configs = op_bench.config_list(
attr_names=['M', 'N', 'K'],
attrs=[
[512, 512, 512],
],
cross_product_configs={
Reported by Pylint.
Line: 25
Column: 30
)
qinterpolate_short_configs = op_bench.config_list(
attr_names=['M', 'N', 'K', 'dtype', 'mode', 'scale', 'contig'],
attrs=[
[32, 32, 32, torch.quint8, 'nearest', 0.5, True], # Downsample
[32, 32, 32, torch.quint8, 'bilinear', 0.5, True], # Downsample
[32, 32, 32, torch.quint8, 'nearest', 2.0, True], # Upsample
Reported by Pylint.
Line: 37
Column: 29
)
class QInterpolateBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, dtype, mode, scale, contig):
f_input = (torch.rand(1, M, N, K) - 0.5) * 256
scale = 0.1
zero_point = 42
self.q_input = torch.quantize_per_tensor(f_input, scale=scale,
Reported by Pylint.
Line: 46
Column: 39
zero_point=zero_point,
dtype=dtype)
if not contig:
permute_dims = list(range(q_input.ndim))[::-1]
self.q_input = self.q_input.permute(permute_dims)
self.inputs = {
"q_input": self.q_input,
"scale_factor": scale,
Reported by Pylint.
Line: 61
Column: 1
q_input, scale_factor=scale_factor, mode=mode)
op_bench.generate_pt_test(qinterpolate_short_configs + qinterpolate_long_configs,
QInterpolateBenchmark)
if __name__ == '__main__':
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 4
Column: 1
import operator_benchmark as op_bench
import torch
'''Microbenchmarks for the quantized interpolate op.
Note: We are not benchmarking `upsample` as it is being depricated, and calls
the `interpolate` anyway.
'''
Reported by Pylint.
Line: 19
Column: 3
'dtype': [torch.quint8, torch.qint8, torch.qint32],
'mode': ['nearest', 'bilinear'],
'scale': [0.5, 1.0, 2.0],
'contig': [True], # TODO: Add `False` after #29435
},
tags=['long']
)
Reported by Pylint.
Line: 42
Column: 9
f_input = (torch.rand(1, M, N, K) - 0.5) * 256
scale = 0.1
zero_point = 42
self.q_input = torch.quantize_per_tensor(f_input, scale=scale,
zero_point=zero_point,
dtype=dtype)
if not contig:
permute_dims = list(range(q_input.ndim))[::-1]
self.q_input = self.q_input.permute(permute_dims)
Reported by Pylint.
Line: 47
Column: 13
dtype=dtype)
if not contig:
permute_dims = list(range(q_input.ndim))[::-1]
self.q_input = self.q_input.permute(permute_dims)
self.inputs = {
"q_input": self.q_input,
"scale_factor": scale,
"mode": mode
Reported by Pylint.
benchmarks/operator_benchmark/pt/nan_to_num_test.py
20 issues
Line: 2
Column: 1
import operator_benchmark as op_bench
import torch
import math
"""Microbenchmarks for torch.nan_to_num / nan_to_num_ operators"""
# Configs for PT torch.nan_to_num / nan_to_num_ operators
Reported by Pylint.
Line: 10
Column: 23
# Configs for PT torch.nan_to_num / nan_to_num_ operators
nan_to_num_ops_list = op_bench.op_list(
attr_names=['op_name', 'op_func'],
attrs=[
['nan_to_num', torch.nan_to_num],
['nan_to_num_', torch.nan_to_num_],
],
Reported by Pylint.
Line: 18
Column: 27
],
)
nan_to_num_long_configs = op_bench.cross_product_configs(
M=[32, 64, 128],
N=range(32, 128, 32),
dtype=[torch.float, torch.double],
replace_inf=[True, False],
tags=["long"],
Reported by Pylint.
Line: 27
Column: 28
)
nan_to_num_short_configs = op_bench.cross_product_configs(
M=[16, 64],
N=[64, 64],
dtype=[torch.float, torch.double],
replace_inf=[True, False],
tags=["short"],
Reported by Pylint.
Line: 36
Column: 27
)
class ReplaceNaNBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, dtype, replace_inf, op_func):
input = torch.randn(M, N, dtype=dtype)
input[0][0] = float("nan")
self.inputs = {
"input": input,
Reported by Pylint.
Line: 55
Column: 1
return self.op_func(input, nan=1.0, posinf=math.inf, neginf=-math.inf)
op_bench.generate_pt_tests_from_op_list(
nan_to_num_ops_list,
nan_to_num_long_configs + nan_to_num_short_configs,
ReplaceNaNBenchmark,
)
Reported by Pylint.
Line: 6
Column: 1
import math
"""Microbenchmarks for torch.nan_to_num / nan_to_num_ operators"""
# Configs for PT torch.nan_to_num / nan_to_num_ operators
nan_to_num_ops_list = op_bench.op_list(
attr_names=['op_name', 'op_func'],
Reported by Pylint.
Line: 38
Column: 9
class ReplaceNaNBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, dtype, replace_inf, op_func):
input = torch.randn(M, N, dtype=dtype)
input[0][0] = float("nan")
self.inputs = {
"input": input,
"replace_inf": replace_inf
}
Reported by Pylint.
Line: 40
Column: 9
def init(self, M, N, dtype, replace_inf, op_func):
input = torch.randn(M, N, dtype=dtype)
input[0][0] = float("nan")
self.inputs = {
"input": input,
"replace_inf": replace_inf
}
self.op_func = op_func
self.set_module_name("nan_to_num")
Reported by Pylint.
Line: 44
Column: 9
"input": input,
"replace_inf": replace_inf
}
self.op_func = op_func
self.set_module_name("nan_to_num")
def forward(self, input, replace_inf: bool):
# compare inplace
if replace_inf:
Reported by Pylint.
caffe2/contrib/tensorboard/tensorboard_exporter_test.py
20 issues
Line: 596
Column: 9
op = caffe2_pb2.OperatorDef()
op.type = 'foo'
op.input.extend(['foo'])
tb._fill_missing_operator_names([op])
self.assertEqual(op.input[0], 'foo')
self.assertEqual(op.name, 'foo_1')
def test_that_replacing_colons_gives_non_colliding_names(self):
# .. and update shapes
Reported by Pylint.
Line: 606
Column: 28
op.name = 'foo:0'
op.input.extend(['foo:0', 'foo$0'])
shapes = {'foo:0': [1]}
track_blob_names = tb._get_blob_names([op])
tb._replace_colons(shapes, track_blob_names, [op], '$')
self.assertEqual(op.input[0], 'foo$0')
self.assertEqual(op.input[1], 'foo$0_1')
# Collision but blobs and op names are handled later by
# _fill_missing_operator_names.
Reported by Pylint.
Line: 607
Column: 9
op.input.extend(['foo:0', 'foo$0'])
shapes = {'foo:0': [1]}
track_blob_names = tb._get_blob_names([op])
tb._replace_colons(shapes, track_blob_names, [op], '$')
self.assertEqual(op.input[0], 'foo$0')
self.assertEqual(op.input[1], 'foo$0_1')
# Collision but blobs and op names are handled later by
# _fill_missing_operator_names.
self.assertEqual(op.name, 'foo$0')
Reported by Pylint.
Line: 625
Column: 28
op.name = 'foo_grad'
op.input.extend(['foo_grad', 'foo_grad_1'])
shapes = {'foo_grad': [1]}
track_blob_names = tb._get_blob_names([op])
tb._add_gradient_scope(shapes, track_blob_names, [op])
self.assertEqual(op.input[0], 'GRADIENTS/foo_grad')
self.assertEqual(op.input[1], 'GRADIENTS/foo_grad_1')
self.assertEqual(op.name, 'GRADIENTS/foo_grad')
self.assertEqual(len(shapes), 1)
Reported by Pylint.
Line: 626
Column: 9
op.input.extend(['foo_grad', 'foo_grad_1'])
shapes = {'foo_grad': [1]}
track_blob_names = tb._get_blob_names([op])
tb._add_gradient_scope(shapes, track_blob_names, [op])
self.assertEqual(op.input[0], 'GRADIENTS/foo_grad')
self.assertEqual(op.input[1], 'GRADIENTS/foo_grad_1')
self.assertEqual(op.name, 'GRADIENTS/foo_grad')
self.assertEqual(len(shapes), 1)
self.assertEqual(shapes['GRADIENTS/foo_grad'], [1])
Reported by Pylint.
Line: 646
Column: 28
op2.output.extend(['foo'])
op2.output.extend(['foo_1'])
shapes = {'foo': [1], 'foo_1': [2]}
track_blob_names = tb._get_blob_names([op1, op2])
tb._convert_to_ssa(shapes, track_blob_names, [op1, op2])
self.assertEqual(op1.output[0], 'foo')
self.assertEqual(op2.input[0], 'foo')
self.assertEqual(op2.output[0], 'foo_1')
# Unfortunate name but we do not parse original `_` for now.
Reported by Pylint.
Line: 647
Column: 9
op2.output.extend(['foo_1'])
shapes = {'foo': [1], 'foo_1': [2]}
track_blob_names = tb._get_blob_names([op1, op2])
tb._convert_to_ssa(shapes, track_blob_names, [op1, op2])
self.assertEqual(op1.output[0], 'foo')
self.assertEqual(op2.input[0], 'foo')
self.assertEqual(op2.output[0], 'foo_1')
# Unfortunate name but we do not parse original `_` for now.
self.assertEqual(op2.output[1], 'foo_1_1')
Reported by Pylint.
Line: 1
Column: 1
import unittest
from caffe2.proto import caffe2_pb2
import caffe2.python.cnn as cnn
Reported by Pylint.
Line: 591
Column: 1
"""
class TensorboardExporterTest(unittest.TestCase):
def test_that_operators_gets_non_colliding_names(self):
op = caffe2_pb2.OperatorDef()
op.type = 'foo'
op.input.extend(['foo'])
tb._fill_missing_operator_names([op])
Reported by Pylint.
Line: 592
Column: 5
class TensorboardExporterTest(unittest.TestCase):
def test_that_operators_gets_non_colliding_names(self):
op = caffe2_pb2.OperatorDef()
op.type = 'foo'
op.input.extend(['foo'])
tb._fill_missing_operator_names([op])
self.assertEqual(op.input[0], 'foo')
Reported by Pylint.
benchmarks/operator_benchmark/pt/softmax_test.py
20 issues
Line: 3
Column: 1
import operator_benchmark as op_bench
import torch
import torch.nn as nn
"""
Microbenchmarks for the softmax operators.
"""
Reported by Pylint.
Line: 4
Column: 1
import operator_benchmark as op_bench
import torch
import torch.nn as nn
"""
Microbenchmarks for the softmax operators.
"""
Reported by Pylint.
Line: 13
Column: 25
# Configs for softmax ops
softmax_configs_short = op_bench.config_list(
attr_names=[
'N', 'C', 'H', 'W'
],
attrs=[
[1, 3, 256, 256],
Reported by Pylint.
Line: 28
Column: 24
)
softmax_configs_long = op_bench.cross_product_configs(
N=[8, 16],
C=[3],
H=[256, 512],
W=[256, 512],
device=['cpu', 'cuda'],
Reported by Pylint.
Line: 38
Column: 20
)
softmax_ops_list = op_bench.op_list(
attr_names=['op_name', 'op_func'],
attrs=[
['Softmax', nn.Softmax],
['Softmax2d', nn.Softmax2d],
['LogSoftmax', nn.LogSoftmax],
Reported by Pylint.
Line: 48
Column: 24
)
class SoftmaxBenchmark(op_bench.TorchBenchmarkBase):
def init(self, N, C, H, W, device, op_func):
self.inputs = {
"input": torch.rand(N, C, H, W, device=device)
}
self.op_func = op_func()
Reported by Pylint.
Line: 59
Column: 1
return self.op_func(input)
op_bench.generate_pt_tests_from_op_list(softmax_ops_list,
softmax_configs_short + softmax_configs_long,
SoftmaxBenchmark)
if __name__ == "__main__":
Reported by Pylint.
Line: 7
Column: 1
import torch.nn as nn
"""
Microbenchmarks for the softmax operators.
"""
# Configs for softmax ops
Reported by Pylint.
Line: 50
Column: 9
class SoftmaxBenchmark(op_bench.TorchBenchmarkBase):
def init(self, N, C, H, W, device, op_func):
self.inputs = {
"input": torch.rand(N, C, H, W, device=device)
}
self.op_func = op_func()
def forward(self, input):
Reported by Pylint.
Line: 53
Column: 9
self.inputs = {
"input": torch.rand(N, C, H, W, device=device)
}
self.op_func = op_func()
def forward(self, input):
return self.op_func(input)
Reported by Pylint.
caffe2/python/models/imagenet_trainer_test_utils.py
19 issues
Line: 64
Column: 14
param_to_grad = model.AddGradientOperators([loss])
(shapes, types) = workspace.InferShapesAndTypes(
[model.param_init_net, model.net],
{data_blob: [4, 3, 227, 227],
label_blob: [4]},
)
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
import time
from caffe2.python import workspace, cnn, memonger, core
Reported by Pylint.
Line: 7
Column: 1
import numpy as np
import time
from caffe2.python import workspace, cnn, memonger, core
def has_blob(proto, needle):
for op in proto.op:
Reported by Pylint.
Line: 11
Column: 1
from caffe2.python import workspace, cnn, memonger, core
def has_blob(proto, needle):
for op in proto.op:
for inp in op.input:
if inp == needle:
return True
for outp in op.output:
Reported by Pylint.
Line: 12
Column: 9
from caffe2.python import workspace, cnn, memonger, core
def has_blob(proto, needle):
for op in proto.op:
for inp in op.input:
if inp == needle:
return True
for outp in op.output:
if outp == needle:
Reported by Pylint.
Line: 22
Column: 1
return False
def count_blobs(proto):
blobs = set()
for op in proto.op:
blobs = blobs.union(set(op.input)).union(set(op.output))
return len(blobs)
Reported by Pylint.
Line: 24
Column: 9
def count_blobs(proto):
blobs = set()
for op in proto.op:
blobs = blobs.union(set(op.input)).union(set(op.output))
return len(blobs)
def count_shared_blobs(proto):
Reported by Pylint.
Line: 29
Column: 1
return len(blobs)
def count_shared_blobs(proto):
blobs = set()
for op in proto.op:
blobs = blobs.union(set(op.input)).union(set(op.output))
return len([b for b in blobs if "_shared" in b])
Reported by Pylint.
Line: 31
Column: 9
def count_shared_blobs(proto):
blobs = set()
for op in proto.op:
blobs = blobs.union(set(op.input)).union(set(op.output))
return len([b for b in blobs if "_shared" in b])
def test_shared_grads(
Reported by Pylint.
Line: 36
Column: 1
return len([b for b in blobs if "_shared" in b])
def test_shared_grads(
with_shapes,
create_model,
conv_blob,
last_out_blob,
data_blob='gpu_0/data',
Reported by Pylint.
benchmarks/operator_benchmark/c2/clip_ranges_test.py
19 issues
Line: 1
Column: 1
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core, dyndep
dyndep.InitOpsLibrary("@/caffe2/caffe2/fb/operators:clip_ranges_op")
"""Microbenchmarks for ClipRanges operator."""
Reported by Pylint.
Line: 3
Column: 1
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core, dyndep
dyndep.InitOpsLibrary("@/caffe2/caffe2/fb/operators:clip_ranges_op")
"""Microbenchmarks for ClipRanges operator."""
Reported by Pylint.
Line: 4
Column: 1
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core, dyndep
dyndep.InitOpsLibrary("@/caffe2/caffe2/fb/operators:clip_ranges_op")
"""Microbenchmarks for ClipRanges operator."""
Reported by Pylint.
Line: 11
Column: 28
"""Microbenchmarks for ClipRanges operator."""
# Configs for C2 ClipRanges operator
clip_ranges_long_configs = op_bench.cross_product_configs(
LENGTH=range(1, 100),
M=[1],
N=[2],
MAX_LENGTH=range(1, 100),
dtype=["int32"],
Reported by Pylint.
Line: 21
Column: 29
)
clip_ranges_short_configs = op_bench.config_list(
attrs=[
[6, 1, 2, 1, "int32"],
[7, 1, 2, 2, "int32"],
[8, 1, 2, 3, "int32"],
[9, 1, 2, 4, "int32"],
Reported by Pylint.
Line: 3
Column: 1
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core, dyndep
dyndep.InitOpsLibrary("@/caffe2/caffe2/fb/operators:clip_ranges_op")
"""Microbenchmarks for ClipRanges operator."""
Reported by Pylint.
Line: 8
Column: 1
dyndep.InitOpsLibrary("@/caffe2/caffe2/fb/operators:clip_ranges_op")
"""Microbenchmarks for ClipRanges operator."""
# Configs for C2 ClipRanges operator
clip_ranges_long_configs = op_bench.cross_product_configs(
LENGTH=range(1, 100),
M=[1],
Reported by Pylint.
Line: 36
Column: 9
class ClipRangesBenchmark(op_bench_c2.Caffe2BenchmarkBase):
def init(self, LENGTH, M, N, MAX_LENGTH, dtype):
self.input = self.tensor([LENGTH, M, N], dtype)
self.max_length = MAX_LENGTH
self.set_module_name("clip_ranges")
def forward(self):
op = core.CreateOperator("ClipRanges", self.input, self.input, max_length=self.max_length)
Reported by Pylint.
Line: 37
Column: 9
class ClipRangesBenchmark(op_bench_c2.Caffe2BenchmarkBase):
def init(self, LENGTH, M, N, MAX_LENGTH, dtype):
self.input = self.tensor([LENGTH, M, N], dtype)
self.max_length = MAX_LENGTH
self.set_module_name("clip_ranges")
def forward(self):
op = core.CreateOperator("ClipRanges", self.input, self.input, max_length=self.max_length)
return op
Reported by Pylint.
Line: 1
Column: 1
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core, dyndep
dyndep.InitOpsLibrary("@/caffe2/caffe2/fb/operators:clip_ranges_op")
"""Microbenchmarks for ClipRanges operator."""
Reported by Pylint.
.circleci/generate_config_yml.py
19 issues
Line: 99
Column: 34
master_deps = set()
def _save_requires_if_master(item_type, item):
requires = item.get('requires', None)
item_name = item.get("name", None)
if not isinstance(requires, list):
return
if _is_master_item(item) or item_name in master_deps:
Reported by Pylint.
Line: 30
Column: 1
import cimodel.lib.miniyaml as miniyaml
class File(object):
"""
Verbatim copy the contents of a file into config.yml
"""
def __init__(self, filename):
Reported by Pylint.
Line: 30
Column: 1
import cimodel.lib.miniyaml as miniyaml
class File(object):
"""
Verbatim copy the contents of a file into config.yml
"""
def __init__(self, filename):
Reported by Pylint.
Line: 38
Column: 5
def __init__(self, filename):
self.filename = filename
def write(self, output_filehandle):
with open(os.path.join("verbatim-sources", self.filename)) as fh:
shutil.copyfileobj(fh, output_filehandle)
class FunctionGen(namedtuple("FunctionGen", "function depth")):
Reported by Pylint.
Line: 39
Column: 71
self.filename = filename
def write(self, output_filehandle):
with open(os.path.join("verbatim-sources", self.filename)) as fh:
shutil.copyfileobj(fh, output_filehandle)
class FunctionGen(namedtuple("FunctionGen", "function depth")):
__slots__ = ()
Reported by Pylint.
Line: 43
Column: 1
shutil.copyfileobj(fh, output_filehandle)
class FunctionGen(namedtuple("FunctionGen", "function depth")):
__slots__ = ()
class Treegen(FunctionGen):
"""
Reported by Pylint.
Line: 52
Column: 5
Insert the content of a YAML tree into config.yml
"""
def write(self, output_filehandle):
miniyaml.render(output_filehandle, self.function(), self.depth)
class Listgen(FunctionGen):
"""
Reported by Pylint.
Line: 61
Column: 5
Insert the content of a YAML list into config.yml
"""
def write(self, output_filehandle):
miniyaml.render(output_filehandle, self.function(), self.depth)
def horizontal_rule():
return "".join("#" * 78)
Reported by Pylint.
Line: 65
Column: 1
miniyaml.render(output_filehandle, self.function(), self.depth)
def horizontal_rule():
return "".join("#" * 78)
class Header(object):
def __init__(self, title, summary=None):
Reported by Pylint.
Line: 69
Column: 1
return "".join("#" * 78)
class Header(object):
def __init__(self, title, summary=None):
self.title = title
self.summary_lines = summary or []
def write(self, output_filehandle):
Reported by Pylint.
benchmarks/operator_benchmark/c2/batch_box_cox_test.py
19 issues
Line: 1
Column: 1
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for BatchBoxCox operator."""
# Configs for C2 BatchBoxCox operator
Reported by Pylint.
Line: 3
Column: 1
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for BatchBoxCox operator."""
# Configs for C2 BatchBoxCox operator
Reported by Pylint.
Line: 4
Column: 1
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for BatchBoxCox operator."""
# Configs for C2 BatchBoxCox operator
Reported by Pylint.
Line: 10
Column: 30
"""Microbenchmarks for BatchBoxCox operator."""
# Configs for C2 BatchBoxCox operator
batch_box_cox_long_configs = op_bench.cross_product_configs(
M=[32, 64, 128], N=range(32, 128, 32), dtype=["float", "double"], tags=["long"]
)
batch_box_cox_short_configs = op_bench.config_list(
Reported by Pylint.
Line: 15
Column: 31
)
batch_box_cox_short_configs = op_bench.config_list(
attrs=[
[16, 16, "float"],
[16, 16, "double"],
[64, 64, "float"],
[64, 64, "double"],
Reported by Pylint.
Line: 3
Column: 1
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for BatchBoxCox operator."""
# Configs for C2 BatchBoxCox operator
Reported by Pylint.
Line: 7
Column: 1
from caffe2.python import core
"""Microbenchmarks for BatchBoxCox operator."""
# Configs for C2 BatchBoxCox operator
batch_box_cox_long_configs = op_bench.cross_product_configs(
M=[32, 64, 128], N=range(32, 128, 32), dtype=["float", "double"], tags=["long"]
)
Reported by Pylint.
Line: 29
Column: 9
class BatchBoxCoxBenchmark(op_bench_c2.Caffe2BenchmarkBase):
def init(self, M, N, dtype):
self.data = self.tensor([M, N], dtype)
self.lambda1 = self.tensor([N], dtype)
self.lambda2 = self.tensor([N], dtype)
self.output = self.tensor([1, 1], dtype)
self.set_module_name("batch_box_cox")
Reported by Pylint.
Line: 30
Column: 9
class BatchBoxCoxBenchmark(op_bench_c2.Caffe2BenchmarkBase):
def init(self, M, N, dtype):
self.data = self.tensor([M, N], dtype)
self.lambda1 = self.tensor([N], dtype)
self.lambda2 = self.tensor([N], dtype)
self.output = self.tensor([1, 1], dtype)
self.set_module_name("batch_box_cox")
def forward(self):
Reported by Pylint.
Line: 31
Column: 9
def init(self, M, N, dtype):
self.data = self.tensor([M, N], dtype)
self.lambda1 = self.tensor([N], dtype)
self.lambda2 = self.tensor([N], dtype)
self.output = self.tensor([1, 1], dtype)
self.set_module_name("batch_box_cox")
def forward(self):
op = core.CreateOperator("BatchBoxCox", [self.data, self.lambda1, self.lambda2], self.output)
Reported by Pylint.