The following issues were found:
tools/test/test_test_selections.py
20 issues
Line: 4
Column: 1
import random
import unittest
from tools.testing.test_selections import calculate_shards
from typing import Dict, List, Tuple
class TestCalculateShards(unittest.TestCase):
tests: List[str] = [
Reported by Pylint.
Line: 1
Column: 1
import random
import unittest
from tools.testing.test_selections import calculate_shards
from typing import Dict, List, Tuple
class TestCalculateShards(unittest.TestCase):
tests: List[str] = [
Reported by Pylint.
Line: 5
Column: 1
import unittest
from tools.testing.test_selections import calculate_shards
from typing import Dict, List, Tuple
class TestCalculateShards(unittest.TestCase):
tests: List[str] = [
'super_long_test',
Reported by Pylint.
Line: 8
Column: 1
from typing import Dict, List, Tuple
class TestCalculateShards(unittest.TestCase):
tests: List[str] = [
'super_long_test',
'long_test1',
'long_test2',
'normal_test1',
Reported by Pylint.
Line: 37
Column: 5
'short_test5': 0.01,
}
def assert_shards_equal(
self,
expected_shards: List[Tuple[float, List[str]]],
actual_shards: List[Tuple[float, List[str]]]
) -> None:
for expected, actual in zip(expected_shards, actual_shards):
Reported by Pylint.
Line: 46
Column: 5
self.assertAlmostEqual(expected[0], actual[0])
self.assertListEqual(expected[1], actual[1])
def test_calculate_2_shards_with_complete_test_times(self) -> None:
expected_shards = [
(60, ['super_long_test', 'normal_test3']),
(58.31, ['long_test1', 'long_test2', 'normal_test1', 'normal_test2', 'short_test1', 'short_test2',
'short_test3', 'short_test4', 'short_test5'])
]
Reported by Pylint.
Line: 49
Column: 1
def test_calculate_2_shards_with_complete_test_times(self) -> None:
expected_shards = [
(60, ['super_long_test', 'normal_test3']),
(58.31, ['long_test1', 'long_test2', 'normal_test1', 'normal_test2', 'short_test1', 'short_test2',
'short_test3', 'short_test4', 'short_test5'])
]
self.assert_shards_equal(expected_shards, calculate_shards(2, self.tests, self.test_times))
Reported by Pylint.
Line: 55
Column: 5
self.assert_shards_equal(expected_shards, calculate_shards(2, self.tests, self.test_times))
def test_calculate_5_shards_with_complete_test_times(self) -> None:
expected_shards = [
(55.0, ['super_long_test']),
(22.0, ['long_test1', ]),
(18.0, ['long_test2', ]),
(11.31, ['normal_test1', 'short_test1', 'short_test2', 'short_test3', 'short_test4', 'short_test5']),
Reported by Pylint.
Line: 60
Column: 1
(55.0, ['super_long_test']),
(22.0, ['long_test1', ]),
(18.0, ['long_test2', ]),
(11.31, ['normal_test1', 'short_test1', 'short_test2', 'short_test3', 'short_test4', 'short_test5']),
(12.0, ['normal_test2', 'normal_test3']),
]
self.assert_shards_equal(expected_shards, calculate_shards(5, self.tests, self.test_times))
Reported by Pylint.
Line: 66
Column: 5
self.assert_shards_equal(expected_shards, calculate_shards(5, self.tests, self.test_times))
def test_calculate_2_shards_with_incomplete_test_times(self) -> None:
incomplete_test_times = {k: v for k, v in self.test_times.items() if 'test1' in k}
expected_shards = [
(22.0, ['long_test1', 'long_test2', 'normal_test3', 'short_test3', 'short_test5']),
(10.0, ['normal_test1', 'short_test1', 'super_long_test', 'normal_test2', 'short_test2', 'short_test4']),
]
Reported by Pylint.
caffe2/python/mkl/mkl_sbn_op_test.py
20 issues
Line: 7
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
Reported by Pylint.
Line: 8
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
Reported by Pylint.
Line: 15
Column: 22
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLSpatialBNTest(hu.HypothesisTestCase):
@given(size=st.integers(7, 10),
input_channels=st.integers(1, 10),
batch_size=st.integers(1, 3),
Reported by Pylint.
Line: 27
Column: 68
epsilon=st.floats(1e-5, 1e-2),
**mu.gcs)
def test_spatialbn_test_mode(self, size, input_channels,
batch_size, seed, order, epsilon, gc, dc):
np.random.seed(seed)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
mean = np.random.randn(input_channels).astype(np.float32)
var = np.random.rand(input_channels).astype(np.float32) + 0.5
Reported by Pylint.
Line: 57
Column: 13
**mu.gcs)
def test_spatialbn_train_mode(
self, size, input_channels, batch_size, seed, order, epsilon,
gc, dc):
op = core.CreateOperator(
"SpatialBN",
["X", "scale", "bias", "running_mean", "running_var"],
["Y", "running_mean", "running_var", "saved_mean", "saved_var"],
order=order,
Reported by Pylint.
Line: 81
Column: 5
if __name__ == "__main__":
import unittest
unittest.main()
Reported by Pylint.
Line: 1
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
Reported by Pylint.
Line: 17
Column: 1
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLSpatialBNTest(hu.HypothesisTestCase):
@given(size=st.integers(7, 10),
input_channels=st.integers(1, 10),
batch_size=st.integers(1, 3),
seed=st.integers(0, 65535),
#order=st.sampled_from(["NCHW", "NHWC"]),
Reported by Pylint.
Line: 26
Column: 5
order=st.sampled_from(["NCHW"]),
epsilon=st.floats(1e-5, 1e-2),
**mu.gcs)
def test_spatialbn_test_mode(self, size, input_channels,
batch_size, seed, order, epsilon, gc, dc):
np.random.seed(seed)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
mean = np.random.randn(input_channels).astype(np.float32)
Reported by Pylint.
Line: 26
Column: 5
order=st.sampled_from(["NCHW"]),
epsilon=st.floats(1e-5, 1e-2),
**mu.gcs)
def test_spatialbn_test_mode(self, size, input_channels,
batch_size, seed, order, epsilon, gc, dc):
np.random.seed(seed)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
mean = np.random.randn(input_channels).astype(np.float32)
Reported by Pylint.
benchmarks/operator_benchmark/pt_extension/cpp_extension_test.py
20 issues
Line: 3
Column: 1
import unittest
import benchmark_cpp_extension # noqa: F401
import torch
class TestConsumeOp(unittest.TestCase):
def test_jit_consume_op(self):
iters = 6
Reported by Pylint.
Line: 4
Column: 1
import unittest
import benchmark_cpp_extension # noqa: F401
import torch
class TestConsumeOp(unittest.TestCase):
def test_jit_consume_op(self):
iters = 6
Reported by Pylint.
Line: 3
Column: 1
import unittest
import benchmark_cpp_extension # noqa: F401
import torch
class TestConsumeOp(unittest.TestCase):
def test_jit_consume_op(self):
iters = 6
Reported by Pylint.
Line: 12
Column: 17
iters = 6
def foo(x):
for i in range(iters):
result = torch.ops.operator_benchmark._consume(torch.sum(x))
return result
r = torch.jit.trace(foo, (torch.rand(2, 2)))
Reported by Pylint.
Line: 13
Column: 26
def foo(x):
for i in range(iters):
result = torch.ops.operator_benchmark._consume(torch.sum(x))
return result
r = torch.jit.trace(foo, (torch.rand(2, 2)))
graph = str(r.graph)
Reported by Pylint.
Line: 30
Column: 17
iters = 6
def foo(x):
for i in range(iters):
result = torch.ops.operator_benchmark._consume(torch.chunk(x, 2))
return result
r = torch.jit.trace(foo, torch.rand(2, 2))
Reported by Pylint.
Line: 31
Column: 26
def foo(x):
for i in range(iters):
result = torch.ops.operator_benchmark._consume(torch.chunk(x, 2))
return result
r = torch.jit.trace(foo, torch.rand(2, 2))
graph = str(r.graph)
Reported by Pylint.
Line: 1
Column: 1
import unittest
import benchmark_cpp_extension # noqa: F401
import torch
class TestConsumeOp(unittest.TestCase):
def test_jit_consume_op(self):
iters = 6
Reported by Pylint.
Line: 7
Column: 1
import torch
class TestConsumeOp(unittest.TestCase):
def test_jit_consume_op(self):
iters = 6
def foo(x):
for i in range(iters):
Reported by Pylint.
Line: 8
Column: 5
class TestConsumeOp(unittest.TestCase):
def test_jit_consume_op(self):
iters = 6
def foo(x):
for i in range(iters):
result = torch.ops.operator_benchmark._consume(torch.sum(x))
Reported by Pylint.
caffe2/python/helpers/fc.py
20 issues
Line: 1
Column: 1
## @package fc
# Module caffe2.python.helpers.fc
from caffe2.python import core
from caffe2.python.modeling import initializers
Reported by Pylint.
Line: 13
Column: 1
from caffe2.python.modeling.parameter_info import ParameterTags
def _FC_or_packed_FC(
model, op_call, blob_in, blob_out, dim_in, dim_out, weight_init=None,
bias_init=None, WeightInitializer=None, BiasInitializer=None,
enable_tensor_core=False, float16_compute=False, **kwargs
):
WeightInitializer = initializers.update_initializer(
Reported by Pylint.
Line: 13
Column: 1
from caffe2.python.modeling.parameter_info import ParameterTags
def _FC_or_packed_FC(
model, op_call, blob_in, blob_out, dim_in, dim_out, weight_init=None,
bias_init=None, WeightInitializer=None, BiasInitializer=None,
enable_tensor_core=False, float16_compute=False, **kwargs
):
WeightInitializer = initializers.update_initializer(
Reported by Pylint.
Line: 13
Column: 1
from caffe2.python.modeling.parameter_info import ParameterTags
def _FC_or_packed_FC(
model, op_call, blob_in, blob_out, dim_in, dim_out, weight_init=None,
bias_init=None, WeightInitializer=None, BiasInitializer=None,
enable_tensor_core=False, float16_compute=False, **kwargs
):
WeightInitializer = initializers.update_initializer(
Reported by Pylint.
Line: 13
Column: 1
from caffe2.python.modeling.parameter_info import ParameterTags
def _FC_or_packed_FC(
model, op_call, blob_in, blob_out, dim_in, dim_out, weight_init=None,
bias_init=None, WeightInitializer=None, BiasInitializer=None,
enable_tensor_core=False, float16_compute=False, **kwargs
):
WeightInitializer = initializers.update_initializer(
Reported by Pylint.
Line: 13
Column: 1
from caffe2.python.modeling.parameter_info import ParameterTags
def _FC_or_packed_FC(
model, op_call, blob_in, blob_out, dim_in, dim_out, weight_init=None,
bias_init=None, WeightInitializer=None, BiasInitializer=None,
enable_tensor_core=False, float16_compute=False, **kwargs
):
WeightInitializer = initializers.update_initializer(
Reported by Pylint.
Line: 57
Column: 1
return op_call([blob_in, weight, bias], blob_out, **kwargs)
def fc(model, *args, **kwargs):
return _FC_or_packed_FC(model, model.net.FC, *args, **kwargs)
def packed_fc(model, *args, **kwargs):
return _FC_or_packed_FC(model, model.net.PackedFC, *args, **kwargs)
Reported by Pylint.
Line: 57
Column: 1
return op_call([blob_in, weight, bias], blob_out, **kwargs)
def fc(model, *args, **kwargs):
return _FC_or_packed_FC(model, model.net.FC, *args, **kwargs)
def packed_fc(model, *args, **kwargs):
return _FC_or_packed_FC(model, model.net.PackedFC, *args, **kwargs)
Reported by Pylint.
Line: 61
Column: 1
return _FC_or_packed_FC(model, model.net.FC, *args, **kwargs)
def packed_fc(model, *args, **kwargs):
return _FC_or_packed_FC(model, model.net.PackedFC, *args, **kwargs)
def fc_decomp(
model, blob_in, blob_out, dim_in, dim_out,
Reported by Pylint.
Line: 65
Column: 1
return _FC_or_packed_FC(model, model.net.PackedFC, *args, **kwargs)
def fc_decomp(
model, blob_in, blob_out, dim_in, dim_out,
rank_approx=5, weight_init=None, bias_init=None,
WeightInitializer=None, BiasInitializer=None, **kwargs
):
"""FC_Decomp version
Reported by Pylint.
caffe2/contrib/fakelowp/test/test_int8_quant.py
20 issues
Line: 2
Column: 1
# Must happen before importing caffe2.python.*
import caffe2.python.fakelowp.init_shared_libs # noqa
import datetime
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import settings
Reported by Pylint.
Line: 5
Column: 1
import caffe2.python.fakelowp.init_shared_libs # noqa
import datetime
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import settings
Reported by Pylint.
Line: 6
Column: 1
import datetime
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import settings
workspace.GlobalInit(
Reported by Pylint.
Line: 7
Column: 1
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import settings
workspace.GlobalInit(
[
Reported by Pylint.
Line: 8
Column: 1
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import settings
workspace.GlobalInit(
[
"caffe2",
Reported by Pylint.
Line: 9
Column: 1
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import settings
workspace.GlobalInit(
[
"caffe2",
"--glow_global_fp16=0",
Reported by Pylint.
Line: 2
Column: 1
# Must happen before importing caffe2.python.*
import caffe2.python.fakelowp.init_shared_libs # noqa
import datetime
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import settings
Reported by Pylint.
Line: 1
Column: 1
# Must happen before importing caffe2.python.*
import caffe2.python.fakelowp.init_shared_libs # noqa
import datetime
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import settings
Reported by Pylint.
Line: 3
Column: 1
# Must happen before importing caffe2.python.*
import caffe2.python.fakelowp.init_shared_libs # noqa
import datetime
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import settings
Reported by Pylint.
Line: 20
Column: 1
]
)
class QuantTest(serial.SerializedTestCase):
@settings(deadline=datetime.timedelta(seconds=10))
def test_dequantize(self):
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.append("X")
Reported by Pylint.
benchmarks/instruction_counts/worker/main.py
20 issues
Line: 33
Column: 5
# Benchmark utils are only partially strict compliant, so MyPy won't follow
# imports using the public namespace. (Due to an exclusion rule in
# mypy-strict.ini)
from torch.utils.benchmark.utils.timer import Language, Timer
from torch.utils.benchmark.utils.valgrind_wrapper.timer_interface import CallgrindStats
else:
from torch.utils.benchmark import CallgrindStats, Language, Timer
Reported by Pylint.
Line: 34
Column: 5
# imports using the public namespace. (Due to an exclusion rule in
# mypy-strict.ini)
from torch.utils.benchmark.utils.timer import Language, Timer
from torch.utils.benchmark.utils.valgrind_wrapper.timer_interface import CallgrindStats
else:
from torch.utils.benchmark import CallgrindStats, Language, Timer
Reported by Pylint.
Line: 37
Column: 5
from torch.utils.benchmark.utils.valgrind_wrapper.timer_interface import CallgrindStats
else:
from torch.utils.benchmark import CallgrindStats, Language, Timer
WORKER_PATH = os.path.abspath(__file__)
Reported by Pylint.
Line: 74
Column: 15
setup: str = "pass"
global_setup: str = ""
num_threads: int = 1
language: Language = Language.PYTHON
@dataclasses.dataclass(frozen=True)
class WorkerOutput:
# Only return values to reduce communication between main process and workers.
Reported by Pylint.
Line: 131
Column: 13
# =============================================================================
def _run(timer_args: WorkerTimerArgs) -> WorkerOutput:
timer = Timer(
stmt=timer_args.stmt,
setup=timer_args.setup or "pass",
global_setup=timer_args.global_setup,
# Prevent NotImplementedError on GPU builds and C++ snippets.
Reported by Pylint.
Line: 144
Column: 18
m = timer.blocked_autorange(min_run_time=MIN_RUN_TIME)
stats: Tuple[CallgrindStats, ...] = timer.collect_callgrind(
number=CALLGRIND_NUMBER,
collect_baseline=False,
repeats=CALLGRIND_REPEATS,
retain_out_file=False,
)
Reported by Pylint.
Line: 157
Column: 10
)
def main(communication_file: str) -> None:
result: Union[WorkerOutput, WorkerFailure]
try:
with open(communication_file, "rb") as f:
timer_args: WorkerTimerArgs = WorkerUnpickler(f).load_input()
assert isinstance(timer_args, WorkerTimerArgs)
Reported by Pylint.
Line: 169
Column: 12
# Runner process sent SIGINT.
sys.exit()
except BaseException:
trace_f = io.StringIO()
traceback.print_exc(file=trace_f)
result = WorkerFailure(failure_trace=trace_f.getvalue())
if not os.path.exists(os.path.split(communication_file)[0]):
Reported by Pylint.
Line: 22
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b403-import-pickle
import dataclasses
import io
import os
import pickle
import timeit
import traceback
from typing import Any, Tuple, Union, TYPE_CHECKING
import sys
Reported by Bandit.
Line: 78
Column: 1
@dataclasses.dataclass(frozen=True)
class WorkerOutput:
# Only return values to reduce communication between main process and workers.
wall_times: Tuple[float, ...]
instructions: Tuple[int, ...]
Reported by Pylint.
benchmarks/operator_benchmark/c2/matmul_test.py
20 issues
Line: 3
Column: 1
import operator_benchmark as op_bench
import benchmark_caffe2 as op_bench_c2
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for MatMul operator"""
# Configs for C2 Matmul operator
Reported by Pylint.
Line: 4
Column: 1
import operator_benchmark as op_bench
import benchmark_caffe2 as op_bench_c2
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for MatMul operator"""
# Configs for C2 Matmul operator
Reported by Pylint.
Line: 5
Column: 1
import operator_benchmark as op_bench
import benchmark_caffe2 as op_bench_c2
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for MatMul operator"""
# Configs for C2 Matmul operator
mm_long_configs = op_bench.cross_product_configs(
Reported by Pylint.
Line: 10
Column: 19
"""Microbenchmarks for MatMul operator"""
# Configs for C2 Matmul operator
mm_long_configs = op_bench.cross_product_configs(
M=[8, 64, 128],
N=range(2, 10, 3),
K=[2 ** x for x in range(0, 3)],
trans_a=[True, False],
trans_b=[True, False],
Reported by Pylint.
Line: 20
Column: 20
)
mm_short_configs = op_bench.config_list(
attrs=[
[128, 128, 128, False, True],
[1024, 1024, 256, True, False],
[8192, 8192, 1024, True, False],
],
Reported by Pylint.
Line: 4
Column: 1
import operator_benchmark as op_bench
import benchmark_caffe2 as op_bench_c2
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for MatMul operator"""
# Configs for C2 Matmul operator
Reported by Pylint.
Line: 7
Column: 1
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for MatMul operator"""
# Configs for C2 Matmul operator
mm_long_configs = op_bench.cross_product_configs(
M=[8, 64, 128],
N=range(2, 10, 3),
Reported by Pylint.
Line: 33
Column: 9
class MatMulBenchmark(op_bench_c2.Caffe2BenchmarkBase):
def init(self, M, N, K, trans_a, trans_b):
self.input_one = self.tensor([N, M]) if trans_a else self.tensor([M, N])
self.input_two = self.tensor([K, N]) if trans_b else self.tensor([N, K])
self.args = {'trans_a': trans_a, 'trans_b': trans_b}
self.output = self.tensor([M, K])
self.set_module_name("matmul")
Reported by Pylint.
Line: 34
Column: 9
class MatMulBenchmark(op_bench_c2.Caffe2BenchmarkBase):
def init(self, M, N, K, trans_a, trans_b):
self.input_one = self.tensor([N, M]) if trans_a else self.tensor([M, N])
self.input_two = self.tensor([K, N]) if trans_b else self.tensor([N, K])
self.args = {'trans_a': trans_a, 'trans_b': trans_b}
self.output = self.tensor([M, K])
self.set_module_name("matmul")
def forward(self):
Reported by Pylint.
Line: 35
Column: 9
def init(self, M, N, K, trans_a, trans_b):
self.input_one = self.tensor([N, M]) if trans_a else self.tensor([M, N])
self.input_two = self.tensor([K, N]) if trans_b else self.tensor([N, K])
self.args = {'trans_a': trans_a, 'trans_b': trans_b}
self.output = self.tensor([M, K])
self.set_module_name("matmul")
def forward(self):
op = core.CreateOperator(
Reported by Pylint.
caffe2/python/docs/github.py
20 issues
Line: 15
Column: 5
class GHOpDocUploader(DocUploader):
def __init__(self):
pass
def upload(self, content_body):
print(content_body)
Reported by Pylint.
Line: 18
Column: 5
def __init__(self):
pass
def upload(self, content_body):
print(content_body)
class GHMarkdown(Markdown):
def addHeader(self, text, h=1):
Reported by Pylint.
Line: 44
Column: 35
for row in table[1:]:
self.addLine(' | '.join(row))
def addTableHTML(self, table, noTitle=False):
self.addRaw("<table>")
for row in table:
self.addRaw("<tr>")
for cell in row:
self.addRaw("<td>")
Reported by Pylint.
Line: 1
Column: 1
## @package github
# Module caffe2.python.docs.github
import argparse
import os
from caffe2.python.docs.formatter import Markdown
Reported by Pylint.
Line: 14
Column: 1
from caffe2.python.docs.generator import OperatorDoc, OperatorEngine
class GHOpDocUploader(DocUploader):
def __init__(self):
pass
def upload(self, content_body):
print(content_body)
Reported by Pylint.
Line: 14
Column: 1
from caffe2.python.docs.generator import OperatorDoc, OperatorEngine
class GHOpDocUploader(DocUploader):
def __init__(self):
pass
def upload(self, content_body):
print(content_body)
Reported by Pylint.
Line: 22
Column: 1
print(content_body)
class GHMarkdown(Markdown):
def addHeader(self, text, h=1):
self.addLine("\n{header} {text}\n".format(header=h * '#', text=text), True)
def addDocHeader(self):
self.addLine("---")
Reported by Pylint.
Line: 26
Column: 5
def addHeader(self, text, h=1):
self.addLine("\n{header} {text}\n".format(header=h * '#', text=text), True)
def addDocHeader(self):
self.addLine("---")
self.addLine("docid: operators-catalog")
self.addLine("title: Operators Catalog")
self.addLine("layout: operators")
self.addLine("permalink: /docs/operators-catalogue.html")
Reported by Pylint.
Line: 26
Column: 5
def addHeader(self, text, h=1):
self.addLine("\n{header} {text}\n".format(header=h * '#', text=text), True)
def addDocHeader(self):
self.addLine("---")
self.addLine("docid: operators-catalog")
self.addLine("title: Operators Catalog")
self.addLine("layout: operators")
self.addLine("permalink: /docs/operators-catalogue.html")
Reported by Pylint.
Line: 38
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
def addTable(self, table, noTitle=False):
self.addLinebreak()
assert(len(table) > 1)
self.addLine(' | '.join(['----------' for i in range(len(table[0]))]))
self.addLine(' | '.join(table[0]))
for row in table[1:]:
self.addLine(' | '.join(row))
Reported by Bandit.
caffe2/contrib/playground/AnyExpOnTerm.py
20 issues
Line: 45
Column: 62
opts['epoch_iter']['num_epochs_per_flow_schedule']):
# must support checkpoint or the multiple schedule will always
# start from initial state
checkpoint_model = None if epoch == start_epoch else ret['model']
pretrained_model = None if epoch > start_epoch else pretrained_model
shard_results = []
# with LexicalContext('epoch{}_gang'.format(epoch),gang_schedule=False):
for shard_id in range(opts['distributed']['num_shards']):
opts['temp_var']['shard_id'] = shard_id
Reported by Pylint.
Line: 19
Column: 25
log.setLevel(logging.DEBUG)
def runShardedTrainLoop(opts, myTrainFun):
start_epoch = 0
pretrained_model = opts['model_param']['pretrained_model']
if pretrained_model != '' and os.path.exists(pretrained_model):
# Only want to get start_epoch.
start_epoch, prev_checkpointed_lr, best_metric = \
Reported by Pylint.
Line: 24
Column: 22
pretrained_model = opts['model_param']['pretrained_model']
if pretrained_model != '' and os.path.exists(pretrained_model):
# Only want to get start_epoch.
start_epoch, prev_checkpointed_lr, best_metric = \
checkpoint.initialize_params_from_file(
model=None,
weights_file=pretrained_model,
num_xpus=1,
opts=opts,
Reported by Pylint.
Line: 24
Column: 44
pretrained_model = opts['model_param']['pretrained_model']
if pretrained_model != '' and os.path.exists(pretrained_model):
# Only want to get start_epoch.
start_epoch, prev_checkpointed_lr, best_metric = \
checkpoint.initialize_params_from_file(
model=None,
weights_file=pretrained_model,
num_xpus=1,
opts=opts,
Reported by Pylint.
Line: 33
Column: 14
broadcast_computed_param=True,
reset_epoch=opts['model_param']['reset_epoch'],
)
log.info('start epoch: {}'.format(start_epoch))
pretrained_model = None if pretrained_model == '' else pretrained_model
ret = None
pretrained_model = ""
shard_results = []
Reported by Pylint.
Line: 35
Column: 5
)
log.info('start epoch: {}'.format(start_epoch))
pretrained_model = None if pretrained_model == '' else pretrained_model
ret = None
pretrained_model = ""
shard_results = []
for epoch in range(start_epoch,
Reported by Pylint.
Line: 65
Column: 18
ret = shard_ret
opts['temp_var']['metrics_output'] = ret['metrics']
break
log.info('ret is: {}'.format(str(ret)))
return ret
def trainFun():
Reported by Pylint.
Line: 71
Column: 24
def trainFun():
def simpleTrainFun(opts):
trainerClass = AnyExp.createTrainerClass(opts)
trainerClass = AnyExp.overrideAdditionalMethods(trainerClass, opts)
trainer = trainerClass(opts)
return trainer.buildModelAndTrain(opts)
return simpleTrainFun
Reported by Pylint.
Line: 88
Column: 14
args = parser.parse_args()
opts = args.params['opts']
opts = AnyExp.initOpts(opts)
log.info('opts is: {}'.format(str(opts)))
AnyExp.initDefaultModuleMap()
opts['input']['datasets'] = AnyExp.aquireDatasets(opts)
Reported by Pylint.
Line: 98
Column: 14
# some other custermized training function.
ret = runShardedTrainLoop(opts, trainFun())
log.info('ret is: {}'.format(str(ret)))
Reported by Pylint.
caffe2/experiments/python/net_construct_bench.py
20 issues
Line: 32
Column: 1
import caffe2.python.models.resnet as resnet
'''
Simple benchmark that creates a data-parallel resnet-50 model
and measures the time.
'''
Reported by Pylint.
Line: 79
Column: 14
def Create(args):
gpus = list(range(args.num_gpus))
log.info("Running on gpus: {}".format(gpus))
# Create CNNModeLhelper object
train_model = cnn.CNNModelHelper(
order="NCHW",
name="resnet50",
Reported by Pylint.
Line: 90
Column: 42
)
# Model building functions
def create_resnet50_model_ops(model, loss_scale):
[softmax, loss] = resnet.create_resnet50(
model,
"data",
num_input_channels=3,
num_labels=1000,
Reported by Pylint.
Line: 116
Column: 25
)
AddMomentumParameterUpdate(model, LR)
def add_image_input(model):
pass
start_time = time.time()
# Create parallelized model
Reported by Pylint.
Line: 131
Column: 5
)
ct = time.time() - start_time
train_model.net._CheckLookupTables()
log.info("Model create for {} gpus took: {} secs".format(len(gpus), ct))
def main():
Reported by Pylint.
Line: 133
Column: 14
ct = time.time() - start_time
train_model.net._CheckLookupTables()
log.info("Model create for {} gpus took: {} secs".format(len(gpus), ct))
def main():
# TODO: use argv
parser = argparse.ArgumentParser(
Reported by Pylint.
Line: 137
Column: 3
def main():
# TODO: use argv
parser = argparse.ArgumentParser(
description="Caffe2: Benchmark for net construction"
)
parser.add_argument("--num_gpus", type=int, default=1,
help="Number of GPUs.")
Reported by Pylint.
Line: 1
Column: 1
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
Reported by Pylint.
Line: 43
Column: 1
log.setLevel(logging.DEBUG)
def AddMomentumParameterUpdate(train_model, LR):
'''
Add the momentum-SGD update.
'''
params = train_model.GetParams()
assert(len(params) > 0)
Reported by Pylint.
Line: 43
Column: 1
log.setLevel(logging.DEBUG)
def AddMomentumParameterUpdate(train_model, LR):
'''
Add the momentum-SGD update.
'''
params = train_model.GetParams()
assert(len(params) > 0)
Reported by Pylint.