The following issues were found:
benchmarks/serialization/simple_measurement.py
13 issues
Line: 1
Column: 1
import torch
from pyarkbench import Benchmark, Timer, default_args
use_new = True
class Basic(Benchmark):
def benchmark(self):
x = [torch.ones(200, 200) for i in range(30)]
with Timer() as big1:
Reported by Pylint.
Line: 2
Column: 1
import torch
from pyarkbench import Benchmark, Timer, default_args
use_new = True
class Basic(Benchmark):
def benchmark(self):
x = [torch.ones(200, 200) for i in range(30)]
with Timer() as big1:
Reported by Pylint.
Line: 13
Column: 13
torch.save(x, "big_tensor.zip", _use_new_zipfile_serialization=use_new)
with Timer() as big2:
v = torch.load("big_tensor.zip")
x = [torch.ones(10, 10) for i in range(200)]
with Timer() as small1:
torch.save(x, "small_tensor.zip", _use_new_zipfile_serialization=use_new)
Reported by Pylint.
Line: 1
Column: 1
import torch
from pyarkbench import Benchmark, Timer, default_args
use_new = True
class Basic(Benchmark):
def benchmark(self):
x = [torch.ones(200, 200) for i in range(30)]
with Timer() as big1:
Reported by Pylint.
Line: 4
Column: 1
import torch
from pyarkbench import Benchmark, Timer, default_args
use_new = True
class Basic(Benchmark):
def benchmark(self):
x = [torch.ones(200, 200) for i in range(30)]
with Timer() as big1:
Reported by Pylint.
Line: 6
Column: 1
use_new = True
class Basic(Benchmark):
def benchmark(self):
x = [torch.ones(200, 200) for i in range(30)]
with Timer() as big1:
torch.save(x, "big_tensor.zip", _use_new_zipfile_serialization=use_new)
Reported by Pylint.
Line: 6
Column: 1
use_new = True
class Basic(Benchmark):
def benchmark(self):
x = [torch.ones(200, 200) for i in range(30)]
with Timer() as big1:
torch.save(x, "big_tensor.zip", _use_new_zipfile_serialization=use_new)
Reported by Pylint.
Line: 7
Column: 5
use_new = True
class Basic(Benchmark):
def benchmark(self):
x = [torch.ones(200, 200) for i in range(30)]
with Timer() as big1:
torch.save(x, "big_tensor.zip", _use_new_zipfile_serialization=use_new)
with Timer() as big2:
Reported by Pylint.
Line: 7
Column: 5
use_new = True
class Basic(Benchmark):
def benchmark(self):
x = [torch.ones(200, 200) for i in range(30)]
with Timer() as big1:
torch.save(x, "big_tensor.zip", _use_new_zipfile_serialization=use_new)
with Timer() as big2:
Reported by Pylint.
Line: 8
Column: 9
class Basic(Benchmark):
def benchmark(self):
x = [torch.ones(200, 200) for i in range(30)]
with Timer() as big1:
torch.save(x, "big_tensor.zip", _use_new_zipfile_serialization=use_new)
with Timer() as big2:
v = torch.load("big_tensor.zip")
Reported by Pylint.
benchmarks/operator_benchmark/common/tests/pt_configs_list_test.py
13 issues
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for element-wise Add operator. Supports both Caffe2/PyTorch."""
add_short_configs = op_bench.config_list(
attr_names=['M', 'N', 'K'],
attrs=[
[8, 16, 32],
Reported by Pylint.
Line: 2
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for element-wise Add operator. Supports both Caffe2/PyTorch."""
add_short_configs = op_bench.config_list(
attr_names=['M', 'N', 'K'],
attrs=[
[8, 16, 32],
Reported by Pylint.
Line: 4
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for element-wise Add operator. Supports both Caffe2/PyTorch."""
add_short_configs = op_bench.config_list(
attr_names=['M', 'N', 'K'],
attrs=[
[8, 16, 32],
Reported by Pylint.
Line: 23
Column: 9
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device, dtype):
self.input_one = torch.rand(M, N, K, device=device, dtype=dtype, requires_grad=True)
self.input_two = torch.rand(M, N, K, device=device, dtype=dtype)
self.set_module_name('add')
def forward(self):
return torch.add(self.input_one, self.input_two)
Reported by Pylint.
Line: 24
Column: 9
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device, dtype):
self.input_one = torch.rand(M, N, K, device=device, dtype=dtype, requires_grad=True)
self.input_two = torch.rand(M, N, K, device=device, dtype=dtype)
self.set_module_name('add')
def forward(self):
return torch.add(self.input_one, self.input_two)
Reported by Pylint.
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for element-wise Add operator. Supports both Caffe2/PyTorch."""
add_short_configs = op_bench.config_list(
attr_names=['M', 'N', 'K'],
attrs=[
[8, 16, 32],
Reported by Pylint.
Line: 21
Column: 1
)
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device, dtype):
self.input_one = torch.rand(M, N, K, device=device, dtype=dtype, requires_grad=True)
self.input_two = torch.rand(M, N, K, device=device, dtype=dtype)
self.set_module_name('add')
Reported by Pylint.
Line: 22
Column: 5
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device, dtype):
self.input_one = torch.rand(M, N, K, device=device, dtype=dtype, requires_grad=True)
self.input_two = torch.rand(M, N, K, device=device, dtype=dtype)
self.set_module_name('add')
def forward(self):
Reported by Pylint.
Line: 22
Column: 5
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device, dtype):
self.input_one = torch.rand(M, N, K, device=device, dtype=dtype, requires_grad=True)
self.input_two = torch.rand(M, N, K, device=device, dtype=dtype)
self.set_module_name('add')
def forward(self):
Reported by Pylint.
Line: 22
Column: 5
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device, dtype):
self.input_one = torch.rand(M, N, K, device=device, dtype=dtype, requires_grad=True)
self.input_two = torch.rand(M, N, K, device=device, dtype=dtype)
self.set_module_name('add')
def forward(self):
Reported by Pylint.
caffe2/python/modeling/compute_norm_for_blobs_test.py
13 issues
Line: 209
Column: 9
def test_compute_norm_row_index_for_blobs(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
net_modifier = ComputeNormForBlobs(
blobs=['fc1_w'],
logging_frequency=10,
compute_averaged_norm=True,
Reported by Pylint.
Line: 1
Column: 1
import unittest
from caffe2.python import workspace, brew, model_helper
from caffe2.python.modeling.compute_norm_for_blobs import ComputeNormForBlobs
Reported by Pylint.
Line: 13
Column: 1
import numpy as np
class ComputeNormForBlobsTest(unittest.TestCase):
def test_compute_norm_for_blobs(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
Reported by Pylint.
Line: 14
Column: 5
class ComputeNormForBlobsTest(unittest.TestCase):
def test_compute_norm_for_blobs(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
Reported by Pylint.
Line: 44
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
self.assertEqual(len(model.net.Proto().op), 10)
assert model.net.output_record() is None
def test_compute_norm_for_blobs_modify_output_record(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
Reported by Bandit.
Line: 46
Column: 5
assert model.net.output_record() is None
def test_compute_norm_for_blobs_modify_output_record(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
Reported by Pylint.
Line: 75
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
delta=1e-5)
self.assertEqual(len(model.net.Proto().op), 10)
assert 'fc1_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs(),\
model.net.output_record().field_blobs()
assert 'fc2_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs(),\
model.net.output_record().field_blobs()
Reported by Bandit.
Line: 78
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
assert 'fc1_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs(),\
model.net.output_record().field_blobs()
assert 'fc2_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs(),\
model.net.output_record().field_blobs()
def test_compute_averaged_norm_for_blobs(self):
model = model_helper.ModelHelper(name="test")
Reported by Bandit.
Line: 82
Column: 5
model.net.output_record().field_blobs(),\
model.net.output_record().field_blobs()
def test_compute_averaged_norm_for_blobs(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
Reported by Pylint.
Line: 113
Column: 5
self.assertEqual(len(model.net.Proto().op), 10)
def test_compute_norm_for_blobs_no_print(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
Reported by Pylint.
caffe2/python/db_test.py
13 issues
Line: 26
Column: 14
]
def testSimple(self):
db = workspace.C.create_db(
"minidb", self.file_name, workspace.C.Mode.write)
for key, value in self.data:
transaction = db.new_transaction()
transaction.put(key, value)
Reported by Pylint.
Line: 27
Column: 39
def testSimple(self):
db = workspace.C.create_db(
"minidb", self.file_name, workspace.C.Mode.write)
for key, value in self.data:
transaction = db.new_transaction()
transaction.put(key, value)
del transaction
Reported by Pylint.
Line: 36
Column: 14
del db # should close DB
db = workspace.C.create_db(
"minidb", self.file_name, workspace.C.Mode.read)
cursor = db.new_cursor()
data = []
while cursor.valid():
data.append((cursor.key(), cursor.value()))
Reported by Pylint.
Line: 37
Column: 39
del db # should close DB
db = workspace.C.create_db(
"minidb", self.file_name, workspace.C.Mode.read)
cursor = db.new_cursor()
data = []
while cursor.valid():
data.append((cursor.key(), cursor.value()))
cursor.next() # noqa: B305
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import workspace
import os
import tempfile
Reported by Pylint.
Line: 8
Column: 1
from caffe2.python import workspace
import os
import tempfile
import unittest
class TestDB(unittest.TestCase):
Reported by Pylint.
Line: 9
Column: 1
from caffe2.python import workspace
import os
import tempfile
import unittest
class TestDB(unittest.TestCase):
def setUp(self):
Reported by Pylint.
Line: 10
Column: 1
import os
import tempfile
import unittest
class TestDB(unittest.TestCase):
def setUp(self):
handle, self.file_name = tempfile.mkstemp()
Reported by Pylint.
Line: 13
Column: 1
import unittest
class TestDB(unittest.TestCase):
def setUp(self):
handle, self.file_name = tempfile.mkstemp()
os.close(handle)
self.data = [
(
Reported by Pylint.
Line: 25
Column: 5
for i in range(1, 10)
]
def testSimple(self):
db = workspace.C.create_db(
"minidb", self.file_name, workspace.C.Mode.write)
for key, value in self.data:
transaction = db.new_transaction()
Reported by Pylint.
caffe2/python/modeling/compute_norm_for_blobs.py
13 issues
Line: 26
Column: 5
row_index: to plot the entire blob or simply one row at the row_index)
"""
def __init__(self, blobs, logging_frequency, p=2, compute_averaged_norm=False, row_index=None):
self._blobs = blobs
self._logging_frequency = logging_frequency
self._p = p
self._compute_averaged_norm = compute_averaged_norm
self._field_name_suffix = '_l{}_norm'.format(p)
Reported by Pylint.
Line: 40
Column: 5
row_index))
self.row_index = row_index
def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None,
modify_output_record=False):
p = self._p
compute_averaged_norm = self._compute_averaged_norm
row_index = self.row_index
Reported by Pylint.
Line: 77
Column: 17
norm = net.LpNorm(
cast_blob, norm_name, p=p, average=compute_averaged_norm
)
norm_stop_gradient = net.StopGradient(norm, net.NextScopedBlob(norm_name + "_stop_gradient"))
if self._logging_frequency >= 1:
net.Print(norm, [], every_n=self._logging_frequency)
if modify_output_record:
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core, schema, muji
from caffe2.python.modeling.net_modifier import NetModifier
Reported by Pylint.
Line: 26
Column: 5
row_index: to plot the entire blob or simply one row at the row_index)
"""
def __init__(self, blobs, logging_frequency, p=2, compute_averaged_norm=False, row_index=None):
self._blobs = blobs
self._logging_frequency = logging_frequency
self._p = p
self._compute_averaged_norm = compute_averaged_norm
self._field_name_suffix = '_l{}_norm'.format(p)
Reported by Pylint.
Line: 40
Column: 5
row_index))
self.row_index = row_index
def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None,
modify_output_record=False):
p = self._p
compute_averaged_norm = self._compute_averaged_norm
row_index = self.row_index
Reported by Pylint.
Line: 40
Column: 5
row_index))
self.row_index = row_index
def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None,
modify_output_record=False):
p = self._p
compute_averaged_norm = self._compute_averaged_norm
row_index = self.row_index
Reported by Pylint.
Line: 43
Column: 9
def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None,
modify_output_record=False):
p = self._p
compute_averaged_norm = self._compute_averaged_norm
row_index = self.row_index
CPU = muji.OnCPU()
# if given, blob_to_device is a map from blob to device_option
Reported by Pylint.
Line: 47
Column: 9
compute_averaged_norm = self._compute_averaged_norm
row_index = self.row_index
CPU = muji.OnCPU()
# if given, blob_to_device is a map from blob to device_option
blob_to_device = blob_to_device or {}
for blob_name in self._blobs:
blob = core.BlobReference(blob_name)
assert net.BlobIsDefined(blob), 'blob {} is not defined in net {} whose proto is {}'.format(blob, net.Name(), net.Proto())
Reported by Pylint.
Line: 52
Column: 1
blob_to_device = blob_to_device or {}
for blob_name in self._blobs:
blob = core.BlobReference(blob_name)
assert net.BlobIsDefined(blob), 'blob {} is not defined in net {} whose proto is {}'.format(blob, net.Name(), net.Proto())
if blob in blob_to_device:
device = blob_to_device[blob]
else:
device = CPU
Reported by Pylint.
caffe2/python/mkl/mkl_concat_op_test.py
13 issues
Line: 7
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
Reported by Pylint.
Line: 8
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
Reported by Pylint.
Line: 16
Column: 9
@unittest.skipIf(
not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn."
)
class MKLConcatTest(hu.HypothesisTestCase):
@given(
batch_size=st.integers(1, 10),
channel_splits=st.lists(st.integers(1, 10), min_size=1, max_size=3),
Reported by Pylint.
Line: 27
Column: 58
**mu.gcs
)
def test_mkl_concat(
self, batch_size, channel_splits, height, width, gc, dc
):
Xs = [
np.random.rand(batch_size, channel,
height, width).astype(np.float32)
for channel in channel_splits
Reported by Pylint.
Line: 44
Column: 5
if __name__ == "__main__":
import unittest
unittest.main()
Reported by Pylint.
Line: 1
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
Reported by Pylint.
Line: 18
Column: 1
@unittest.skipIf(
not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn."
)
class MKLConcatTest(hu.HypothesisTestCase):
@given(
batch_size=st.integers(1, 10),
channel_splits=st.lists(st.integers(1, 10), min_size=1, max_size=3),
height=st.integers(1, 10),
width=st.integers(1, 10),
Reported by Pylint.
Line: 25
Column: 5
height=st.integers(1, 10),
width=st.integers(1, 10),
**mu.gcs
)
def test_mkl_concat(
self, batch_size, channel_splits, height, width, gc, dc
):
Xs = [
np.random.rand(batch_size, channel,
Reported by Pylint.
Line: 25
Column: 5
height=st.integers(1, 10),
width=st.integers(1, 10),
**mu.gcs
)
def test_mkl_concat(
self, batch_size, channel_splits, height, width, gc, dc
):
Xs = [
np.random.rand(batch_size, channel,
Reported by Pylint.
Line: 25
Column: 5
height=st.integers(1, 10),
width=st.integers(1, 10),
**mu.gcs
)
def test_mkl_concat(
self, batch_size, channel_splits, height, width, gc, dc
):
Xs = [
np.random.rand(batch_size, channel,
Reported by Pylint.
benchmarks/operator_benchmark/pt/channel_shuffle_test.py
13 issues
Line: 2
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for channel_shuffle operator."""
# Configs for PT channel_shuffle operator
channel_shuffle_long_configs = op_bench.cross_product_configs(
Reported by Pylint.
Line: 9
Column: 32
# Configs for PT channel_shuffle operator
channel_shuffle_long_configs = op_bench.cross_product_configs(
batch_size=[4, 8],
channels_per_group=[32, 64],
height=[32, 64],
width=[32, 64],
groups=[4, 8],
Reported by Pylint.
Line: 20
Column: 33
)
channel_shuffle_short_configs = op_bench.config_list(
attr_names=["batch_size", "channels_per_group", "height", "width", "groups"],
attrs=[
[2, 16, 16, 16, 2],
[2, 32, 32, 32, 2],
[4, 32, 32, 32, 4],
Reported by Pylint.
Line: 37
Column: 31
)
class ChannelSHuffleBenchmark(op_bench.TorchBenchmarkBase):
def init(self, batch_size, channels_per_group, height, width, groups, channel_last):
channels = channels_per_group * groups
data_shape = (batch_size, channels, height, width)
input_data = torch.rand(data_shape)
if channel_last:
Reported by Pylint.
Line: 54
Column: 1
return torch.channel_shuffle(input_data, groups)
op_bench.generate_pt_test(channel_shuffle_short_configs + channel_shuffle_long_configs,
ChannelSHuffleBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 5
Column: 1
import torch
"""Microbenchmarks for channel_shuffle operator."""
# Configs for PT channel_shuffle operator
channel_shuffle_long_configs = op_bench.cross_product_configs(
batch_size=[4, 8],
Reported by Pylint.
Line: 44
Column: 9
input_data = torch.rand(data_shape)
if channel_last:
input_data = input_data.contiguous(memory_format=torch.channels_last)
self.inputs = {
"input_data": input_data,
"groups": groups
}
self.set_module_name('channel_shuffle')
Reported by Pylint.
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for channel_shuffle operator."""
# Configs for PT channel_shuffle operator
channel_shuffle_long_configs = op_bench.cross_product_configs(
Reported by Pylint.
Line: 37
Column: 1
)
class ChannelSHuffleBenchmark(op_bench.TorchBenchmarkBase):
def init(self, batch_size, channels_per_group, height, width, groups, channel_last):
channels = channels_per_group * groups
data_shape = (batch_size, channels, height, width)
input_data = torch.rand(data_shape)
if channel_last:
Reported by Pylint.
Line: 38
Column: 5
class ChannelSHuffleBenchmark(op_bench.TorchBenchmarkBase):
def init(self, batch_size, channels_per_group, height, width, groups, channel_last):
channels = channels_per_group * groups
data_shape = (batch_size, channels, height, width)
input_data = torch.rand(data_shape)
if channel_last:
input_data = input_data.contiguous(memory_format=torch.channels_last)
Reported by Pylint.
benchmarks/overrides_benchmark/common.py
13 issues
Line: 1
Column: 1
import torch
NUM_REPEATS = 1000
NUM_REPEAT_OF_REPEATS = 1000
class SubTensor(torch.Tensor):
pass
Reported by Pylint.
Line: 19
Column: 40
self._tensor = torch.tensor(data, requires_grad=requires_grad)
def __torch_function__(self, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
return WithTorchFunction(args[0]._tensor + args[1]._tensor)
Reported by Pylint.
Line: 19
Column: 34
self._tensor = torch.tensor(data, requires_grad=requires_grad)
def __torch_function__(self, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
return WithTorchFunction(args[0]._tensor + args[1]._tensor)
Reported by Pylint.
Line: 23
Column: 34
if kwargs is None:
kwargs = {}
return WithTorchFunction(args[0]._tensor + args[1]._tensor)
class SubWithTorchFunction(torch.Tensor):
def __torch_function__(self, func, types, args=(), kwargs=None):
if kwargs is None:
Reported by Pylint.
Line: 23
Column: 52
if kwargs is None:
kwargs = {}
return WithTorchFunction(args[0]._tensor + args[1]._tensor)
class SubWithTorchFunction(torch.Tensor):
def __torch_function__(self, func, types, args=(), kwargs=None):
if kwargs is None:
Reported by Pylint.
Line: 1
Column: 1
import torch
NUM_REPEATS = 1000
NUM_REPEAT_OF_REPEATS = 1000
class SubTensor(torch.Tensor):
pass
Reported by Pylint.
Line: 7
Column: 1
NUM_REPEAT_OF_REPEATS = 1000
class SubTensor(torch.Tensor):
pass
class WithTorchFunction:
def __init__(self, data, requires_grad=False):
Reported by Pylint.
Line: 7
Column: 1
NUM_REPEAT_OF_REPEATS = 1000
class SubTensor(torch.Tensor):
pass
class WithTorchFunction:
def __init__(self, data, requires_grad=False):
Reported by Pylint.
Line: 11
Column: 1
pass
class WithTorchFunction:
def __init__(self, data, requires_grad=False):
if isinstance(data, torch.Tensor):
self._tensor = data
return
Reported by Pylint.
Line: 11
Column: 1
pass
class WithTorchFunction:
def __init__(self, data, requires_grad=False):
if isinstance(data, torch.Tensor):
self._tensor = data
return
Reported by Pylint.
caffe2/python/control_ops_util.py
13 issues
Line: 1
Column: 1
## @package control_ops_util
# Module caffe2.python.control_ops_util
from caffe2.python import core
Reported by Pylint.
Line: 24
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
net_ssa, _ = core.get_ssa(net_proto)
input_names = core.get_undefined_blobs(net_ssa)
for input_name in input_names:
assert str(input_name) in lexical_scope, \
"Input blob " + input_name + " is undefined"
output_names = set()
for op in net_proto.op:
for output in op.output:
Reported by Bandit.
Line: 28
Column: 9
"Input blob " + input_name + " is undefined"
output_names = set()
for op in net_proto.op:
for output in op.output:
if output in lexical_scope:
output_names.add(output)
return input_names, output_names
Reported by Pylint.
Line: 36
Column: 1
return input_names, output_names
def add_if_op(if_net, cond_blob, lexical_scope, then_net, else_net=None):
"""
A helper function to add an If op to the net.
Automatically determines whether blobs in the then/else subnets are external
(from the outer workspace) or local (visible only inside subnet's workspace)
based on lexical scope - set of all outer blob names visible to the 'If'
Reported by Pylint.
Line: 77
Column: 1
then_output_blobs = \
[core.BlobReference(name=b, net=None) for b in then_output_blob_names]
then_input_output_names_ordered = [
str(b) for b in (then_input_blobs + then_output_blobs)]
then_outer_blob_names = list(then_input_blob_names | then_output_blob_names)
then_outer_blob_names_idx = [
then_input_output_names_ordered.index(b) for b in then_outer_blob_names]
Reported by Pylint.
Line: 111
Column: 1
else_output_blobs = \
[core.BlobReference(name=b, net=None) for b in else_output_blob_names]
else_input_output_names_ordered = [
str(b) for b in (else_input_blobs + else_output_blobs)]
else_outer_blob_names = list(else_input_blob_names | else_output_blob_names)
else_outer_blob_names_idx = [
else_input_output_names_ordered.index(b) for b in else_outer_blob_names]
Reported by Pylint.
Line: 141
Column: 1
if_net.AddExternalOutput(*if_outputs)
def add_while_op(
while_net, cond_blob, lexical_scope, loop_body_net, condition_body_net=None):
"""
A helper function to add a While op to the net. Same rules for determining
outer and inner blobs as for the 'If' operator apply for the 'While' operator
loop and condition subnets. If specified, condition net is executed in a separate
Reported by Pylint.
Line: 141
Column: 1
if_net.AddExternalOutput(*if_outputs)
def add_while_op(
while_net, cond_blob, lexical_scope, loop_body_net, condition_body_net=None):
"""
A helper function to add a While op to the net. Same rules for determining
outer and inner blobs as for the 'If' operator apply for the 'While' operator
loop and condition subnets. If specified, condition net is executed in a separate
Reported by Pylint.
Line: 172
Column: 1
do_loop_body_net = core.Net('do_loop_body_net')
loop_input_output_names_ordered = [
str(b) for b in (loop_inputs + loop_outputs)]
loop_body_outer_blob_names = list(input_blob_names | output_blob_names)
loop_body_outer_blob_names_idx = [
loop_input_output_names_ordered.index(b) for b in loop_body_outer_blob_names]
do_loop_body_workspace_blob = \
Reported by Pylint.
Line: 206
Column: 13
# make sure condition blob is written by condition net and is
# visible outside of it
found_condition_output = False
for op in condition_body_net.Proto().op:
if str(cond_blob) in op.output:
found_condition_output = True
break
assert found_condition_output, \
"Condition net does not write into condition blob"
Reported by Pylint.
caffe2/experiments/python/funhash_op_test.py
13 issues
Line: 22
Column: 1
import numpy as np
from scipy.sparse import coo_matrix
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core
Reported by Pylint.
Line: 24
Column: 1
import numpy as np
from scipy.sparse import coo_matrix
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 25
Column: 1
from scipy.sparse import coo_matrix
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 1
Column: 1
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
Reported by Pylint.
Line: 31
Column: 1
import caffe2.python.hypothesis_test_util as hu
class TestFunHash(hu.HypothesisTestCase):
@given(n_out=st.integers(min_value=5, max_value=20),
n_in=st.integers(min_value=10, max_value=20),
n_data=st.integers(min_value=2, max_value=8),
n_weight=st.integers(min_value=8, max_value=15),
n_alpha=st.integers(min_value=3, max_value=8),
Reported by Pylint.
Line: 39
Column: 5
n_alpha=st.integers(min_value=3, max_value=8),
sparsity=st.floats(min_value=0.1, max_value=1.0),
**hu.gcs)
def test_funhash(self, n_out, n_in, n_data, n_weight, n_alpha, sparsity,
gc, dc):
A = np.random.rand(n_data, n_in)
A[A > sparsity] = 0
A_coo = coo_matrix(A)
val, key, seg = A_coo.data, A_coo.col, A_coo.row
Reported by Pylint.
Line: 39
Column: 5
n_alpha=st.integers(min_value=3, max_value=8),
sparsity=st.floats(min_value=0.1, max_value=1.0),
**hu.gcs)
def test_funhash(self, n_out, n_in, n_data, n_weight, n_alpha, sparsity,
gc, dc):
A = np.random.rand(n_data, n_in)
A[A > sparsity] = 0
A_coo = coo_matrix(A)
val, key, seg = A_coo.data, A_coo.col, A_coo.row
Reported by Pylint.
Line: 39
Column: 5
n_alpha=st.integers(min_value=3, max_value=8),
sparsity=st.floats(min_value=0.1, max_value=1.0),
**hu.gcs)
def test_funhash(self, n_out, n_in, n_data, n_weight, n_alpha, sparsity,
gc, dc):
A = np.random.rand(n_data, n_in)
A[A > sparsity] = 0
A_coo = coo_matrix(A)
val, key, seg = A_coo.data, A_coo.col, A_coo.row
Reported by Pylint.
Line: 39
Column: 5
n_alpha=st.integers(min_value=3, max_value=8),
sparsity=st.floats(min_value=0.1, max_value=1.0),
**hu.gcs)
def test_funhash(self, n_out, n_in, n_data, n_weight, n_alpha, sparsity,
gc, dc):
A = np.random.rand(n_data, n_in)
A[A > sparsity] = 0
A_coo = coo_matrix(A)
val, key, seg = A_coo.data, A_coo.col, A_coo.row
Reported by Pylint.
Line: 39
Column: 5
n_alpha=st.integers(min_value=3, max_value=8),
sparsity=st.floats(min_value=0.1, max_value=1.0),
**hu.gcs)
def test_funhash(self, n_out, n_in, n_data, n_weight, n_alpha, sparsity,
gc, dc):
A = np.random.rand(n_data, n_in)
A[A > sparsity] = 0
A_coo = coo_matrix(A)
val, key, seg = A_coo.data, A_coo.col, A_coo.row
Reported by Pylint.