The following issues were found:
caffe2/python/predictor/predictor_test.py
8 issues
Line: 1
Column: 1
import unittest
import numpy as np
from caffe2.python import workspace, core
Reported by Pylint.
Line: 13
Column: 1
from caffe2.proto import caffe2_pb2
class TestPredictor(unittest.TestCase):
def setUp(self):
np.random.seed(1)
self.predict_net = self._predict_net
self.init_net = self._init_net
Reported by Pylint.
Line: 57
Column: 5
])
return net.SerializeToString()
def test_run(self):
A = np.ones((2, 3), np.float32)
B = np.ones((3, 4), np.float32)
predictor = workspace.Predictor(self.init_net, self.predict_net)
outputs = predictor.run([A, B])
self.assertEqual(len(outputs), 1)
Reported by Pylint.
Line: 58
Column: 9
return net.SerializeToString()
def test_run(self):
A = np.ones((2, 3), np.float32)
B = np.ones((3, 4), np.float32)
predictor = workspace.Predictor(self.init_net, self.predict_net)
outputs = predictor.run([A, B])
self.assertEqual(len(outputs), 1)
np.testing.assert_almost_equal(np.dot(A, B), outputs[0])
Reported by Pylint.
Line: 59
Column: 9
def test_run(self):
A = np.ones((2, 3), np.float32)
B = np.ones((3, 4), np.float32)
predictor = workspace.Predictor(self.init_net, self.predict_net)
outputs = predictor.run([A, B])
self.assertEqual(len(outputs), 1)
np.testing.assert_almost_equal(np.dot(A, B), outputs[0])
Reported by Pylint.
Line: 65
Column: 5
self.assertEqual(len(outputs), 1)
np.testing.assert_almost_equal(np.dot(A, B), outputs[0])
def test_run_map(self):
A = np.zeros((2, 3), np.float32)
B = np.ones((3, 4), np.float32)
predictor = workspace.Predictor(self.init_net, self.predict_net)
outputs = predictor.run({
'B': B,
Reported by Pylint.
Line: 66
Column: 9
np.testing.assert_almost_equal(np.dot(A, B), outputs[0])
def test_run_map(self):
A = np.zeros((2, 3), np.float32)
B = np.ones((3, 4), np.float32)
predictor = workspace.Predictor(self.init_net, self.predict_net)
outputs = predictor.run({
'B': B,
})
Reported by Pylint.
Line: 67
Column: 9
def test_run_map(self):
A = np.zeros((2, 3), np.float32)
B = np.ones((3, 4), np.float32)
predictor = workspace.Predictor(self.init_net, self.predict_net)
outputs = predictor.run({
'B': B,
})
self.assertEqual(len(outputs), 1)
Reported by Pylint.
caffe2/python/onnx/onnxifi.py
8 issues
Line: 16
Column: 12
"""
Set onnxifi option
"""
return C.onnxifi_set_option(option_name, str(option_value))
def onnxifi_get_option(option_name):
"""
Get onnxifi option
Reported by Pylint.
Line: 23
Column: 12
"""
Get onnxifi option
"""
return C.onnxifi_get_option(option_name)
def onnxifi_caffe2_net(
pred_net,
input_shapes,
max_batch_size=1,
Reported by Pylint.
Line: 54
Column: 20
shape_hints.shapes.extend([tbs])
shape_hints.max_batch_size = max_batch_size
shape_hints.max_feature_len = max_seq_size
pred_net_str = C.onnxifi(pred_net.SerializeToString(),
shape_hints.SerializeToString(),
block_list if block_list else [],
weight_names if weight_names is not None else [],
max_batch_size,
max_seq_size,
Reported by Pylint.
Line: 25
Column: 1
"""
return C.onnxifi_get_option(option_name)
def onnxifi_caffe2_net(
pred_net,
input_shapes,
max_batch_size=1,
max_seq_size=1,
debug=False,
Reported by Pylint.
Line: 25
Column: 1
"""
return C.onnxifi_get_option(option_name)
def onnxifi_caffe2_net(
pred_net,
input_shapes,
max_batch_size=1,
max_seq_size=1,
debug=False,
Reported by Pylint.
Line: 42
Column: 8
Transform the caffe2_net by collapsing ONNXIFI-runnable nodes into Onnxifi c2 ops
"""
shape_hints = caffe2_pb2.TensorBoundShapes()
if type(input_shapes) is caffe2_pb2.TensorBoundShapes:
shape_hints = input_shapes
elif type(input_shapes) is dict:
for k, v in input_shapes.items():
tbs = caffe2_pb2.TensorBoundShape()
tbs.name = k
Reported by Pylint.
Line: 44
Column: 10
shape_hints = caffe2_pb2.TensorBoundShapes()
if type(input_shapes) is caffe2_pb2.TensorBoundShapes:
shape_hints = input_shapes
elif type(input_shapes) is dict:
for k, v in input_shapes.items():
tbs = caffe2_pb2.TensorBoundShape()
tbs.name = k
tbs.shape.dims.extend(v)
tbs.dim_type.extend([caffe2_pb2.TensorBoundShape.CONSTANT] * len(tbs.shape.dims))
Reported by Pylint.
Line: 45
Column: 16
if type(input_shapes) is caffe2_pb2.TensorBoundShapes:
shape_hints = input_shapes
elif type(input_shapes) is dict:
for k, v in input_shapes.items():
tbs = caffe2_pb2.TensorBoundShape()
tbs.name = k
tbs.shape.dims.extend(v)
tbs.dim_type.extend([caffe2_pb2.TensorBoundShape.CONSTANT] * len(tbs.shape.dims))
tbs.dim_type[0] = caffe2_pb2.TensorBoundShape.BATCH
Reported by Pylint.
caffe2/python/predictor/mobile_exporter_test.py
8 issues
Line: 1
Column: 1
from caffe2.python.test_util import TestCase
from caffe2.python import workspace, brew
from caffe2.python.model_helper import ModelHelper
from caffe2.python.predictor import mobile_exporter
import numpy as np
Reported by Pylint.
Line: 12
Column: 1
import numpy as np
class TestMobileExporter(TestCase):
def test_mobile_exporter(self):
model = ModelHelper(name="mobile_exporter_test_model")
# Test LeNet
brew.conv(model, 'data', 'conv1', dim_in=1, dim_out=20, kernel=5)
brew.max_pool(model, 'conv1', 'pool1', kernel=2, stride=2)
Reported by Pylint.
Line: 13
Column: 5
class TestMobileExporter(TestCase):
def test_mobile_exporter(self):
model = ModelHelper(name="mobile_exporter_test_model")
# Test LeNet
brew.conv(model, 'data', 'conv1', dim_in=1, dim_out=20, kernel=5)
brew.max_pool(model, 'conv1', 'pool1', kernel=2, stride=2)
brew.conv(model, 'pool1', 'conv2', dim_in=20, dim_out=50, kernel=5)
Reported by Pylint.
Line: 13
Column: 5
class TestMobileExporter(TestCase):
def test_mobile_exporter(self):
model = ModelHelper(name="mobile_exporter_test_model")
# Test LeNet
brew.conv(model, 'data', 'conv1', dim_in=1, dim_out=20, kernel=5)
brew.max_pool(model, 'conv1', 'pool1', kernel=2, stride=2)
brew.conv(model, 'pool1', 'conv2', dim_in=20, dim_out=50, kernel=5)
Reported by Pylint.
Line: 65
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
# Output is a vector of outputs but we only care about the first and only result
predictor_out = predictor.run([np_data])
assert len(predictor_out) == 1
predictor_out = predictor_out[0]
np.testing.assert_allclose(
ref_out, predictor_out, atol=1e-10, rtol=1e-10
)
Reported by Bandit.
Line: 72
Column: 5
ref_out, predictor_out, atol=1e-10, rtol=1e-10
)
def test_mobile_exporter_datatypes(self):
model = ModelHelper(name="mobile_exporter_test_model")
model.Copy("data_int", "out")
model.params.append("data_int")
model.Copy("data_obj", "out_obj")
model.params.append("data_obj")
Reported by Pylint.
Line: 72
Column: 5
ref_out, predictor_out, atol=1e-10, rtol=1e-10
)
def test_mobile_exporter_datatypes(self):
model = ModelHelper(name="mobile_exporter_test_model")
model.Copy("data_int", "out")
model.params.append("data_int")
model.Copy("data_obj", "out_obj")
model.params.append("data_obj")
Reported by Pylint.
Line: 121
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
# Output is a vector of outputs.
predictor_out = predictor.run([])
assert len(predictor_out) == 2
predictor_out_int = predictor_out[1]
predictor_out_obj = predictor_out[0]
# The order in predictor_out is non-deterministic. Use type of the entry
# to figure out what to compare it to.
if isinstance(predictor_out[1][0], bytes):
Reported by Bandit.
test/cpp_api_parity/sample_functional.py
8 issues
Line: 1
Column: 1
import torch
import torch.nn.functional as F
from torch.testing._internal.common_nn import wrap_functional
'''
`sample_functional` is used by `test_cpp_api_parity.py` to test that Python / C++ API
parity test harness works for `torch.nn.functional` functions.
When `has_parity=true` is passed to `sample_functional`, behavior of `sample_functional`
Reported by Pylint.
Line: 2
Column: 1
import torch
import torch.nn.functional as F
from torch.testing._internal.common_nn import wrap_functional
'''
`sample_functional` is used by `test_cpp_api_parity.py` to test that Python / C++ API
parity test harness works for `torch.nn.functional` functions.
When `has_parity=true` is passed to `sample_functional`, behavior of `sample_functional`
Reported by Pylint.
Line: 3
Column: 1
import torch
import torch.nn.functional as F
from torch.testing._internal.common_nn import wrap_functional
'''
`sample_functional` is used by `test_cpp_api_parity.py` to test that Python / C++ API
parity test harness works for `torch.nn.functional` functions.
When `has_parity=true` is passed to `sample_functional`, behavior of `sample_functional`
Reported by Pylint.
Line: 5
Column: 1
import torch.nn.functional as F
from torch.testing._internal.common_nn import wrap_functional
'''
`sample_functional` is used by `test_cpp_api_parity.py` to test that Python / C++ API
parity test harness works for `torch.nn.functional` functions.
When `has_parity=true` is passed to `sample_functional`, behavior of `sample_functional`
is the same as the C++ equivalent.
Reported by Pylint.
Line: 1
Column: 1
import torch
import torch.nn.functional as F
from torch.testing._internal.common_nn import wrap_functional
'''
`sample_functional` is used by `test_cpp_api_parity.py` to test that Python / C++ API
parity test harness works for `torch.nn.functional` functions.
When `has_parity=true` is passed to `sample_functional`, behavior of `sample_functional`
Reported by Pylint.
Line: 16
Column: 1
is different from the C++ equivalent.
'''
def sample_functional(x, has_parity):
if has_parity:
return x * 2
else:
return x * 4
Reported by Pylint.
Line: 16
Column: 1
is different from the C++ equivalent.
'''
def sample_functional(x, has_parity):
if has_parity:
return x * 2
else:
return x * 4
Reported by Pylint.
Line: 17
Column: 5
'''
def sample_functional(x, has_parity):
if has_parity:
return x * 2
else:
return x * 4
torch.nn.functional.sample_functional = sample_functional
Reported by Pylint.
test/onnx/model_defs/mnist.py
8 issues
Line: 1
Column: 1
import torch.nn as nn
import torch.nn.functional as F
class MNIST(nn.Module):
def __init__(self):
super(MNIST, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
Reported by Pylint.
Line: 2
Column: 1
import torch.nn as nn
import torch.nn.functional as F
class MNIST(nn.Module):
def __init__(self):
super(MNIST, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
Reported by Pylint.
Line: 1
Column: 1
import torch.nn as nn
import torch.nn.functional as F
class MNIST(nn.Module):
def __init__(self):
super(MNIST, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
Reported by Pylint.
Line: 5
Column: 1
import torch.nn.functional as F
class MNIST(nn.Module):
def __init__(self):
super(MNIST, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
Reported by Pylint.
Line: 5
Column: 1
import torch.nn.functional as F
class MNIST(nn.Module):
def __init__(self):
super(MNIST, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
Reported by Pylint.
Line: 8
Column: 9
class MNIST(nn.Module):
def __init__(self):
super(MNIST, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
Reported by Pylint.
Line: 15
Column: 5
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
Reported by Pylint.
Line: 15
Column: 5
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
Reported by Pylint.
caffe2/python/utils_test.py
8 issues
Line: 1
Column: 1
from caffe2.python import core, utils, test_util
import numpy as np
Reported by Pylint.
Line: 11
Column: 1
import numpy as np
class TestUtils(test_util.TestCase):
def testArgsToDict(self):
args = [utils.MakeArgument("int1", 3),
utils.MakeArgument("float1", 4.0),
utils.MakeArgument("string1", "foo"),
utils.MakeArgument("intlist1", np.array([3, 4])),
Reported by Pylint.
Line: 12
Column: 5
class TestUtils(test_util.TestCase):
def testArgsToDict(self):
args = [utils.MakeArgument("int1", 3),
utils.MakeArgument("float1", 4.0),
utils.MakeArgument("string1", "foo"),
utils.MakeArgument("intlist1", np.array([3, 4])),
utils.MakeArgument("floatlist1", np.array([5.0, 6.0])),
Reported by Pylint.
Line: 12
Column: 5
class TestUtils(test_util.TestCase):
def testArgsToDict(self):
args = [utils.MakeArgument("int1", 3),
utils.MakeArgument("float1", 4.0),
utils.MakeArgument("string1", "foo"),
utils.MakeArgument("intlist1", np.array([3, 4])),
utils.MakeArgument("floatlist1", np.array([5.0, 6.0])),
Reported by Pylint.
Line: 29
Column: 5
self.assertEqual(dict_, expected, "dictionary version of arguments "
"doesn't match original")
def testBuildUniqueMutexIter(self):
init_net = core.Net("init_net")
net = core.Net("net")
utils.BuildUniqueMutexIter(init_net, net)
for op in init_net.Proto().op:
Reported by Pylint.
Line: 29
Column: 5
self.assertEqual(dict_, expected, "dictionary version of arguments "
"doesn't match original")
def testBuildUniqueMutexIter(self):
init_net = core.Net("init_net")
net = core.Net("net")
utils.BuildUniqueMutexIter(init_net, net)
for op in init_net.Proto().op:
Reported by Pylint.
Line: 34
Column: 13
net = core.Net("net")
utils.BuildUniqueMutexIter(init_net, net)
for op in init_net.Proto().op:
self.assertEqual(op.device_option.extra_info[0],
"device_type_override:cpu")
for op in net.Proto().op:
self.assertEqual(op.device_option.extra_info[0],
Reported by Pylint.
Line: 38
Column: 13
self.assertEqual(op.device_option.extra_info[0],
"device_type_override:cpu")
for op in net.Proto().op:
self.assertEqual(op.device_option.extra_info[0],
"device_type_override:cpu")
Reported by Pylint.
test/distributed/pipeline/sync/test_transparency.py
8 issues
Line: 7
Column: 1
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
from torch.distributed.pipeline.sync import Pipe
Reported by Pylint.
Line: 8
Column: 1
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
from torch.distributed.pipeline.sync import Pipe
def test_simple_linears(setup_rpc):
Reported by Pylint.
Line: 10
Column: 1
import torch
from torch import nn
from torch.distributed.pipeline.sync import Pipe
def test_simple_linears(setup_rpc):
def sum_grad(parameters):
return sum([p.grad.sum() for p in parameters if p.grad is not None])
Reported by Pylint.
Line: 13
Column: 25
from torch.distributed.pipeline.sync import Pipe
def test_simple_linears(setup_rpc):
def sum_grad(parameters):
return sum([p.grad.sum() for p in parameters if p.grad is not None])
def zero_grad(parameters):
for p in parameters:
Reported by Pylint.
Line: 1
Column: 1
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
Reported by Pylint.
Line: 13
Column: 1
from torch.distributed.pipeline.sync import Pipe
def test_simple_linears(setup_rpc):
def sum_grad(parameters):
return sum([p.grad.sum() for p in parameters if p.grad is not None])
def zero_grad(parameters):
for p in parameters:
Reported by Pylint.
Line: 18
Column: 13
return sum([p.grad.sum() for p in parameters if p.grad is not None])
def zero_grad(parameters):
for p in parameters:
p.grad = None
inputs = torch.rand(8, 1)
model = nn.Sequential(nn.Linear(1, 2), nn.Linear(2, 4), nn.Linear(4, 2), nn.Linear(2, 1),)
Reported by Pylint.
Line: 43
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
grad_with_pipe = sum_grad(model.parameters())
# Both grads should be identical.
assert torch.allclose(grad_with_pipe, grad_without_pipe)
Reported by Bandit.
caffe2/python/text_file_reader.py
8 issues
Line: 12
Column: 1
from caffe2.python.schema import Scalar, Struct, data_type_for_dtype
class TextFileReader(Reader):
"""
Wrapper around operators for reading from text files.
"""
def __init__(self, init_net, filename, schema, num_passes=1, batch_size=1):
"""
Reported by Pylint.
Line: 29
Column: 13
batch_size : Number of rows to read at a time.
"""
assert isinstance(schema, Struct), 'Schema must be a schema.Struct'
for name, child in schema.get_children():
assert isinstance(child, Scalar), (
'Only scalar fields are supported in TextFileReader.')
field_types = [
data_type_for_dtype(dtype) for dtype in schema.field_types()]
Reader.__init__(self, schema)
Reported by Pylint.
Line: 42
Column: 5
field_types=field_types)
self._batch_size = batch_size
def read(self, net):
"""
Create op for reading a batch of rows.
"""
blobs = net.TextFileReaderRead(
[self._reader],
Reported by Pylint.
Line: 1
Column: 1
## @package text_file_reader
# Module caffe2.python.text_file_reader
from caffe2.python import core
from caffe2.python.dataio import Reader
from caffe2.python.schema import Scalar, Struct, data_type_for_dtype
Reported by Pylint.
Line: 16
Column: 5
"""
Wrapper around operators for reading from text files.
"""
def __init__(self, init_net, filename, schema, num_passes=1, batch_size=1):
"""
Create op for building a TextFileReader instance in the workspace.
Args:
init_net : Net that will be run only once at startup.
Reported by Pylint.
Line: 28
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
num_passes : Number of passes over the data.
batch_size : Number of rows to read at a time.
"""
assert isinstance(schema, Struct), 'Schema must be a schema.Struct'
for name, child in schema.get_children():
assert isinstance(child, Scalar), (
'Only scalar fields are supported in TextFileReader.')
field_types = [
data_type_for_dtype(dtype) for dtype in schema.field_types()]
Reported by Bandit.
Line: 30
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
"""
assert isinstance(schema, Struct), 'Schema must be a schema.Struct'
for name, child in schema.get_children():
assert isinstance(child, Scalar), (
'Only scalar fields are supported in TextFileReader.')
field_types = [
data_type_for_dtype(dtype) for dtype in schema.field_types()]
Reader.__init__(self, schema)
self._reader = init_net.CreateTextFileReader(
Reported by Bandit.
Line: 50
Column: 12
[self._reader],
len(self.schema().field_names()),
batch_size=self._batch_size)
if type(blobs) is core.BlobReference:
blobs = [blobs]
is_empty = net.IsEmpty(
[blobs[0]],
core.ScopedBlobReference(net.NextName('should_stop'))
Reported by Pylint.
caffe2/python/operator_test/checkpoint_test.py
8 issues
Line: 17
Column: 39
"""A simple test case to make sure that the checkpoint behavior is correct.
"""
@unittest.skipIf("LevelDB" not in core.C.registered_dbs(), "Need LevelDB")
def testCheckpoint(self):
temp_root = tempfile.mkdtemp()
net = core.Net("test_checkpoint")
# Note(jiayq): I am being a bit lazy here and am using the old iter
# convention that does not have an input. Optionally change it to the
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core, workspace, test_util
import os
import shutil
import tempfile
Reported by Pylint.
Line: 7
Column: 1
from caffe2.python import core, workspace, test_util
import os
import shutil
import tempfile
import unittest
Reported by Pylint.
Line: 8
Column: 1
from caffe2.python import core, workspace, test_util
import os
import shutil
import tempfile
import unittest
class CheckpointTest(test_util.TestCase):
Reported by Pylint.
Line: 9
Column: 1
from caffe2.python import core, workspace, test_util
import os
import shutil
import tempfile
import unittest
class CheckpointTest(test_util.TestCase):
"""A simple test case to make sure that the checkpoint behavior is correct.
Reported by Pylint.
Line: 10
Column: 1
import os
import shutil
import tempfile
import unittest
class CheckpointTest(test_util.TestCase):
"""A simple test case to make sure that the checkpoint behavior is correct.
"""
Reported by Pylint.
Line: 18
Column: 5
"""
@unittest.skipIf("LevelDB" not in core.C.registered_dbs(), "Need LevelDB")
def testCheckpoint(self):
temp_root = tempfile.mkdtemp()
net = core.Net("test_checkpoint")
# Note(jiayq): I am being a bit lazy here and am using the old iter
# convention that does not have an input. Optionally change it to the
# new style if needed.
Reported by Pylint.
Line: 18
Column: 5
"""
@unittest.skipIf("LevelDB" not in core.C.registered_dbs(), "Need LevelDB")
def testCheckpoint(self):
temp_root = tempfile.mkdtemp()
net = core.Net("test_checkpoint")
# Note(jiayq): I am being a bit lazy here and am using the old iter
# convention that does not have an input. Optionally change it to the
# new style if needed.
Reported by Pylint.
caffe2/python/operator_test/conditional_test.py
8 issues
Line: 8
Column: 1
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
class TestConditionalOp(serial.SerializedTestCase):
@serial.given(rows_num=st.integers(1, 10000), **hu.gcs_cpu_only)
Reported by Pylint.
Line: 14
Column: 46
class TestConditionalOp(serial.SerializedTestCase):
@serial.given(rows_num=st.integers(1, 10000), **hu.gcs_cpu_only)
def test_conditional(self, rows_num, gc, dc):
op = core.CreateOperator(
"Conditional", ["condition", "data_t", "data_f"], "output"
)
data_t = np.random.random((rows_num, 10, 20)).astype(np.float32)
data_f = np.random.random((rows_num, 10, 20)).astype(np.float32)
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
Reported by Pylint.
Line: 12
Column: 1
import numpy as np
class TestConditionalOp(serial.SerializedTestCase):
@serial.given(rows_num=st.integers(1, 10000), **hu.gcs_cpu_only)
def test_conditional(self, rows_num, gc, dc):
op = core.CreateOperator(
"Conditional", ["condition", "data_t", "data_f"], "output"
)
Reported by Pylint.
Line: 14
Column: 5
class TestConditionalOp(serial.SerializedTestCase):
@serial.given(rows_num=st.integers(1, 10000), **hu.gcs_cpu_only)
def test_conditional(self, rows_num, gc, dc):
op = core.CreateOperator(
"Conditional", ["condition", "data_t", "data_f"], "output"
)
data_t = np.random.random((rows_num, 10, 20)).astype(np.float32)
data_f = np.random.random((rows_num, 10, 20)).astype(np.float32)
Reported by Pylint.
Line: 14
Column: 5
class TestConditionalOp(serial.SerializedTestCase):
@serial.given(rows_num=st.integers(1, 10000), **hu.gcs_cpu_only)
def test_conditional(self, rows_num, gc, dc):
op = core.CreateOperator(
"Conditional", ["condition", "data_t", "data_f"], "output"
)
data_t = np.random.random((rows_num, 10, 20)).astype(np.float32)
data_f = np.random.random((rows_num, 10, 20)).astype(np.float32)
Reported by Pylint.
Line: 14
Column: 5
class TestConditionalOp(serial.SerializedTestCase):
@serial.given(rows_num=st.integers(1, 10000), **hu.gcs_cpu_only)
def test_conditional(self, rows_num, gc, dc):
op = core.CreateOperator(
"Conditional", ["condition", "data_t", "data_f"], "output"
)
data_t = np.random.random((rows_num, 10, 20)).astype(np.float32)
data_f = np.random.random((rows_num, 10, 20)).astype(np.float32)
Reported by Pylint.
Line: 15
Column: 9
class TestConditionalOp(serial.SerializedTestCase):
@serial.given(rows_num=st.integers(1, 10000), **hu.gcs_cpu_only)
def test_conditional(self, rows_num, gc, dc):
op = core.CreateOperator(
"Conditional", ["condition", "data_t", "data_f"], "output"
)
data_t = np.random.random((rows_num, 10, 20)).astype(np.float32)
data_f = np.random.random((rows_num, 10, 20)).astype(np.float32)
condition = np.random.choice(a=[True, False], size=rows_num)
Reported by Pylint.