The following issues were found:
caffe2/python/operator_test/mkl_conv_op_test.py
15 issues
Line: 7
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
Reported by Pylint.
Line: 8
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
Reported by Pylint.
Line: 15
Column: 22
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLConvTest(hu.HypothesisTestCase):
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(3, 5),
Reported by Pylint.
Line: 29
Column: 42
@settings(max_examples=2, deadline=100)
def test_mkl_convolution(self, stride, pad, kernel, size,
input_channels, output_channels,
batch_size, gc, dc):
op = core.CreateOperator(
"Conv",
["X", "w", "b"],
["Y"],
stride=stride,
Reported by Pylint.
Line: 50
Column: 5
if __name__ == "__main__":
import unittest
unittest.main()
Reported by Pylint.
Line: 1
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
Reported by Pylint.
Line: 17
Column: 1
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLConvTest(hu.HypothesisTestCase):
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(3, 5),
size=st.integers(8, 8),
input_channels=st.integers(1, 3),
Reported by Pylint.
Line: 27
Column: 5
batch_size=st.integers(1, 3),
**mu.gcs)
@settings(max_examples=2, deadline=100)
def test_mkl_convolution(self, stride, pad, kernel, size,
input_channels, output_channels,
batch_size, gc, dc):
op = core.CreateOperator(
"Conv",
["X", "w", "b"],
Reported by Pylint.
Line: 27
Column: 5
batch_size=st.integers(1, 3),
**mu.gcs)
@settings(max_examples=2, deadline=100)
def test_mkl_convolution(self, stride, pad, kernel, size,
input_channels, output_channels,
batch_size, gc, dc):
op = core.CreateOperator(
"Conv",
["X", "w", "b"],
Reported by Pylint.
Line: 27
Column: 5
batch_size=st.integers(1, 3),
**mu.gcs)
@settings(max_examples=2, deadline=100)
def test_mkl_convolution(self, stride, pad, kernel, size,
input_channels, output_channels,
batch_size, gc, dc):
op = core.CreateOperator(
"Conv",
["X", "w", "b"],
Reported by Pylint.
caffe2/python/mkl/mkl_LRN_op_test.py
15 issues
Line: 7
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
Reported by Pylint.
Line: 8
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
Reported by Pylint.
Line: 15
Column: 22
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLLRNTest(hu.HypothesisTestCase):
@given(input_channels=st.integers(1, 3),
Reported by Pylint.
Line: 28
Column: 30
def test_mkl_LRN(self, input_channels,
batch_size, im_size, order,
gc, dc):
op = core.CreateOperator(
"LRN",
["X"],
["Y", "Y_scale"],
size=5,
Reported by Pylint.
Line: 46
Column: 5
if __name__ == "__main__":
import unittest
unittest.main()
Reported by Pylint.
Line: 1
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
Reported by Pylint.
Line: 1
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
Reported by Pylint.
Line: 19
Column: 1
"Skipping as we do not have mkldnn.")
class MKLLRNTest(hu.HypothesisTestCase):
@given(input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
im_size=st.integers(1, 10),
order=st.sampled_from(["NCHW"]),
**mu.gcs)
Reported by Pylint.
Line: 25
Column: 5
im_size=st.integers(1, 10),
order=st.sampled_from(["NCHW"]),
**mu.gcs)
def test_mkl_LRN(self, input_channels,
batch_size, im_size, order,
gc, dc):
op = core.CreateOperator(
"LRN",
Reported by Pylint.
Line: 25
Column: 5
im_size=st.integers(1, 10),
order=st.sampled_from(["NCHW"]),
**mu.gcs)
def test_mkl_LRN(self, input_channels,
batch_size, im_size, order,
gc, dc):
op = core.CreateOperator(
"LRN",
Reported by Pylint.
caffe2/python/ideep/test_ideep_net.py
15 issues
Line: 79
Column: 15
return parser
def benchmark(args):
print('Batch size: {}'.format(args.batch_size))
mf = ModelDownloader()
init_net, pred_net, value_info = mf.get_c2_model(args.model)
input_shapes = {k : [args.batch_size] + v[-1][1:] for (k, v) in value_info.items()}
print("input info: {}".format(input_shapes))
Reported by Pylint.
Line: 117
Column: 9
#print("{}".format(name))
workspace.FeedBlob(name, blob, device_option)
workspace.CreateNet(pred_net)
start = time.time()
res = workspace.BenchmarkNet(pred_net.name,
args.warmup_iterations,
args.iterations,
args.layer_wise_benchmark)
print("FPS: {:.2f}".format(1/res[0]*1000*args.batch_size))
Reported by Pylint.
Line: 1
Column: 1
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.models.download import ModelDownloader
import numpy as np
Reported by Pylint.
Line: 10
Column: 1
from caffe2.python import core, workspace
from caffe2.python.models.download import ModelDownloader
import numpy as np
import argparse
import time
def GetArgumentParser():
parser = argparse.ArgumentParser(description="Caffe2 benchmark.")
Reported by Pylint.
Line: 11
Column: 1
from caffe2.python.models.download import ModelDownloader
import numpy as np
import argparse
import time
def GetArgumentParser():
parser = argparse.ArgumentParser(description="Caffe2 benchmark.")
parser.add_argument(
Reported by Pylint.
Line: 14
Column: 1
import time
def GetArgumentParser():
parser = argparse.ArgumentParser(description="Caffe2 benchmark.")
parser.add_argument(
"--batch_size",
type=int,
default=128,
Reported by Pylint.
Line: 14
Column: 1
import time
def GetArgumentParser():
parser = argparse.ArgumentParser(description="Caffe2 benchmark.")
parser.add_argument(
"--batch_size",
type=int,
default=128,
Reported by Pylint.
Line: 79
Column: 1
return parser
def benchmark(args):
print('Batch size: {}'.format(args.batch_size))
mf = ModelDownloader()
init_net, pred_net, value_info = mf.get_c2_model(args.model)
input_shapes = {k : [args.batch_size] + v[-1][1:] for (k, v) in value_info.items()}
print("input info: {}".format(input_shapes))
Reported by Pylint.
Line: 79
Column: 1
return parser
def benchmark(args):
print('Batch size: {}'.format(args.batch_size))
mf = ModelDownloader()
init_net, pred_net, value_info = mf.get_c2_model(args.model)
input_shapes = {k : [args.batch_size] + v[-1][1:] for (k, v) in value_info.items()}
print("input info: {}".format(input_shapes))
Reported by Pylint.
Line: 81
Column: 5
def benchmark(args):
print('Batch size: {}'.format(args.batch_size))
mf = ModelDownloader()
init_net, pred_net, value_info = mf.get_c2_model(args.model)
input_shapes = {k : [args.batch_size] + v[-1][1:] for (k, v) in value_info.items()}
print("input info: {}".format(input_shapes))
external_inputs = {}
for k, v in input_shapes.items():
Reported by Pylint.
caffe2/python/fakelowp/test_utils.py
15 issues
Line: 1
Column: 1
import sys
import numpy as np
def print_test_debug_info(testname, items_dict):
Reported by Pylint.
Line: 9
Column: 1
import sys
import numpy as np
def print_test_debug_info(testname, items_dict):
filename = "debug_operator_onnxifi_" + testname + ".txt"
np.set_printoptions(threshold=sys.maxsize)
with open(filename, 'w') as f:
for key, value in items_dict.items():
print(key, value)
Reported by Pylint.
Line: 12
Column: 33
def print_test_debug_info(testname, items_dict):
filename = "debug_operator_onnxifi_" + testname + ".txt"
np.set_printoptions(threshold=sys.maxsize)
with open(filename, 'w') as f:
for key, value in items_dict.items():
print(key, value)
f.write("{}\n".format(key))
f.write("{}\n".format(value))
Reported by Pylint.
Line: 18
Column: 1
f.write("{}\n".format(key))
f.write("{}\n".format(value))
def print_net(net):
for i in net.external_input:
print("Input: {}".format(i))
for i in net.external_output:
print("Output: {}".format(i))
for op in net.op:
Reported by Pylint.
Line: 23
Column: 9
print("Input: {}".format(i))
for i in net.external_output:
print("Output: {}".format(i))
for op in net.op:
print("Op {}".format(op.type))
for x in op.input:
print(" input: {}".format(x))
for y in op.output:
print(" output: {}".format(y))
Reported by Pylint.
Line: 25
Column: 13
print("Output: {}".format(i))
for op in net.op:
print("Op {}".format(op.type))
for x in op.input:
print(" input: {}".format(x))
for y in op.output:
print(" output: {}".format(y))
def _sigmoid(x):
Reported by Pylint.
Line: 27
Column: 13
print("Op {}".format(op.type))
for x in op.input:
print(" input: {}".format(x))
for y in op.output:
print(" output: {}".format(y))
def _sigmoid(x):
return 1. / (1. + np.exp(np.float64(-x)))
Reported by Pylint.
Line: 30
Column: 1
for y in op.output:
print(" output: {}".format(y))
def _sigmoid(x):
return 1. / (1. + np.exp(np.float64(-x)))
def _tanh(x):
return np.tanh(np.float64(x))
Reported by Pylint.
Line: 33
Column: 1
def _sigmoid(x):
return 1. / (1. + np.exp(np.float64(-x)))
def _tanh(x):
return np.tanh(np.float64(x))
def _swish(x):
return np.float64(x) * _sigmoid(x)
Reported by Pylint.
Line: 36
Column: 1
def _tanh(x):
return np.tanh(np.float64(x))
def _swish(x):
return np.float64(x) * _sigmoid(x)
def _gelu_by_sigmoid(x):
return np.float64(x) / (1. + np.exp(np.float64(x) * 1.702))
Reported by Pylint.
caffe2/python/mkl/mkl_sigmoid_op_test.py
15 issues
Line: 7
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
Reported by Pylint.
Line: 8
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
Reported by Pylint.
Line: 15
Column: 22
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLSigmoidTest(hu.HypothesisTestCase):
@given(n=st.integers(1, 5), m=st.integers(1, 5), inplace=st.booleans(),
**mu.gcs)
def test_mkl_sigmoid(self, n, m, inplace, gc, dc):
Reported by Pylint.
Line: 20
Column: 47
class MKLSigmoidTest(hu.HypothesisTestCase):
@given(n=st.integers(1, 5), m=st.integers(1, 5), inplace=st.booleans(),
**mu.gcs)
def test_mkl_sigmoid(self, n, m, inplace, gc, dc):
X = np.random.rand(m, n).astype(np.float32)
op = core.CreateOperator(
"Sigmoid",
["X"],
["Y" if not inplace else "X"]
Reported by Pylint.
Line: 31
Column: 5
if __name__ == "__main__":
import unittest
unittest.main()
Reported by Pylint.
Line: 1
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
Reported by Pylint.
Line: 17
Column: 1
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLSigmoidTest(hu.HypothesisTestCase):
@given(n=st.integers(1, 5), m=st.integers(1, 5), inplace=st.booleans(),
**mu.gcs)
def test_mkl_sigmoid(self, n, m, inplace, gc, dc):
X = np.random.rand(m, n).astype(np.float32)
op = core.CreateOperator(
Reported by Pylint.
Line: 20
Column: 5
class MKLSigmoidTest(hu.HypothesisTestCase):
@given(n=st.integers(1, 5), m=st.integers(1, 5), inplace=st.booleans(),
**mu.gcs)
def test_mkl_sigmoid(self, n, m, inplace, gc, dc):
X = np.random.rand(m, n).astype(np.float32)
op = core.CreateOperator(
"Sigmoid",
["X"],
["Y" if not inplace else "X"]
Reported by Pylint.
Line: 20
Column: 5
class MKLSigmoidTest(hu.HypothesisTestCase):
@given(n=st.integers(1, 5), m=st.integers(1, 5), inplace=st.booleans(),
**mu.gcs)
def test_mkl_sigmoid(self, n, m, inplace, gc, dc):
X = np.random.rand(m, n).astype(np.float32)
op = core.CreateOperator(
"Sigmoid",
["X"],
["Y" if not inplace else "X"]
Reported by Pylint.
Line: 20
Column: 5
class MKLSigmoidTest(hu.HypothesisTestCase):
@given(n=st.integers(1, 5), m=st.integers(1, 5), inplace=st.booleans(),
**mu.gcs)
def test_mkl_sigmoid(self, n, m, inplace, gc, dc):
X = np.random.rand(m, n).astype(np.float32)
op = core.CreateOperator(
"Sigmoid",
["X"],
["Y" if not inplace else "X"]
Reported by Pylint.
caffe2/python/net_printer_test.py
15 issues
Line: 1
Column: 1
from caffe2.python import net_printer
from caffe2.python.checkpoint import Job
from caffe2.python.net_builder import ops
from caffe2.python.task import Task, final_output, WorkspaceType
Reported by Pylint.
Line: 10
Column: 1
from caffe2.python.checkpoint import Job
from caffe2.python.net_builder import ops
from caffe2.python.task import Task, final_output, WorkspaceType
import unittest
def example_loop():
with Task():
total = ops.Const(0)
Reported by Pylint.
Line: 13
Column: 1
import unittest
def example_loop():
with Task():
total = ops.Const(0)
total_large = ops.Const(0)
total_small = ops.Const(0)
total_tiny = ops.Const(0)
Reported by Pylint.
Line: 23
Column: 62
outer = ops.Mul([loop.iter(), ops.Const(10)])
with ops.loop(loop.iter()) as inner:
val = ops.Add([outer, inner.iter()])
with ops.If(ops.GE([val, ops.Const(80)])) as c:
ops.Add([total_large, val], [total_large])
with c.Elif(ops.GE([val, ops.Const(50)])) as c:
ops.Add([total_small, val], [total_small])
with c.Else():
ops.Add([total_tiny, val], [total_tiny])
Reported by Pylint.
Line: 25
Column: 62
val = ops.Add([outer, inner.iter()])
with ops.If(ops.GE([val, ops.Const(80)])) as c:
ops.Add([total_large, val], [total_large])
with c.Elif(ops.GE([val, ops.Const(50)])) as c:
ops.Add([total_small, val], [total_small])
with c.Else():
ops.Add([total_tiny, val], [total_tiny])
ops.Add([total, val], total)
Reported by Pylint.
Line: 32
Column: 1
ops.Add([total, val], total)
def example_task():
with Task():
with ops.task_init():
one = ops.Const(1)
two = ops.Add([one, one])
with ops.task_init():
Reported by Pylint.
Line: 47
Column: 9
six = ops.Add([accum, one])
ops.Add([accum, one], [accum])
seven_2 = ops.Add([accum, one])
o6 = final_output(six)
o7_1 = final_output(seven_1)
o7_2 = final_output(seven_2)
with Task(num_instances=2):
with ops.task_init():
Reported by Pylint.
Line: 61
Column: 1
return o6, o7_1, o7_2
def example_job():
with Job() as job:
with job.init_group:
example_loop()
example_task()
return job
Reported by Pylint.
Line: 69
Column: 1
return job
class TestNetPrinter(unittest.TestCase):
def test_print(self):
self.assertTrue(len(net_printer.to_string(example_job())) > 0)
def test_valid_job(self):
job = example_job()
Reported by Pylint.
Line: 70
Column: 5
class TestNetPrinter(unittest.TestCase):
def test_print(self):
self.assertTrue(len(net_printer.to_string(example_job())) > 0)
def test_valid_job(self):
job = example_job()
with job:
Reported by Pylint.
aten/src/ATen/test/scalar_test.cpp
15 issues
Line: 106
Column: 89
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
if (at::hasCUDA()) {
auto r = next_h.to(at::Device(kCUDA), kFloat, /*non_blocking=*/ false, /*copy=*/ true);
ASSERT_TRUE(r.to(at::Device(kCPU), kFloat, /*non_blocking=*/ false, /*copy=*/ true).equal(next_h));
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto)
ASSERT_NO_THROW(randn({10, 10, 2}, options));
// check Scalar.toTensor on Scalars backed by different data types
Reported by FlawFinder.
Line: 151
Column: 28
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
}
TEST(TestScalar, TestEqual) {
ASSERT_FALSE(Scalar(1.0).equal(false));
ASSERT_FALSE(Scalar(1.0).equal(true));
ASSERT_FALSE(Scalar(true).equal(1.0));
ASSERT_TRUE(Scalar(true).equal(true));
ASSERT_TRUE(Scalar(c10::complex<double>{2.0, 5.0}).equal(c10::complex<double>{2.0, 5.0}));
Reported by FlawFinder.
Line: 152
Column: 28
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
TEST(TestScalar, TestEqual) {
ASSERT_FALSE(Scalar(1.0).equal(false));
ASSERT_FALSE(Scalar(1.0).equal(true));
ASSERT_FALSE(Scalar(true).equal(1.0));
ASSERT_TRUE(Scalar(true).equal(true));
ASSERT_TRUE(Scalar(c10::complex<double>{2.0, 5.0}).equal(c10::complex<double>{2.0, 5.0}));
ASSERT_TRUE(Scalar(c10::complex<double>{2.0, 0}).equal(2.0));
Reported by FlawFinder.
Line: 153
Column: 29
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
TEST(TestScalar, TestEqual) {
ASSERT_FALSE(Scalar(1.0).equal(false));
ASSERT_FALSE(Scalar(1.0).equal(true));
ASSERT_FALSE(Scalar(true).equal(1.0));
ASSERT_TRUE(Scalar(true).equal(true));
ASSERT_TRUE(Scalar(c10::complex<double>{2.0, 5.0}).equal(c10::complex<double>{2.0, 5.0}));
ASSERT_TRUE(Scalar(c10::complex<double>{2.0, 0}).equal(2.0));
ASSERT_TRUE(Scalar(c10::complex<double>{2.0, 0}).equal(2));
Reported by FlawFinder.
Line: 154
Column: 28
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
ASSERT_FALSE(Scalar(1.0).equal(false));
ASSERT_FALSE(Scalar(1.0).equal(true));
ASSERT_FALSE(Scalar(true).equal(1.0));
ASSERT_TRUE(Scalar(true).equal(true));
ASSERT_TRUE(Scalar(c10::complex<double>{2.0, 5.0}).equal(c10::complex<double>{2.0, 5.0}));
ASSERT_TRUE(Scalar(c10::complex<double>{2.0, 0}).equal(2.0));
ASSERT_TRUE(Scalar(c10::complex<double>{2.0, 0}).equal(2));
Reported by FlawFinder.
Line: 156
Column: 54
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
ASSERT_FALSE(Scalar(true).equal(1.0));
ASSERT_TRUE(Scalar(true).equal(true));
ASSERT_TRUE(Scalar(c10::complex<double>{2.0, 5.0}).equal(c10::complex<double>{2.0, 5.0}));
ASSERT_TRUE(Scalar(c10::complex<double>{2.0, 0}).equal(2.0));
ASSERT_TRUE(Scalar(c10::complex<double>{2.0, 0}).equal(2));
ASSERT_TRUE(Scalar(2.0).equal(c10::complex<double>{2.0, 0.0}));
ASSERT_FALSE(Scalar(2.0).equal(c10::complex<double>{2.0, 4.0}));
Reported by FlawFinder.
Line: 157
Column: 52
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
ASSERT_TRUE(Scalar(true).equal(true));
ASSERT_TRUE(Scalar(c10::complex<double>{2.0, 5.0}).equal(c10::complex<double>{2.0, 5.0}));
ASSERT_TRUE(Scalar(c10::complex<double>{2.0, 0}).equal(2.0));
ASSERT_TRUE(Scalar(c10::complex<double>{2.0, 0}).equal(2));
ASSERT_TRUE(Scalar(2.0).equal(c10::complex<double>{2.0, 0.0}));
ASSERT_FALSE(Scalar(2.0).equal(c10::complex<double>{2.0, 4.0}));
ASSERT_FALSE(Scalar(2.0).equal(3.0));
Reported by FlawFinder.
Line: 158
Column: 52
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
ASSERT_TRUE(Scalar(c10::complex<double>{2.0, 5.0}).equal(c10::complex<double>{2.0, 5.0}));
ASSERT_TRUE(Scalar(c10::complex<double>{2.0, 0}).equal(2.0));
ASSERT_TRUE(Scalar(c10::complex<double>{2.0, 0}).equal(2));
ASSERT_TRUE(Scalar(2.0).equal(c10::complex<double>{2.0, 0.0}));
ASSERT_FALSE(Scalar(2.0).equal(c10::complex<double>{2.0, 4.0}));
ASSERT_FALSE(Scalar(2.0).equal(3.0));
ASSERT_TRUE(Scalar(2.0).equal(2));
Reported by FlawFinder.
Line: 160
Column: 27
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
ASSERT_TRUE(Scalar(c10::complex<double>{2.0, 0}).equal(2.0));
ASSERT_TRUE(Scalar(c10::complex<double>{2.0, 0}).equal(2));
ASSERT_TRUE(Scalar(2.0).equal(c10::complex<double>{2.0, 0.0}));
ASSERT_FALSE(Scalar(2.0).equal(c10::complex<double>{2.0, 4.0}));
ASSERT_FALSE(Scalar(2.0).equal(3.0));
ASSERT_TRUE(Scalar(2.0).equal(2));
ASSERT_TRUE(Scalar(2).equal(c10::complex<double>{2.0, 0}));
Reported by FlawFinder.
Line: 161
Column: 28
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
ASSERT_TRUE(Scalar(c10::complex<double>{2.0, 0}).equal(2));
ASSERT_TRUE(Scalar(2.0).equal(c10::complex<double>{2.0, 0.0}));
ASSERT_FALSE(Scalar(2.0).equal(c10::complex<double>{2.0, 4.0}));
ASSERT_FALSE(Scalar(2.0).equal(3.0));
ASSERT_TRUE(Scalar(2.0).equal(2));
ASSERT_TRUE(Scalar(2).equal(c10::complex<double>{2.0, 0}));
ASSERT_TRUE(Scalar(2).equal(2));
Reported by FlawFinder.
caffe2/python/layers/concat.py
15 issues
Line: 91
Column: 9
assert len(shape) >= axis,\
"Concat expects that limited dimensions of the input tensor"
shapes.append(shape)
logger.info('Concat Layer input shapes: ' + str(shapes))
if axis == 0:
self.output_schema = schema.from_blob_list(
input_record[0],
[self.get_next_blob_reference('output')]
Reported by Pylint.
Line: 110
Column: 9
output_dims = shapes[0]
output_dims[axis - 1] = concat_dim
logger.info('Concat Layer output_dims: ' + str(output_dims))
self.output_schema = schema.Scalar(
(np.float32, output_dims),
self.get_next_blob_reference('output'))
record_to_concat = input_record.fields.values()
Reported by Pylint.
Line: 1
Column: 1
## @package concat
# Module caffe2.python.layers.concat
from caffe2.python import schema
from caffe2.python.layers.layers import (
Reported by Pylint.
Line: 14
Column: 1
)
from future.utils import viewitems
import numpy as np
from collections import defaultdict
import logging
logger = logging.getLogger(__name__)
Reported by Pylint.
Line: 16
Column: 1
import numpy as np
from collections import defaultdict
import logging
logger = logging.getLogger(__name__)
def get_concatenated_feature_to_index(blobs_to_concat):
concat_feature_to_index = defaultdict(list)
Reported by Pylint.
Line: 20
Column: 1
logger = logging.getLogger(__name__)
def get_concatenated_feature_to_index(blobs_to_concat):
concat_feature_to_index = defaultdict(list)
start_pos = 0
for scalar in blobs_to_concat:
num_dims = scalar.dtype.shape[0]
if hasattr(scalar, 'metadata') \
Reported by Pylint.
Line: 29
Column: 20
and hasattr(scalar.metadata, 'feature_specs') \
and hasattr(scalar.metadata.feature_specs, 'feature_to_index') \
and isinstance(scalar.metadata.feature_specs.feature_to_index, dict): # noqa B950
for k, v in scalar.metadata.feature_specs.feature_to_index.items():
concat_feature_to_index[k].extend([start_pos + vi for vi in v])
start_pos += num_dims
return dict(concat_feature_to_index) if concat_feature_to_index.keys() else None
Reported by Pylint.
Line: 67
Column: 5
)
"""
def __init__(self, model, input_record, axis=1, add_axis=0,
name='concat', **kwargs):
super(Concat, self).__init__(model, name, input_record, **kwargs)
self.axis = axis
self.add_axis = add_axis
assert not (axis == 0 and add_axis == 1), \
Reported by Pylint.
Line: 67
Column: 5
)
"""
def __init__(self, model, input_record, axis=1, add_axis=0,
name='concat', **kwargs):
super(Concat, self).__init__(model, name, input_record, **kwargs)
self.axis = axis
self.add_axis = add_axis
assert not (axis == 0 and add_axis == 1), \
Reported by Pylint.
Line: 69
Column: 9
def __init__(self, model, input_record, axis=1, add_axis=0,
name='concat', **kwargs):
super(Concat, self).__init__(model, name, input_record, **kwargs)
self.axis = axis
self.add_axis = add_axis
assert not (axis == 0 and add_axis == 1), \
"It's not allowed to add axis=0"
assert isinstance(input_record, schema.Struct),\
Reported by Pylint.
.circleci/cimodel/lib/conf_tree.py
15 issues
Line: 1
Column: 1
from dataclasses import dataclass, field
from typing import Optional, Dict
def X(val):
"""
Compact way to write a leaf node
"""
return val, []
Reported by Pylint.
Line: 5
Column: 1
from typing import Optional, Dict
def X(val):
"""
Compact way to write a leaf node
"""
return val, []
Reported by Pylint.
Line: 12
Column: 1
return val, []
def XImportant(name):
"""Compact way to write an important (run on PRs) leaf node"""
return (name, [("important", [X(True)])])
@dataclass
Reported by Pylint.
Line: 30
Column: 1
@dataclass
class ConfigNode:
parent: Optional['ConfigNode']
node_name: str
props: Dict[str, str] = field(default_factory=dict)
def get_label(self):
Reported by Pylint.
Line: 35
Column: 5
node_name: str
props: Dict[str, str] = field(default_factory=dict)
def get_label(self):
return self.node_name
# noinspection PyMethodMayBeStatic
def get_children(self):
return []
Reported by Pylint.
Line: 39
Column: 5
return self.node_name
# noinspection PyMethodMayBeStatic
def get_children(self):
return []
def get_parents(self):
return (self.parent.get_parents() + [self.parent.get_label()]) if self.parent else []
Reported by Pylint.
Line: 39
Column: 5
return self.node_name
# noinspection PyMethodMayBeStatic
def get_children(self):
return []
def get_parents(self):
return (self.parent.get_parents() + [self.parent.get_label()]) if self.parent else []
Reported by Pylint.
Line: 42
Column: 5
def get_children(self):
return []
def get_parents(self):
return (self.parent.get_parents() + [self.parent.get_label()]) if self.parent else []
def get_depth(self):
return len(self.get_parents())
Reported by Pylint.
Line: 45
Column: 5
def get_parents(self):
return (self.parent.get_parents() + [self.parent.get_label()]) if self.parent else []
def get_depth(self):
return len(self.get_parents())
def get_node_key(self):
return "%".join(self.get_parents() + [self.get_label()])
Reported by Pylint.
Line: 48
Column: 5
def get_depth(self):
return len(self.get_parents())
def get_node_key(self):
return "%".join(self.get_parents() + [self.get_label()])
def find_prop(self, propname, searched=None):
"""
Checks if its own dictionary has
Reported by Pylint.
caffe2/python/ideep/moment_sgd_op_test.py
15 issues
Line: 7
Column: 1
import numpy as np
import hypothesis.strategies as st
import unittest
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.ideep_test_util as mu
Reported by Pylint.
Line: 11
Column: 1
import unittest
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestMomentumSGDUpdateOps(hu.HypothesisTestCase):
Reported by Pylint.
Line: 15
Column: 22
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestMomentumSGDUpdateOps(hu.HypothesisTestCase):
@given(n=st.integers(4, 8), nesterov=st.booleans(),
**mu.gcs)
def test_MomentumSGDUpdate(self, n, nesterov, gc, dc):
param = np.random.rand(n).astype(np.float32)
Reported by Pylint.
Line: 19
Column: 51
class TestMomentumSGDUpdateOps(hu.HypothesisTestCase):
@given(n=st.integers(4, 8), nesterov=st.booleans(),
**mu.gcs)
def test_MomentumSGDUpdate(self, n, nesterov, gc, dc):
param = np.random.rand(n).astype(np.float32)
grad = np.random.rand(n).astype(np.float32)
lr = np.random.rand(1).astype(np.float32)
param_momentum = np.random.rand(n).astype(np.float32)
momentum = 0.9
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
import hypothesis.strategies as st
import unittest
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 8
Column: 1
import numpy as np
import hypothesis.strategies as st
import unittest
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.ideep_test_util as mu
Reported by Pylint.
Line: 12
Column: 1
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestMomentumSGDUpdateOps(hu.HypothesisTestCase):
@given(n=st.integers(4, 8), nesterov=st.booleans(),
Reported by Pylint.
Line: 16
Column: 1
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestMomentumSGDUpdateOps(hu.HypothesisTestCase):
@given(n=st.integers(4, 8), nesterov=st.booleans(),
**mu.gcs)
def test_MomentumSGDUpdate(self, n, nesterov, gc, dc):
param = np.random.rand(n).astype(np.float32)
grad = np.random.rand(n).astype(np.float32)
Reported by Pylint.
Line: 19
Column: 5
class TestMomentumSGDUpdateOps(hu.HypothesisTestCase):
@given(n=st.integers(4, 8), nesterov=st.booleans(),
**mu.gcs)
def test_MomentumSGDUpdate(self, n, nesterov, gc, dc):
param = np.random.rand(n).astype(np.float32)
grad = np.random.rand(n).astype(np.float32)
lr = np.random.rand(1).astype(np.float32)
param_momentum = np.random.rand(n).astype(np.float32)
momentum = 0.9
Reported by Pylint.
Line: 19
Column: 5
class TestMomentumSGDUpdateOps(hu.HypothesisTestCase):
@given(n=st.integers(4, 8), nesterov=st.booleans(),
**mu.gcs)
def test_MomentumSGDUpdate(self, n, nesterov, gc, dc):
param = np.random.rand(n).astype(np.float32)
grad = np.random.rand(n).astype(np.float32)
lr = np.random.rand(1).astype(np.float32)
param_momentum = np.random.rand(n).astype(np.float32)
momentum = 0.9
Reported by Pylint.