The following issues were found:
caffe2/contrib/playground/output_generator.py
5 issues
Line: 8
Column: 27
from caffe2.python import timeout_guard
def fun_conclude_operator(self):
# Ensure the program exists. This is to "fix" some unknown problems
# causing the job sometimes get stuck.
timeout_guard.EuthanizeIfNecessary(600.0)
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import timeout_guard
def fun_conclude_operator(self):
# Ensure the program exists. This is to "fix" some unknown problems
Reported by Pylint.
Line: 8
Column: 1
from caffe2.python import timeout_guard
def fun_conclude_operator(self):
# Ensure the program exists. This is to "fix" some unknown problems
# causing the job sometimes get stuck.
timeout_guard.EuthanizeIfNecessary(600.0)
Reported by Pylint.
Line: 14
Column: 1
timeout_guard.EuthanizeIfNecessary(600.0)
def assembleAllOutputs(self):
output = {}
output['train_model'] = self.train_model
output['test_model'] = self.test_model
output['model'] = self.model_output
output['metrics'] = self.metrics_output
Reported by Pylint.
Line: 14
Column: 1
timeout_guard.EuthanizeIfNecessary(600.0)
def assembleAllOutputs(self):
output = {}
output['train_model'] = self.train_model
output['test_model'] = self.test_model
output['model'] = self.model_output
output['metrics'] = self.metrics_output
Reported by Pylint.
aten/src/ATen/cpu/vec/vec512/vec512_qint.h
5 issues
Line: 259
Column: 9
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
void store(void* ptr, int count = size()) const {
if (count != size()) {
memcpy(ptr, &vals, count * sizeof(value_type));
} else {
_mm512_storeu_si512((__m512i*)ptr, vals);
}
}
Reported by FlawFinder.
Line: 439
Column: 13
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
void store(void* ptr, int count = size()) const {
if (count != size()) {
memcpy(ptr, &vals, count * sizeof(value_type));
} else {
_mm512_storeu_si512((__m512i*)ptr, vals);
}
}
Reported by FlawFinder.
Line: 604
Column: 13
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
void store(void* ptr, int count = size()) const {
if (count != size()) {
memcpy(ptr, &vals, count * sizeof(value_type));
} else {
_mm512_storeu_si512((__m512i*)ptr, vals);
}
}
Reported by FlawFinder.
Line: 771
Column: 5
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
}
VectorizedQuantizedConverter(const void* ptr) {
memcpy(vals.data(), ptr, sizeof(value_type) * size());
}
void store(void* ptr, int count = size()) const {
memcpy(ptr, vals.data(), count * sizeof(value_type));
}
Reported by FlawFinder.
Line: 775
Column: 5
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
}
void store(void* ptr, int count = size()) const {
memcpy(ptr, vals.data(), count * sizeof(value_type));
}
float_vec_return_type dequantize(
Vectorized<float> scale,
Vectorized<float> zero_point,
Reported by FlawFinder.
aten/src/ATen/test/test_assert.h
5 issues
Line: 9
Column: 3
CWE codes:
134
Suggestion:
Use a constant for the format specification
char msg[2048];
va_list args;
va_start(args, fmt);
vsnprintf(msg, 2048, fmt, args);
va_end(args);
throw std::runtime_error(msg);
}
#if defined(_MSC_VER) && _MSC_VER <= 1900
Reported by FlawFinder.
Line: 6
Column: 3
CWE codes:
119
120
Suggestion:
Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length
#include <stdarg.h>
static inline void barf(const char *fmt, ...) {
char msg[2048];
va_list args;
va_start(args, fmt);
vsnprintf(msg, 2048, fmt, args);
va_end(args);
throw std::runtime_error(msg);
Reported by FlawFinder.
Line: 58
Column: 44
CWE codes:
190
Suggestion:
If source untrusted, check both minimum and maximum, even if the input had no minus sign (large numbers can roll over into negative number; consider saving to an unsigned value if that is intended)
ASSERT(t1.allclose(t2));
// allclose broadcasts, so check same size before allclose.
#define ASSERT_ALLCLOSE_TOLERANCES(t1, t2, atol, rtol) \
ASSERT(t1.is_same_size(t2)); \
ASSERT(t1.allclose(t2, atol, rtol));
Reported by FlawFinder.
Line: 60
Column: 26
CWE codes:
190
Suggestion:
If source untrusted, check both minimum and maximum, even if the input had no minus sign (large numbers can roll over into negative number; consider saving to an unsigned value if that is intended)
// allclose broadcasts, so check same size before allclose.
#define ASSERT_ALLCLOSE_TOLERANCES(t1, t2, atol, rtol) \
ASSERT(t1.is_same_size(t2)); \
ASSERT(t1.allclose(t2, atol, rtol));
Reported by FlawFinder.
Line: 50
Column: 13
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
ASSERT_THROWSM(fn, "");
#define ASSERT_EQUAL(t1, t2) \
ASSERT(t1.equal(t2));
// allclose broadcasts, so check same size before allclose.
#define ASSERT_ALLCLOSE(t1, t2) \
ASSERT(t1.is_same_size(t2)); \
ASSERT(t1.allclose(t2));
Reported by FlawFinder.
aten/src/ATen/test/vec_test_all_types.cpp
5 issues
Line: 638
using vec = TypeParam;
using VT = ValueType<TypeParam>;
test_binary<vec>(
NAME_INFO(== ),
[](const VT& v1, const VT& v2) {return func_cmp(std::equal_to<VT>(), v1, v2); },
[](const vec& v0, const vec& v1) { return v0 == v1; },
createDefaultBinaryTestCase<vec>(TestSeed(), true));
}
TYPED_TEST(Comparison, NotEqual) {
Reported by Cppcheck.
Line: 106
Column: 30
CWE codes:
119
120
Suggestion:
Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length
using VT = ValueType<TypeParam>;
constexpr size_t b_size = vec::size() * sizeof(VT);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
CACHE_ALIGN unsigned char ref_storage[128 * b_size];
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
CACHE_ALIGN unsigned char storage[128 * b_size];
auto seed = TestSeed();
ValueGen<unsigned char> generator(seed);
for (auto& x : ref_storage) {
Reported by FlawFinder.
Line: 108
Column: 30
CWE codes:
119
120
Suggestion:
Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
CACHE_ALIGN unsigned char ref_storage[128 * b_size];
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
CACHE_ALIGN unsigned char storage[128 * b_size];
auto seed = TestSeed();
ValueGen<unsigned char> generator(seed);
for (auto& x : ref_storage) {
x = generator.get();
}
Reported by FlawFinder.
Line: 856
Column: 18
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
// generate expected_val
for (int64_t i = 0; i < vec::size(); i++) {
int64_t hex_mask = 0;
std::memcpy(&hex_mask, &mask[i], sizeof(VT));
expected_val[i] = (hex_mask & 0x01) ? b[i] : a[i];
}
// test with blendv
auto vec_a = vec::loadu(a);
auto vec_b = vec::loadu(b);
Reported by FlawFinder.
Line: 886
Column: 16
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
}
else {
int64_t hex_mask = 0xFFFFFFFFFFFFFFFF;
std::memcpy(&mask[idx], &hex_mask, sizeof(VT));
}
if (!test_blendv<vec, VT, idx+1, N>(expected_val, a, b, mask)) return false;
mask[idx] = m;
return true;
}
Reported by FlawFinder.
caffe2/contrib/playground/resnetdemo/rendezvous_filestore.py
5 issues
Line: 19
Column: 30
# each of them are waiting for different rendezvous to join, they will
# never wait for each other and therefore timeout eventually.
def gen_rendezvous_ctx(self, model, dataset, is_train):
if self.opts['distributed']['num_shards'] < 2:
return None
# have issue when try to set this up on more shards
workspace.RunOperatorOnce(
core.CreateOperator(
Reported by Pylint.
Line: 19
Column: 37
# each of them are waiting for different rendezvous to join, they will
# never wait for each other and therefore timeout eventually.
def gen_rendezvous_ctx(self, model, dataset, is_train):
if self.opts['distributed']['num_shards'] < 2:
return None
# have issue when try to set this up on more shards
workspace.RunOperatorOnce(
core.CreateOperator(
Reported by Pylint.
Line: 26
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b108_hardcoded_tmp_directory.html
workspace.RunOperatorOnce(
core.CreateOperator(
"FileStoreHandlerCreate", [], ["store_handler"],
path="/tmp",
prefix="epoch.{}".format(self.epoch),
)
)
rendezvous = dict(
Reported by Bandit.
Line: 1
Column: 1
from caffe2.python import core, workspace
from caffe2.python import dyndep
dyndep.InitOpsLibrary('@/caffe2/caffe2/distributed:file_store_handler_ops')
Reported by Pylint.
Line: 19
Column: 1
# each of them are waiting for different rendezvous to join, they will
# never wait for each other and therefore timeout eventually.
def gen_rendezvous_ctx(self, model, dataset, is_train):
if self.opts['distributed']['num_shards'] < 2:
return None
# have issue when try to set this up on more shards
workspace.RunOperatorOnce(
core.CreateOperator(
Reported by Pylint.
caffe2/python/fakefp16_transform_lib.py
5 issues
Line: 7
Column: 1
import caffe2.python._import_c_extension as C
from caffe2.proto.caffe2_pb2 import NetDef
def fakeFp16FuseOps(net : NetDef) -> NetDef:
net_str = net.SerializeToString()
out_str = C.fakeFp16FuseOps(net_str)
Reported by Pylint.
Line: 12
Column: 15
def fakeFp16FuseOps(net : NetDef) -> NetDef:
net_str = net.SerializeToString()
out_str = C.fakeFp16FuseOps(net_str)
out_net = NetDef()
out_net.ParseFromString(out_str)
return out_net
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/env python3
import caffe2.python._import_c_extension as C
from caffe2.proto.caffe2_pb2 import NetDef
def fakeFp16FuseOps(net : NetDef) -> NetDef:
Reported by Pylint.
Line: 9
Column: 1
import caffe2.python._import_c_extension as C
from caffe2.proto.caffe2_pb2 import NetDef
def fakeFp16FuseOps(net : NetDef) -> NetDef:
net_str = net.SerializeToString()
out_str = C.fakeFp16FuseOps(net_str)
out_net = NetDef()
out_net.ParseFromString(out_str)
Reported by Pylint.
Line: 9
Column: 1
import caffe2.python._import_c_extension as C
from caffe2.proto.caffe2_pb2 import NetDef
def fakeFp16FuseOps(net : NetDef) -> NetDef:
net_str = net.SerializeToString()
out_str = C.fakeFp16FuseOps(net_str)
out_net = NetDef()
out_net.ParseFromString(out_str)
Reported by Pylint.
caffe2/onnx/backend.cc
5 issues
Line: 48
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
const void* src_ptr = static_cast<const void*>(onnx_tensor.raw_data().data());
field->Resize(num_elements, 0);
void* target_ptr = static_cast<void*>(field->mutable_data());
memcpy(target_ptr, src_ptr, raw_size);
return true;
}
bool IsOperator(const std::string& op_type) {
Reported by FlawFinder.
Line: 1801
Column: 9
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
CAFFE_ENFORCE(onnx_tensor.raw_data().size() == sizeof(float));
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
float f;
memcpy(&f, onnx_tensor.raw_data().c_str(), sizeof(float));
c2_values->set_f(f);
}
} else if (onnx_tensor.data_type() == TensorProto::DOUBLE) {
c2_dtype->set_i(caffe2::TensorProto::DOUBLE);
if (onnx_tensor.double_data_size() > 0) {
Reported by FlawFinder.
Line: 1812
Column: 9
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
CAFFE_ENFORCE(onnx_tensor.raw_data().size() == sizeof(double));
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
double d;
memcpy(&d, onnx_tensor.raw_data().c_str(), sizeof(double));
c2_values->set_f(static_cast<float>(d));
}
} else if (onnx_tensor.data_type() == TensorProto::INT64) {
c2_dtype->set_i(caffe2::TensorProto::INT64);
if (onnx_tensor.int64_data_size() > 0) {
Reported by FlawFinder.
Line: 1823
Column: 9
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
CAFFE_ENFORCE(onnx_tensor.raw_data().size() == sizeof(int64_t));
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t i;
memcpy(&i, onnx_tensor.raw_data().c_str(), sizeof(int64_t));
c2_values->set_i(i);
}
} else if (onnx_tensor.data_type() == TensorProto::INT32) {
c2_dtype->set_i(caffe2::TensorProto::INT32);
if (onnx_tensor.int32_data_size() > 0) {
Reported by FlawFinder.
Line: 1834
Column: 9
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
CAFFE_ENFORCE(onnx_tensor.raw_data().size() == sizeof(int32_t));
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int32_t i;
memcpy(&i, onnx_tensor.raw_data().c_str(), sizeof(int32_t));
c2_values->set_i(i);
}
} else {
// TODO: to support more data type
std::stringstream oss;
Reported by FlawFinder.
benchmarks/fastrnns/conftest.py
5 issues
Line: 1
Column: 1
import pytest # noqa: F401
default_rnns = ['cudnn', 'aten', 'jit', 'jit_premul', 'jit_premul_bias', 'jit_simple',
'jit_multilayer', 'py']
default_cnns = ['resnet18', 'resnet18_jit', 'resnet50', 'resnet50_jit']
all_nets = default_rnns + default_cnns
def pytest_generate_tests(metafunc):
# This creates lists of tests to generate, can be customized
Reported by Pylint.
Line: 1
Column: 1
import pytest # noqa: F401
default_rnns = ['cudnn', 'aten', 'jit', 'jit_premul', 'jit_premul_bias', 'jit_simple',
'jit_multilayer', 'py']
default_cnns = ['resnet18', 'resnet18_jit', 'resnet50', 'resnet50_jit']
all_nets = default_rnns + default_cnns
def pytest_generate_tests(metafunc):
# This creates lists of tests to generate, can be customized
Reported by Pylint.
Line: 1
Column: 1
import pytest # noqa: F401
default_rnns = ['cudnn', 'aten', 'jit', 'jit_premul', 'jit_premul_bias', 'jit_simple',
'jit_multilayer', 'py']
default_cnns = ['resnet18', 'resnet18_jit', 'resnet50', 'resnet50_jit']
all_nets = default_rnns + default_cnns
def pytest_generate_tests(metafunc):
# This creates lists of tests to generate, can be customized
Reported by Pylint.
Line: 8
Column: 1
default_cnns = ['resnet18', 'resnet18_jit', 'resnet50', 'resnet50_jit']
all_nets = default_rnns + default_cnns
def pytest_generate_tests(metafunc):
# This creates lists of tests to generate, can be customized
if metafunc.cls.__name__ == "TestBenchNetwork":
metafunc.parametrize('net_name', all_nets, scope="class")
metafunc.parametrize("executor", [metafunc.config.getoption("executor")], scope="class")
metafunc.parametrize("fuser", [metafunc.config.getoption("fuser")], scope="class")
Reported by Pylint.
Line: 15
Column: 1
metafunc.parametrize("executor", [metafunc.config.getoption("executor")], scope="class")
metafunc.parametrize("fuser", [metafunc.config.getoption("fuser")], scope="class")
def pytest_addoption(parser):
parser.addoption("--fuser", default="old", help="fuser to use for benchmarks")
parser.addoption("--executor", default="legacy", help="executor to use for benchmarks")
Reported by Pylint.
caffe2/python/layers/position_weighted.py
5 issues
Line: 36
Column: 17
else:
self.shape = get_categorical_limit(input_record)
logger.warning(
'{}: categorical_limit of lengths is not available, using '
'categorical_limit of the keys: {}'.format(
str(input_record.lengths()), self.shape))
self.pos_w = self.create_param(param_name='pos_w',
shape=[self.shape, ],
Reported by Pylint.
Line: 1
Column: 1
## @package position_weighted
# Module caffe2.python.layers.position_weighted
import logging
import numpy as np
Reported by Pylint.
Line: 22
Column: 1
logger = logging.getLogger(__name__)
class PositionWeighted(ModelLayer):
def __init__(self, model, input_record, weight_optim=None,
name="position_weights"):
super(PositionWeighted, self).__init__(model, name, input_record)
assert isinstance(input_record, schema.List), "Incorrect input type"
Reported by Pylint.
Line: 25
Column: 9
class PositionWeighted(ModelLayer):
def __init__(self, model, input_record, weight_optim=None,
name="position_weights"):
super(PositionWeighted, self).__init__(model, name, input_record)
assert isinstance(input_record, schema.List), "Incorrect input type"
length_metadata = input_record.lengths.metadata
max_length = (length_metadata.categorical_limit if length_metadata is
not None else None)
Reported by Pylint.
Line: 27
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
name="position_weights"):
super(PositionWeighted, self).__init__(model, name, input_record)
assert isinstance(input_record, schema.List), "Incorrect input type"
length_metadata = input_record.lengths.metadata
max_length = (length_metadata.categorical_limit if length_metadata is
not None else None)
if max_length is not None:
self.shape = max_length
Reported by Bandit.
caffe2/python/layers/batch_sigmoid_cross_entropy_loss.py
5 issues
Line: 1
Column: 1
## @package batch_sigmoid_cross_entropy_loss
# Module caffe2.python.layers.batch_sigmoid_cross_entropy_loss
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
Reported by Pylint.
Line: 14
Column: 1
import numpy as np
class BatchSigmoidCrossEntropyLoss(ModelLayer):
def __init__(
self,
model,
input_record,
name='batch_sigmoid_cross_entropy_loss',
Reported by Pylint.
Line: 22
Column: 9
name='batch_sigmoid_cross_entropy_loss',
**kwargs
):
super(BatchSigmoidCrossEntropyLoss, self).__init__(
model, name, input_record, **kwargs)
assert schema.is_schema_subset(
schema.Struct(
('label', schema.Scalar(np.float32)),
Reported by Pylint.
Line: 25
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
super(BatchSigmoidCrossEntropyLoss, self).__init__(
model, name, input_record, **kwargs)
assert schema.is_schema_subset(
schema.Struct(
('label', schema.Scalar(np.float32)),
('prediction', schema.Scalar(np.float32)),
),
input_record
Reported by Bandit.
Line: 32
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
),
input_record
)
assert input_record.prediction.field_type().shape == \
input_record.label.field_type().shape, \
"prediction and label must have the same shape"
self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])
Reported by Bandit.