The following issues were found:
caffe2/distributed/store_ops_test_util.py
5 issues
Line: 1
Column: 1
## @package store_ops_test_util
# Module caffe2.distributed.store_ops_test_util
from multiprocessing import Process, Queue
Reported by Pylint.
Line: 15
Column: 1
from caffe2.python import core, workspace
class StoreOpsTests(object):
@classmethod
def _test_set_get(cls, queue, create_store_handler_fn, index, num_procs):
store_handler = create_store_handler_fn()
blob = "blob"
value = np.full(1, 1, np.float32)
Reported by Pylint.
Line: 15
Column: 1
from caffe2.python import core, workspace
class StoreOpsTests(object):
@classmethod
def _test_set_get(cls, queue, create_store_handler_fn, index, num_procs):
store_handler = create_store_handler_fn()
blob = "blob"
value = np.full(1, 1, np.float32)
Reported by Pylint.
Line: 49
Column: 5
workspace.ResetWorkspace()
@classmethod
def test_set_get(cls, create_store_handler_fn):
# Queue for assertion errors on subprocesses
queue = Queue()
# Start N processes in the background
num_procs = 4
Reported by Pylint.
Line: 72
Column: 5
raise queue.get()
@classmethod
def test_get_timeout(cls, create_store_handler_fn):
store_handler = create_store_handler_fn()
net = core.Net('get_missing_blob')
net.StoreGet([store_handler], 1, blob_name='blob')
workspace.RunNetOnce(net)
Reported by Pylint.
aten/src/ATen/test/ivalue_test.cpp
5 issues
Line: 93
ASSERT_EQ(complex_tuple.toTuple()->elements()[1], foo1);
}
TEST(IValueTest, ComplexDict) {
typedef c10::complex<double> c_type;
c10::Dict<c_type, c_type> m;
auto num1 = c_type(2.3, -3.5);
auto num2 = c_type(0, 5);
m.insert(num1, 2 * num1);
Reported by Cppcheck.
Line: 61
Column: 31
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
ASSERT_EQ(tv.use_count(), 2);
auto ten2 = ten;
ASSERT_EQ(tv.use_count(), 3);
ASSERT_TRUE(ten2.toTensor().equal(ten.toTensor()));
std::move(ten2).toTensor();
ASSERT_EQ(tv.use_count(), 2);
auto elem1 = c10::complex<double>(3, 4);
auto elem2 = c10::complex<double>(3, -4);
Reported by FlawFinder.
Line: 117
Column: 30
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
#define EXPECT_IVALUE_EQ(a, b) \
EXPECT_EQ((a).isTensor(), (b).isTensor()); \
if ((a).isTensor()) { \
EXPECT_TRUE(a.toTensor().equal(b.toTensor())); \
} else { \
EXPECT_EQ(a, b); \
}
TEST(IValueTest, Swap) {
Reported by FlawFinder.
Line: 268
Column: 29
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
ASSERT_TRUE(c1 == c3);
ASSERT_TRUE(c1.isScalar());
ASSERT_TRUE(c2.toScalar().equal(c_));
}
TEST(IValueTest, BasicFuture) {
auto f1 = c10::make_intrusive<ivalue::Future>(IntType::get());
ASSERT_FALSE(f1->completed());
Reported by FlawFinder.
Line: 373
Column: 35
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
IValue eqTensor = t.equals(tCopy);
EXPECT_TRUE(eqTensor.isTensor());
auto booleanTrue = torch::ones({2, 3}).to(torch::kBool);
EXPECT_TRUE(eqTensor.toTensor().equal(booleanTrue));
// Test identity checking
EXPECT_TRUE(t.is(t));
EXPECT_FALSE(t.is(tCopy));
// NOLINTNEXTLINE(performance-unnecessary-copy-initialization)
Reported by FlawFinder.
caffe2/python/layers/uniform_sampling.py
5 issues
Line: 1
Column: 1
## @package uniform_sampling
# Module caffe2.python.layers.uniform_sampling
import numpy as np
Reported by Pylint.
Line: 21
Column: 5
the samples. input_record is expected to be unique.
"""
def __init__(
self,
model,
input_record,
num_samples,
num_elements,
Reported by Pylint.
Line: 30
Column: 9
name='uniform_sampling',
**kwargs
):
super(UniformSampling, self).__init__(
model, name, input_record, **kwargs
)
assert num_elements > num_samples > 0
assert isinstance(input_record, schema.Scalar)
Reported by Pylint.
Line: 34
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
model, name, input_record, **kwargs
)
assert num_elements > num_samples > 0
assert isinstance(input_record, schema.Scalar)
self.num_elements = num_elements
num_examples_init = ('GivenTensorInt64Fill',
Reported by Bandit.
Line: 35
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
)
assert num_elements > num_samples > 0
assert isinstance(input_record, schema.Scalar)
self.num_elements = num_elements
num_examples_init = ('GivenTensorInt64Fill',
{'values': [num_samples]})
Reported by Bandit.
caffe2/python/layers/merge_id_lists.py
5 issues
Line: 1
Column: 1
from caffe2.python import schema
from caffe2.python.layers.layers import (
get_categorical_limit,
ModelLayer,
Reported by Pylint.
Line: 28
Column: 9
the merged ID_LIST feature
"""
def __init__(self, model, input_record, name='merged'):
super(MergeIdLists, self).__init__(model, name, input_record)
assert all(schema.equal_schemas(x, IdList) for x in input_record), \
"Inputs to MergeIdLists should all be IdLists."
assert all(record.items.metadata is not None
for record in self.input_record), \
Reported by Pylint.
Line: 29
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
"""
def __init__(self, model, input_record, name='merged'):
super(MergeIdLists, self).__init__(model, name, input_record)
assert all(schema.equal_schemas(x, IdList) for x in input_record), \
"Inputs to MergeIdLists should all be IdLists."
assert all(record.items.metadata is not None
for record in self.input_record), \
"Features without metadata are not supported"
Reported by Bandit.
Line: 32
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
assert all(schema.equal_schemas(x, IdList) for x in input_record), \
"Inputs to MergeIdLists should all be IdLists."
assert all(record.items.metadata is not None
for record in self.input_record), \
"Features without metadata are not supported"
merge_dim = max(get_categorical_limit(record)
for record in self.input_record)
Reported by Bandit.
Line: 38
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
merge_dim = max(get_categorical_limit(record)
for record in self.input_record)
assert merge_dim is not None, "Unbounded features are not supported"
self.output_schema = schema.NewRecord(
model.net, schema.List(
schema.Scalar(
np.int64,
Reported by Bandit.
aten/src/ATen/native/quantized/cpu/qnnpack/configure.py
5 issues
Line: 9
Column: 1
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import confu
from confu import arm, x86
parser = confu.standard_parser()
Reported by Pylint.
Line: 10
Column: 1
# LICENSE file in the root directory of this source tree.
import confu
from confu import arm, x86
parser = confu.standard_parser()
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import confu
Reported by Pylint.
Line: 16
Column: 1
parser = confu.standard_parser()
def main(args):
options = parser.parse_args(args)
build = confu.Build.from_options(options)
build.export_cpath("include", ["q8gemm.h"])
Reported by Pylint.
Line: 16
Column: 1
parser = confu.standard_parser()
def main(args):
options = parser.parse_args(args)
build = confu.Build.from_options(options)
build.export_cpath("include", ["q8gemm.h"])
Reported by Pylint.
caffe2/perfkernels/math_cpu_base.cc
5 issues
Line: 29
Column: 10
CWE codes:
327
Suggestion:
Use a more secure technique for acquiring random values
uint8_t* output_data,
uint64_t input_size,
uint64_t bitwidth,
bool random,
const float* random_buffer) {
uint64_t data_per_byte = 8 / bitwidth;
uint64_t tail = input_size % data_per_byte;
tail = tail ? data_per_byte - tail : 0;
uint64_t segment_size = (input_size + data_per_byte - 1) / data_per_byte;
Reported by FlawFinder.
Line: 55
Column: 7
CWE codes:
327
Suggestion:
Use a more secure technique for acquiring random values
float gap_inverse = 1. / (gap + QEPSILON);
uint8_t max_q = (1 << bitwidth) - 1;
uint64_t bit_start = 0;
if (random) {
for (uint64_t start = 0; start < input_size; start += segment_size) {
uint64_t stride = start + segment_size <= input_size ? segment_size
: input_size - start;
uint64_t i = 0;
for (; i < stride; ++i) {
Reported by FlawFinder.
Line: 103
Column: 10
CWE codes:
327
Suggestion:
Use a more secure technique for acquiring random values
uint8_t* output_data,
uint64_t input_size,
uint64_t bitwidth,
bool random,
const float* random_buffer) {
AVX2_DO(
quantize_and_compress,
input_data,
output_data,
Reported by FlawFinder.
Line: 111
Column: 7
CWE codes:
327
Suggestion:
Use a more secure technique for acquiring random values
output_data,
input_size,
bitwidth,
random,
random_buffer);
BASE_DO(
quantize_and_compress,
input_data,
output_data,
Reported by FlawFinder.
Line: 119
Column: 7
CWE codes:
327
Suggestion:
Use a more secure technique for acquiring random values
output_data,
input_size,
bitwidth,
random,
random_buffer);
}
void decompress_and_dequantize__base(
const uint8_t* input_data,
Reported by FlawFinder.
aten/src/ATen/native/cpu/ReduceOpsKernel.cpp
5 issues
Line: 122
CWE codes:
628
// Reference : https://www.tensorflow.org/api_docs/python/tf/math/cumulative_logsumexp
auto log_add_exp = [](scalar_t x, scalar_t y) -> scalar_t {
scalar_t min = std::isnan(y) ? y : std::min(x,y); //std::min returns first arg if one of the args is nan
scalar_t max = std::isnan(y) ? y : std::max(x,y); //std::max returns first arg if one of the args is nan
if (min != max || std::isfinite(min)) {
// nan will be propagated here
return std::log1p(std::exp(min - max)) + max;
} else {
Reported by Cppcheck.
Line: 123
CWE codes:
628
// Reference : https://www.tensorflow.org/api_docs/python/tf/math/cumulative_logsumexp
auto log_add_exp = [](scalar_t x, scalar_t y) -> scalar_t {
scalar_t min = std::isnan(y) ? y : std::min(x,y); //std::min returns first arg if one of the args is nan
scalar_t max = std::isnan(y) ? y : std::max(x,y); //std::max returns first arg if one of the args is nan
if (min != max || std::isfinite(min)) {
// nan will be propagated here
return std::log1p(std::exp(min - max)) + max;
} else {
// special case to correctly handle infinite cases
Reported by Cppcheck.
Line: 124
CWE codes:
628
auto log_add_exp = [](scalar_t x, scalar_t y) -> scalar_t {
scalar_t min = std::isnan(y) ? y : std::min(x,y); //std::min returns first arg if one of the args is nan
scalar_t max = std::isnan(y) ? y : std::max(x,y); //std::max returns first arg if one of the args is nan
if (min != max || std::isfinite(min)) {
// nan will be propagated here
return std::log1p(std::exp(min - max)) + max;
} else {
// special case to correctly handle infinite cases
return x;
Reported by Cppcheck.
Line: 204
CWE codes:
908
// In the dispatch code blocks below, reduction kernels accumulate results as
// the type `acc_t`. When `scalar_t` is complex, `acc_t` is the downgraded
// real number type. Otherwise, `acc_t` and `scalar_t` are the same type.
if (val == 0) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16, iter.input_dtype(), "norm_cpu", [&] {
using acc_t = typename scalar_value_type<scalar_t>::type;
binary_kernel_reduce(
iter,
NormZeroOps<scalar_t, acc_t>(),
Reported by Cppcheck.
Line: 213
CWE codes:
908
acc_t(0)
);
});
} else if (val == 1) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16, iter.input_dtype(), "norm_cpu", [&] {
using acc_t = typename scalar_value_type<scalar_t>::type;
binary_kernel_reduce(
iter,
NormOneOps<scalar_t, acc_t>(),
Reported by Cppcheck.
caffe2/python/benchmarks/fused_rowwise_nbit_conversion_bench.py
5 issues
Line: 6
Column: 1
import argparse
import numpy as np
from caffe2.python import core, workspace
def main(bit_rate):
# uncomment for debugging
# np.random.seed(0)
Reported by Pylint.
Line: 1
Column: 1
import argparse
import numpy as np
from caffe2.python import core, workspace
def main(bit_rate):
Reported by Pylint.
Line: 9
Column: 1
from caffe2.python import core, workspace
def main(bit_rate):
# uncomment for debugging
# np.random.seed(0)
batchsize = 10 * 1000
blocksize = 64
print(batchsize, blocksize)
Reported by Pylint.
Line: 20
Column: 5
workspace.FeedBlob("input_data", input_data)
net = core.Net("bench")
op = core.CreateOperator(
"FloatToFused" + str(bit_rate) + "BitRowwiseQuantized",
"input_data",
"quantized_data",
engine="GREEDY",
)
Reported by Pylint.
Line: 33
Column: 5
workspace.BenchmarkNet(net.Proto().name, 1, iterations, True)
net2 = core.Net("bench2")
op = core.CreateOperator(
"FloatToFused" + str(bit_rate) + "BitRowwiseQuantized",
"input_data",
"quantized_data",
)
net2.Proto().op.extend([op])
Reported by Pylint.
benchmarks/distributed/rpc/parameter_server/trainer/hooks.py
4 issues
Line: 3
Column: 1
from utils import process_bucket_with_remote_server
import torch
import torch.distributed as c10d
def allreduce_hook(state, bucket):
r"""
A ddp communication hook that uses the process_group allreduce implementation.
Reported by Pylint.
Line: 4
Column: 1
from utils import process_bucket_with_remote_server
import torch
import torch.distributed as c10d
def allreduce_hook(state, bucket):
r"""
A ddp communication hook that uses the process_group allreduce implementation.
Reported by Pylint.
Line: 1
Column: 1
from utils import process_bucket_with_remote_server
import torch
import torch.distributed as c10d
def allreduce_hook(state, bucket):
r"""
A ddp communication hook that uses the process_group allreduce implementation.
Reported by Pylint.
Line: 85
Column: 5
bucket (GradBucket): gradient bucket
"""
tensor = bucket.buffer()
if tensor.is_sparse:
return process_bucket_with_remote_server(state, bucket)
else:
cref = state.cref
tensor = [tensor / state.process_group.size()]
key = state.get_key(bucket.get_index())
Reported by Pylint.
aten/src/ATen/test/cpu_profiling_allocator_test.cpp
4 issues
Line: 154
Column: 26
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
// profiling allocator should not throw.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto)
ASSERT_NO_THROW(validate_allocation_plan(true, true, false));
ASSERT_TRUE(ref_output.equal(output));
// NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto)
ASSERT_NO_THROW(validate_allocation_plan(false, false, false));
ASSERT_TRUE(ref_output.equal(output));
// Furthermore profiling allocator should return the same pointers
// back for the intermediate tensors
Reported by FlawFinder.
Line: 157
Column: 26
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
ASSERT_TRUE(ref_output.equal(output));
// NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto)
ASSERT_NO_THROW(validate_allocation_plan(false, false, false));
ASSERT_TRUE(ref_output.equal(output));
// Furthermore profiling allocator should return the same pointers
// back for the intermediate tensors
// NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto)
ASSERT_NO_THROW(validate_allocation_plan(true, true, true));
ASSERT_TRUE(ref_output.equal(output));
Reported by FlawFinder.
Line: 162
Column: 26
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
// back for the intermediate tensors
// NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto)
ASSERT_NO_THROW(validate_allocation_plan(true, true, true));
ASSERT_TRUE(ref_output.equal(output));
// NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto)
ASSERT_NO_THROW(validate_allocation_plan(false, false, true));
ASSERT_TRUE(ref_output.equal(output));
// When control flow conditions are different between profiling and evaluation
Reported by FlawFinder.
Line: 165
Column: 26
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
ASSERT_TRUE(ref_output.equal(output));
// NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto)
ASSERT_NO_THROW(validate_allocation_plan(false, false, true));
ASSERT_TRUE(ref_output.equal(output));
// When control flow conditions are different between profiling and evaluation
// profiling allocator should throw.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto)
ASSERT_THROW(validate_allocation_plan(true, false, false), c10::Error);
Reported by FlawFinder.