The following issues were found:
caffe2/python/filler_test.py
4 issues
Line: 1
Column: 1
from caffe2.python import core, test_util, workspace
class TestFiller(test_util.TestCase):
def test_filler(self):
Reported by Pylint.
Line: 8
Column: 1
from caffe2.python import core, test_util, workspace
class TestFiller(test_util.TestCase):
def test_filler(self):
net = core.Net("test_filler")
net.Concat(["X0", "X1", "X2"], ["concat_out", "split_info"])
self.assertFalse(workspace.HasBlob("X0"))
input_dim = (30, 20)
Reported by Pylint.
Line: 9
Column: 5
class TestFiller(test_util.TestCase):
def test_filler(self):
net = core.Net("test_filler")
net.Concat(["X0", "X1", "X2"], ["concat_out", "split_info"])
self.assertFalse(workspace.HasBlob("X0"))
input_dim = (30, 20)
workspace.FillRandomNetworkInputs(net, [[input_dim, input_dim, input_dim]], [["float", "float", "float"]])
Reported by Pylint.
Line: 14
Column: 1
net.Concat(["X0", "X1", "X2"], ["concat_out", "split_info"])
self.assertFalse(workspace.HasBlob("X0"))
input_dim = (30, 20)
workspace.FillRandomNetworkInputs(net, [[input_dim, input_dim, input_dim]], [["float", "float", "float"]])
self.assertTrue(workspace.HasBlob("X0"))
self.assertEqual(workspace.FetchBlob("X0").shape, input_dim)
with self.assertRaises(RuntimeError):
# Filler should throw if number of input dims/types is mismatched.
Reported by Pylint.
android/test_app/app/src/main/java/org/pytorch/testapp/Result.java
4 issues
Line: 5
class Result {
public final float[] scores;
public final long totalDuration;
public final long moduleForwardDuration;
public Result(float[] scores, long moduleForwardDuration, long totalDuration) {
this.scores = scores;
Reported by PMD.
Line: 6
class Result {
public final float[] scores;
public final long totalDuration;
public final long moduleForwardDuration;
public Result(float[] scores, long moduleForwardDuration, long totalDuration) {
this.scores = scores;
this.moduleForwardDuration = moduleForwardDuration;
Reported by PMD.
Line: 7
public final float[] scores;
public final long totalDuration;
public final long moduleForwardDuration;
public Result(float[] scores, long moduleForwardDuration, long totalDuration) {
this.scores = scores;
this.moduleForwardDuration = moduleForwardDuration;
this.totalDuration = totalDuration;
Reported by PMD.
Line: 9
public final long totalDuration;
public final long moduleForwardDuration;
public Result(float[] scores, long moduleForwardDuration, long totalDuration) {
this.scores = scores;
this.moduleForwardDuration = moduleForwardDuration;
this.totalDuration = totalDuration;
}
}
Reported by PMD.
android/test_app/app/src/main/java/org/pytorch/testapp/Utils.java
4 issues
Line: 5
import java.util.Arrays;
public class Utils {
public static int[] topK(float[] a, final int topk) {
float values[] = new float[topk];
Arrays.fill(values, -Float.MAX_VALUE);
int ixs[] = new int[topk];
Reported by PMD.
Line: 17
for (int j = 0; j < topk; j++) {
if (a[i] > values[j]) {
for (int k = topk - 1; k >= j + 1; k--) {
values[k] = values[k - 1];
ixs[k] = ixs[k - 1];
}
values[j] = a[i];
ixs[j] = i;
break;
Reported by PMD.
Line: 18
if (a[i] > values[j]) {
for (int k = topk - 1; k >= j + 1; k--) {
values[k] = values[k - 1];
ixs[k] = ixs[k - 1];
}
values[j] = a[i];
ixs[j] = i;
break;
}
Reported by PMD.
Line: 20
values[k] = values[k - 1];
ixs[k] = ixs[k - 1];
}
values[j] = a[i];
ixs[j] = i;
break;
}
}
}
Reported by PMD.
caffe2/python/benchmarks/concat_benchmark.py
4 issues
Line: 4
Column: 1
import argparse
import numpy as np
from caffe2.python import core, workspace
def benchmark_concat(num_inputs, input_dim, axis, add_axis, iterations):
input_names = [f"input{i}" for i in range(num_inputs)]
for n in input_names:
Reported by Pylint.
Line: 1
Column: 1
import argparse
import numpy as np
from caffe2.python import core, workspace
def benchmark_concat(num_inputs, input_dim, axis, add_axis, iterations):
input_names = [f"input{i}" for i in range(num_inputs)]
for n in input_names:
Reported by Pylint.
Line: 7
Column: 1
from caffe2.python import core, workspace
def benchmark_concat(num_inputs, input_dim, axis, add_axis, iterations):
input_names = [f"input{i}" for i in range(num_inputs)]
for n in input_names:
workspace.FeedBlob(n, np.random.randn(*input_dim).astype(np.float32))
net = core.Net("benchmark_net")
Reported by Pylint.
Line: 9
Column: 9
def benchmark_concat(num_inputs, input_dim, axis, add_axis, iterations):
input_names = [f"input{i}" for i in range(num_inputs)]
for n in input_names:
workspace.FeedBlob(n, np.random.randn(*input_dim).astype(np.float32))
net = core.Net("benchmark_net")
net.Concat(input_names, ["output", "split_info"], axis=axis, add_axis=add_axis)
workspace.CreateNet(net)
Reported by Pylint.
caffe2/perfkernels/lstm_unit_cpu-impl.h
4 issues
Line: 63
Column: 9
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
memset(H, 0, sizeof(T) * D);
memset(C, 0, sizeof(T) * D);
} else {
memcpy(H, H_prev, sizeof(T) * D);
memcpy(C, C_prev, sizeof(T) * D);
}
} else {
const T* X_D = &X[D];
const T* X_2D = &X[2 * D];
Reported by FlawFinder.
Line: 64
Column: 9
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
memset(C, 0, sizeof(T) * D);
} else {
memcpy(H, H_prev, sizeof(T) * D);
memcpy(C, C_prev, sizeof(T) * D);
}
} else {
const T* X_D = &X[D];
const T* X_2D = &X[2 * D];
const T* X_3D = &X[3 * D];
Reported by FlawFinder.
Line: 116
Column: 9
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
memset(C_prev_diff, 0, sizeof(T) * D);
memset(H_prev_diff, 0, sizeof(T) * D);
} else {
memcpy(H_prev_diff, H_diff, sizeof(T) * D);
memcpy(C_prev_diff, C_diff, sizeof(T) * D);
}
memset(X_diff, 0, 4 * sizeof(T) * D);
} else {
VECTOR_LOOP for (int d = 0; d < D; ++d) {
Reported by FlawFinder.
Line: 117
Column: 9
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
memset(H_prev_diff, 0, sizeof(T) * D);
} else {
memcpy(H_prev_diff, H_diff, sizeof(T) * D);
memcpy(C_prev_diff, C_diff, sizeof(T) * D);
}
memset(X_diff, 0, 4 * sizeof(T) * D);
} else {
VECTOR_LOOP for (int d = 0; d < D; ++d) {
T* c_prev_diff = C_prev_diff + d;
Reported by FlawFinder.
caffe2/cuda_rtc/pool_op_rtc_gpu.cc
4 issues
Line: 151
Column: 16
CWE codes:
134
Suggestion:
Use a constant for the format specification
const int pad_t,
const int pad_l) {
char buffer[65536];
int nbytes = snprintf(
buffer, 65536, kMaxPoolForwardNCHWSource, name_.c_str(), output_size,
channels, height, width, pooled_height, pooled_width, kernel_h, kernel_w,
stride_h, stride_w, pad_t, pad_l);
DCHECK_GE(nbytes, 0);
DCHECK_LT(nbytes, 65536);
Reported by FlawFinder.
Line: 176
Column: 16
CWE codes:
134
Suggestion:
Use a constant for the format specification
const int pad_t,
const int pad_l) {
char buffer[65536];
int nbytes = snprintf(
buffer, 65536, kMaxPoolBackwardNCHWSource, name_.c_str(), output_size,
num, channels, height, width, pooled_height, pooled_width, kernel_h,
kernel_w, stride_h, stride_w, pad_t, pad_l);
DCHECK_GE(nbytes, 0);
DCHECK_LT(nbytes, 65536);
Reported by FlawFinder.
Line: 150
Column: 3
CWE codes:
119
120
Suggestion:
Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length
const int stride_w,
const int pad_t,
const int pad_l) {
char buffer[65536];
int nbytes = snprintf(
buffer, 65536, kMaxPoolForwardNCHWSource, name_.c_str(), output_size,
channels, height, width, pooled_height, pooled_width, kernel_h, kernel_w,
stride_h, stride_w, pad_t, pad_l);
DCHECK_GE(nbytes, 0);
Reported by FlawFinder.
Line: 175
Column: 3
CWE codes:
119
120
Suggestion:
Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length
const int stride_w,
const int pad_t,
const int pad_l) {
char buffer[65536];
int nbytes = snprintf(
buffer, 65536, kMaxPoolBackwardNCHWSource, name_.c_str(), output_size,
num, channels, height, width, pooled_height, pooled_width, kernel_h,
kernel_w, stride_h, stride_w, pad_t, pad_l);
DCHECK_GE(nbytes, 0);
Reported by FlawFinder.
caffe2/db/db_test.cc
4 issues
Line: 78
}
}
TEST(DBSeekTest, RocksDB) {
SKIP() << "The test is broken. So skip.";
DBSeekTestWrapper("rocksdb");
}
TEST(DBSeekTest, LevelDB) {
Reported by Cppcheck.
Line: 67
Column: 27
CWE codes:
377
}
static void DBSeekTestWrapper(const string& db_type) {
std::string name = std::tmpnam(nullptr);
if (!CreateAndFill(db_type, name)) {
// Manually fail the test, and not do anything onwards.
EXPECT_TRUE(0);
} else {
std::unique_ptr<DB> db(CreateDB(db_type, name, READ));
Reported by FlawFinder.
Line: 93
Column: 27
CWE codes:
377
}
TEST(DBReaderTest, Reader) {
std::string name = std::tmpnam(nullptr);
CreateAndFill("leveldb", name);
std::unique_ptr<DBReader> reader(new DBReader("leveldb", name));
EXPECT_TRUE(reader->cursor() != nullptr);
// DBReader should have a full-fledged cursor.
TestCursor(reader->cursor());
Reported by FlawFinder.
Line: 159
Column: 27
CWE codes:
377
}
TEST(DBReaderShardedTest, Reader) {
std::string name = std::tmpnam(nullptr);
CreateAndFill("leveldb", name);
std::unique_ptr<DBReader> reader0(new DBReader("leveldb", name, 3, 0));
string key;
string value;
Reported by FlawFinder.
caffe2/operators/slice_op.h
4 issues
Line: 234
Column: 9
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
ReinitializeTensor(&starts_host_, {static_cast<int64_t>(starts_.size())}, at::dtype<SIndex>().device(CPU));
ReinitializeTensor(&ends_host_, {static_cast<int64_t>(ends_.size())}, at::dtype<SIndex>().device(CPU));
memcpy(
starts_host_.template mutable_data<SIndex>(),
starts_.data(),
sizeof(SIndex) * starts_.size());
memcpy(
ends_host_.template mutable_data<SIndex>(),
Reported by FlawFinder.
Line: 238
Column: 9
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
starts_host_.template mutable_data<SIndex>(),
starts_.data(),
sizeof(SIndex) * starts_.size());
memcpy(
ends_host_.template mutable_data<SIndex>(),
ends_.data(),
sizeof(SIndex) * ends_.size());
statically_inited_ = true;
}
Reported by FlawFinder.
Line: 308
Column: 9
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
ReinitializeTensor(
&ends_host_, {static_cast<int64_t>(ends_.size())}, at::dtype<SIndex>().device(CPU));
memcpy(
starts_host_.template mutable_data<SIndex>(),
starts_.data(),
sizeof(SIndex) * starts_.size());
memcpy(
ends_host_.template mutable_data<SIndex>(),
Reported by FlawFinder.
Line: 312
Column: 9
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
starts_host_.template mutable_data<SIndex>(),
starts_.data(),
sizeof(SIndex) * starts_.size());
memcpy(
ends_host_.template mutable_data<SIndex>(),
ends_.data(),
sizeof(SIndex) * ends_.size());
statically_inited_ = true;
Reported by FlawFinder.
caffe2/operators/segment_reduction_op.cc
4 issues
Line: 374
Column: 7
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
#define REGISTER_SEGMENT_DEF_SCHEMA_GRADIENT_ONLY( \
segment_name, gradient_name, ...) \
static_assert( \
equal(#segment_name, __VA_ARGS__::basename, __VA_ARGS__::OpDef::name), \
#segment_name); \
static_assert( \
equal( \
#gradient_name, \
__VA_ARGS__::basename, \
Reported by FlawFinder.
Line: 399
Column: 7
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
#define REGISTER_SEGMENT_DEF(segment_name, gradient_name, ...) \
static_assert( \
equal(#segment_name, __VA_ARGS__::basename, __VA_ARGS__::OpDef::name), \
#segment_name); \
REGISTER_CPU_OPERATOR_STR(string(#segment_name), __VA_ARGS__::ForwardOp); \
REGISTER_SEGMENT_DEF_SCHEMA_GRADIENT_ONLY( \
segment_name, gradient_name, __VA_ARGS__)
Reported by FlawFinder.
Line: 577
Column: 7
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
#define REGISTER_SEGMENT_DEF_MAIN_INPUT_AND_FORWARD_OUTPUT_GRADIENT( \
segment_name, gradient_name, ...) \
static_assert( \
equal(#segment_name, __VA_ARGS__::basename, __VA_ARGS__::OpDef::name), \
#segment_name); \
OPERATOR_SCHEMA(segment_name) \
.NumInputs(__VA_ARGS__::ForwardOp::kNumInputs) \
.NumOutputs(1) \
.SetDoc(FormatDoc<__VA_ARGS__>()) \
Reported by FlawFinder.
Line: 594
Column: 7
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
#define REGISTER_LENGTHS_OPS_MAIN_INPUT_AND_FORWARD_OUTPUT_GRADIENT( \
segment_name, gradient_name, ...) \
static_assert( \
equal(#segment_name, __VA_ARGS__::basename, __VA_ARGS__::OpDef::name), \
#segment_name); \
REGISTER_CPU_OPERATOR_STR(string(#segment_name), __VA_ARGS__::ForwardOp); \
REGISTER_SEGMENT_DEF_MAIN_INPUT_AND_FORWARD_OUTPUT_GRADIENT( \
segment_name, gradient_name, __VA_ARGS__)
Reported by FlawFinder.
caffe2/python/crf_predict.py
4 issues
Line: 1
Column: 1
import numpy as np
from caffe2.python.crf import CRFWithLoss
def crf_update_predictions(model, crf_with_loss, classes):
return apply_crf(
model.param_init_net,
Reported by Pylint.
Line: 7
Column: 1
from caffe2.python.crf import CRFWithLoss
def crf_update_predictions(model, crf_with_loss, classes):
return apply_crf(
model.param_init_net,
model.net,
crf_with_loss.transitions,
classes,
Reported by Pylint.
Line: 17
Column: 1
)
def apply_crf(init_net, net, transitions, predictions, num_classes):
padded_classes = CRFWithLoss.pad_predictions(
predictions, init_net, net, num_classes
)
bestPath = net.ViterbiPath([padded_classes, transitions])
new_padded_classes = net.SwapBestPath([padded_classes, bestPath])
Reported by Pylint.
Line: 21
Column: 5
padded_classes = CRFWithLoss.pad_predictions(
predictions, init_net, net, num_classes
)
bestPath = net.ViterbiPath([padded_classes, transitions])
new_padded_classes = net.SwapBestPath([padded_classes, bestPath])
# Revert the effect of pad_predictions by removing the last two rows and
# the last two columns
new_classes = net.RemovePadding(
[new_padded_classes], padding_width=1, end_padding_width=1
Reported by Pylint.