The following issues were found by static analysis (FlawFinder, Pylint, and Bandit):
aten/src/ATen/CPUGeneratorImpl.cpp
4 issues
Line: 114
Column: 26
CWE codes:
327
Suggestion:
Use a more secure technique for acquiring random values
*/
uint64_t CPUGeneratorImpl::seed() {
auto random = c10::detail::getNonDeterministicRandom();
this->set_current_seed(random);
return random;
}
/**
* Sets the internal state of CPUGeneratorImpl. The new internal state
Reported by FlawFinder.
Line: 115
Column: 10
CWE codes:
327
Suggestion:
Use a more secure technique for acquiring random values
uint64_t CPUGeneratorImpl::seed() {
auto random = c10::detail::getNonDeterministicRandom();
this->set_current_seed(random);
return random;
}
/**
* Sets the internal state of CPUGeneratorImpl. The new internal state
* must be a strided CPU byte tensor and of the same size as either
Reported by FlawFinder.
Line: 255
Column: 28
CWE codes:
327
Suggestion:
Use a more secure technique for acquiring random values
*
* See Note [Acquire lock when using random generators]
*/
uint32_t CPUGeneratorImpl::random() {
return engine_();
}
/**
* Gets a random 64 bit unsigned integer from the engine
Reported by FlawFinder.
Line: 238
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
accum_state->next_float_normal_sample = *(this->next_float_normal_sample_);
}
memcpy(rng_state, accum_state.get(), size);
return state_tensor.getIntrusivePtr();
}
/**
* Gets the DeviceType of CPUGeneratorImpl.
Reported by FlawFinder.
caffe2/ideep/utils/ideep_context.h
4 issues
Line: 58
Column: 5
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
}
CAFFE_ENFORCE(src);
CAFFE_ENFORCE(dst);
memcpy(dst, src, nbytes);
}
void CopyBytesFromCPU(size_t nbytes, const void* src, void* dst) override {
CopyBytesSameDevice(nbytes, src, dst);
}
Reported by FlawFinder.
Line: 142
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
}
CAFFE_ENFORCE(src);
CAFFE_ENFORCE(dst);
memcpy(dst, src, nbytes);
}
template <>
inline void IDEEPContext::CopyBytes<CPUContext, IDEEPContext>(
size_t nbytes,
Reported by FlawFinder.
Line: 155
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
}
CAFFE_ENFORCE(src);
CAFFE_ENFORCE(dst);
memcpy(dst, src, nbytes);
}
template <>
inline void IDEEPContext::CopyBytes<IDEEPContext, CPUContext>(
size_t nbytes,
Reported by FlawFinder.
Line: 168
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
}
CAFFE_ENFORCE(src);
CAFFE_ENFORCE(dst);
memcpy(dst, src, nbytes);
}
} // namespace caffe2
Reported by FlawFinder.
caffe2/python/helpers/algebra.py
4 issues
Line: 16
Column: 1
return model.net.Transpose(blob_in, blob_out, **kwargs)
def sum(model, blob_in, blob_out, **kwargs):
"""Sum"""
return model.net.Sum(blob_in, blob_out, **kwargs)
def reduce_sum(model, blob_in, blob_out, **kwargs):
Reported by Pylint.
Line: 1
Column: 1
## @package algebra
# Module caffe2.python.helpers.algebra
def transpose(model, blob_in, blob_out, use_cudnn=False, **kwargs):
Reported by Pylint.
Line: 40
Column: 1
"""ArgMin"""
return model.net.ArgMin(blob_in, blob_out, **kwargs)
def batch_mat_mul(model, blob_in, blob_out,
enable_tensor_core=False, **kwargs):
if enable_tensor_core:
kwargs['engine'] = 'TENSORCORE'
return model.net.BatchMatMul(blob_in, blob_out, **kwargs)
Reported by Pylint.
Line: 47
Column: 1
return model.net.BatchMatMul(blob_in, blob_out, **kwargs)
def sparse_lengths_sum_4bit_rowwise_sparse(model, blob_in, blob_out, **kwargs):
return model.net.SparseLengthsSum4BitRowwiseSparse(blob_in, blob_out, **kwargs)
Reported by Pylint.
aten/src/ATen/quantized/Quantizer.h
4 issues
Line: 147
Column: 18
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
auto* other_per_channel_affine =
static_cast<PerChannelAffineQuantizer*>(other.get());
return scalar_type() == other_per_channel_affine->scalar_type() &&
scales().equal(other_per_channel_affine->scales()) &&
zero_points().equal(other_per_channel_affine->zero_points()) &&
axis() == other_per_channel_affine->axis();
}
protected:
Reported by FlawFinder.
Line: 148
Column: 23
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
static_cast<PerChannelAffineQuantizer*>(other.get());
return scalar_type() == other_per_channel_affine->scalar_type() &&
scales().equal(other_per_channel_affine->scales()) &&
zero_points().equal(other_per_channel_affine->zero_points()) &&
axis() == other_per_channel_affine->axis();
}
protected:
Tensor scales_;
Reported by FlawFinder.
Line: 197
Column: 18
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
auto* other_per_channel_float_qparams =
static_cast<PerChannelAffineFloatQParamsQuantizer*>(other.get());
return scalar_type() == other_per_channel_float_qparams->scalar_type() &&
scales().equal(other_per_channel_float_qparams->scales()) &&
zero_points().equal(other_per_channel_float_qparams->zero_points()) &&
axis() == other_per_channel_float_qparams->axis();
}
};
Reported by FlawFinder.
Line: 198
Column: 23
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
static_cast<PerChannelAffineFloatQParamsQuantizer*>(other.get());
return scalar_type() == other_per_channel_float_qparams->scalar_type() &&
scales().equal(other_per_channel_float_qparams->scales()) &&
zero_points().equal(other_per_channel_float_qparams->zero_points()) &&
axis() == other_per_channel_float_qparams->axis();
}
};
// This is an internal utility function for getting at the QTensorImpl,
Reported by FlawFinder.
aten/src/ATen/native/vulkan/ops/Tensor.h
4 issues
Line: 285
Column: 25
CWE codes:
362/367!
Suggestion:
Set up the correct permissions (e.g., using setuid()) and try to open the file directly
struct Bundle final {
struct Buffer final {
VkPipelineStageFlags stage;
VkAccessFlags access;
operator bool() const;
} staging, buffer;
struct Image final {
Reported by FlawFinder.
Line: 292
Column: 25
CWE codes:
362/367!
Suggestion:
Set up the correct permissions (e.g., using setuid()) and try to open the file directly
struct Image final {
VkPipelineStageFlags stage;
VkAccessFlags access;
VkImageLayout layout;
operator bool() const;
} image;
};
Reported by FlawFinder.
Line: 548
Column: 17
CWE codes:
362/367!
Suggestion:
Set up the correct permissions (e.g., using setuid()) and try to open the file directly
inline vTensor::View::State::Bundle::Buffer::operator bool() const {
return (0u != stage) &&
(0u != access);
}
inline vTensor::View::State::Bundle::Image::operator bool() const {
return (0u != stage) &&
(0u != access) &&
Reported by FlawFinder.
Line: 553
Column: 17
CWE codes:
362/367!
Suggestion:
Set up the correct permissions (e.g., using setuid()) and try to open the file directly
inline vTensor::View::State::Bundle::Image::operator bool() const {
return (0u != stage) &&
(0u != access) &&
(VK_IMAGE_LAYOUT_UNDEFINED != layout);
}
inline bool vTensor::View::State::is_available(
const Component::Flags components) const {
Reported by FlawFinder.
caffe2/python/layers/__init__.py
4 issues
Line: 9
Column: 1
from importlib import import_module
import pkgutil
import sys
from . import layers
def import_recursive(package):
"""
Takes a package and imports all modules underneath it
Reported by Pylint.
Line: 29
Column: 9
def find_subclasses_recursively(base_cls, sub_cls):
cur_sub_cls = base_cls.__subclasses__()
sub_cls.update(cur_sub_cls)
for cls in cur_sub_cls:
find_subclasses_recursively(cls, sub_cls)
import_recursive(sys.modules[__name__])
Reported by Pylint.
Line: 1
Column: 1
from importlib import import_module
import pkgutil
import sys
from . import layers
Reported by Pylint.
Line: 26
Column: 1
import_recursive(module)
def find_subclasses_recursively(base_cls, sub_cls):
cur_sub_cls = base_cls.__subclasses__()
sub_cls.update(cur_sub_cls)
for cls in cur_sub_cls:
find_subclasses_recursively(cls, sub_cls)
Reported by Pylint.
caffe2/python/layers/last_n_window_collector.py
4 issues
Line: 1
Column: 1
## @package last_n_window_collector
# Module caffe2.python.layers.last_n_window_collector
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
class LastNWindowCollector(ModelLayer):
"""
Reported by Pylint.
Line: 18
Column: 9
def __init__(self, model, input_record, num_to_collect,
name='last_n_window_collector', **kwargs):
super(LastNWindowCollector, self).__init__(
model, name, input_record, **kwargs)
assert num_to_collect > 0
self.num_to_collect = num_to_collect
assert isinstance(input_record, schema.Scalar), \
"Got {!r}".format(input_record)
Reported by Pylint.
Line: 20
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
name='last_n_window_collector', **kwargs):
super(LastNWindowCollector, self).__init__(
model, name, input_record, **kwargs)
assert num_to_collect > 0
self.num_to_collect = num_to_collect
assert isinstance(input_record, schema.Scalar), \
"Got {!r}".format(input_record)
self.last_n = self.create_param(param_name='last_n',
Reported by Bandit.
Line: 22
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
model, name, input_record, **kwargs)
assert num_to_collect > 0
self.num_to_collect = num_to_collect
assert isinstance(input_record, schema.Scalar), \
"Got {!r}".format(input_record)
self.last_n = self.create_param(param_name='last_n',
shape=[0],
initializer=('ConstantFill', {}),
Reported by Bandit.
caffe2/quantization/server/im2col_dnnlowp.h
4 issues
Line: 57
Column: 11
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
const auto iy = y * stride_h + kh;
const auto ix = kw;
if (stride_w == 1) {
memcpy(
dst + (y * output_w),
src + (iy * width + ix),
sizeof(T) * output_w);
} else {
for (auto x = 0; x < output_w; x++) {
Reported by FlawFinder.
Line: 63
Column: 13
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
sizeof(T) * output_w);
} else {
for (auto x = 0; x < output_w; x++) {
memcpy(
dst + (y * output_w + x),
src + (iy * width + ix + x * stride_w),
sizeof(T));
}
}
Reported by FlawFinder.
Line: 226
Column: 15
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
for (int iw = w_pad; iw < w_pad + dkernel_w; iw += dilation_w, ++s) {
if (ih >= 0 && ih < height && iw >= 0 && iw < width) {
for (int g = 0; g < groups; ++g) {
memcpy(
data_col_temp +
((g * kernel_h + r) * kernel_w + s) * (channels / groups),
data_im + (ih * width + iw) * channels +
g * (channels / groups),
sizeof(T) * (channels / groups));
Reported by FlawFinder.
Line: 313
Column: 19
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
if (it >= 0 && it < num_frames && ih >= 0 && ih < height &&
iw >= 0 && iw < width) {
for (int g = 0; g < groups; ++g) {
memcpy(
data_col_temp +
(((g * kernel_t + q) * kernel_h + r) * kernel_w + s) *
(channels / groups),
data_im + ((it * height + ih) * width + iw) * channels +
g * (channels / groups),
Reported by FlawFinder.
test/cpp/c10d/example/allreduce.cpp
4 issues
Line: 8
Column: 19
CWE codes:
807
20
Suggestion:
Check environment variables carefully before using them
using namespace ::c10d;
int main(int argc, char** argv) {
int rank = atoi(getenv("RANK"));
int size = atoi(getenv("SIZE"));
auto store = c10::make_intrusive<FileStore>("/tmp/c10d_example", size);
ProcessGroupGloo pg(store, rank, size);
// Create some tensors
Reported by FlawFinder.
Line: 9
Column: 19
CWE codes:
807
20
Suggestion:
Check environment variables carefully before using them
int main(int argc, char** argv) {
int rank = atoi(getenv("RANK"));
int size = atoi(getenv("SIZE"));
auto store = c10::make_intrusive<FileStore>("/tmp/c10d_example", size);
ProcessGroupGloo pg(store, rank, size);
// Create some tensors
const auto ntensors = 10;
Reported by FlawFinder.
Line: 8
Column: 14
CWE codes:
190
Suggestion:
If source untrusted, check both minimum and maximum, even if the input had no minus sign (large numbers can roll over into negative number; consider saving to an unsigned value if that is intended)
using namespace ::c10d;
int main(int argc, char** argv) {
int rank = atoi(getenv("RANK"));
int size = atoi(getenv("SIZE"));
auto store = c10::make_intrusive<FileStore>("/tmp/c10d_example", size);
ProcessGroupGloo pg(store, rank, size);
// Create some tensors
Reported by FlawFinder.
Line: 9
Column: 14
CWE codes:
190
Suggestion:
If source untrusted, check both minimum and maximum, even if the input had no minus sign (large numbers can roll over into negative number; consider saving to an unsigned value if that is intended)
int main(int argc, char** argv) {
int rank = atoi(getenv("RANK"));
int size = atoi(getenv("SIZE"));
auto store = c10::make_intrusive<FileStore>("/tmp/c10d_example", size);
ProcessGroupGloo pg(store, rank, size);
// Create some tensors
const auto ntensors = 10;
Reported by FlawFinder.
test/cpp/rpc/test_wire_serialization.cpp
4 issues
Line: 31
Column: 26
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
memcmp(deser.first.data(), payload.data(), payload.size()) == 0);
}
for (size_t i = 0; i < tensors.size(); ++i) {
EXPECT_TRUE(torch::equal(tensors[i], deser.second[i]));
}
};
run("", {});
run("hi", {});
run("", {torch::randn({5, 5})});
Reported by FlawFinder.
Line: 50
Column: 22
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
EXPECT_EQ(tiny.storage().nbytes() / tiny.dtype().itemsize(), k1K * k1K);
auto ser = torch::distributed::rpc::wireSerialize({}, {tiny});
auto deser = torch::distributed::rpc::wireDeserialize(ser.data(), ser.size());
EXPECT_TRUE(torch::equal(tiny, deser.second[0]));
EXPECT_LT(ser.size(), (tiny.element_size() * k1K) + k1K);
}
TEST(WireSerialize, CloneSparseTensors) {
constexpr size_t k1K = 1024;
Reported by FlawFinder.
Line: 63
Column: 22
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
at::Tensor tiny = big.select(0, 2); // Select a row in the middle
auto v2 = torch::distributed::rpc::cloneSparseTensors({tiny});
EXPECT_NE(&v2.get(0).storage(), &tiny.storage()); // Cloned.
EXPECT_TRUE(torch::equal(v2.get(0), tiny));
at::Tensor sparse = at::empty({2, 3}, at::dtype<float>().layout(at::kSparse));
auto v3 = torch::distributed::rpc::cloneSparseTensors({sparse});
// There is no storage() to compare, but at least confirm equality.
EXPECT_TRUE(v3.get(0).is_same(sparse));
Reported by FlawFinder.
Line: 103
Column: 22
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
at::Tensor main = at::empty({2, 3}, at::dtype<float>().layout(at::kSparse));
auto ser = torch::distributed::rpc::wireSerialize({}, {main.to(at::kSparse)});
auto deser = torch::distributed::rpc::wireDeserialize(ser.data(), ser.size());
EXPECT_TRUE(torch::equal(main, deser.second[0]));
}
Reported by FlawFinder.