The following issues were found:
aten/src/ATen/native/vulkan/api/Resource.h
3 issues
Line: 55
Column: 37
CWE codes:
362/367!
Suggestion:
Set up the correct permissions (e.g., using setuid()) and try to open the file directly
Write = 1u << 1u,
};
template<typename Type, Flags access>
using Pointer = std::add_pointer_t<
std::conditional_t<
0u != (access & Write),
Type,
std::add_const_t<Type>>>;
Reported by FlawFinder.
Line: 58
Column: 22
CWE codes:
362/367!
Suggestion:
Set up the correct permissions (e.g., using setuid()) and try to open the file directly
template<typename Type, Flags access>
using Pointer = std::add_pointer_t<
std::conditional_t<
0u != (access & Write),
Type,
std::add_const_t<Type>>>;
};
class Scope;
Reported by FlawFinder.
Line: 350
Column: 21
CWE codes:
362/367!
Suggestion:
Set up the correct permissions (e.g., using setuid()) and try to open the file directly
Scope(
VmaAllocator allocator,
VmaAllocation allocation,
Access::Flags access);
void operator()(const void* data) const;
private:
VmaAllocator allocator_;
Reported by FlawFinder.
android/pytorch_android/src/main/java/org/pytorch/PyTorchCodegenLoader.java
3 issues
Line: 5
import com.facebook.soloader.nativeloader.NativeLoader;
public class PyTorchCodegenLoader {
public static void loadNativeLibs() {
try {
NativeLoader.loadLibrary("torch-code-gen");
} catch (Throwable t) {
Reported by PMD.
Line: 10
public static void loadNativeLibs() {
try {
NativeLoader.loadLibrary("torch-code-gen");
} catch (Throwable t) {
// Loading the codegen lib is best-effort since it's only there for query based builds.
}
}
private PyTorchCodegenLoader() {}
Reported by PMD.
Line: 10
public static void loadNativeLibs() {
try {
NativeLoader.loadLibrary("torch-code-gen");
} catch (Throwable t) {
// Loading the codegen lib is best-effort since it's only there for query based builds.
}
}
private PyTorchCodegenLoader() {}
Reported by PMD.
caffe2/distributed/file_store_handler.cc
3 issues
Line: 72
Column: 14
CWE codes:
120/785!
Suggestion:
Ensure that the destination buffer is at least of size MAXPATHLEN, and to protect against implementation problems, the input argument should also be checked to ensure it is no larger than MAXPATHLEN
#else
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
std::array<char, PATH_MAX> buf;
auto ret = realpath(path.c_str(), buf.data());
#endif
CHECK_EQ(buf.data(), ret) << "realpath: " << strerror(errno);
return std::string(buf.data());
}
Reported by FlawFinder.
Line: 151
Column: 14
CWE codes:
362
}
for (const auto& path : paths) {
int fd = open(path.c_str(), O_RDONLY);
if (fd == -1) {
// Only deal with files that don't exist.
// Anything else is a problem.
CHECK_EQ(errno, ENOENT);
Reported by FlawFinder.
Line: 122
Column: 7
CWE codes:
120
20
size_t n = ifs.tellg();
result.resize(n);
ifs.seekg(0);
ifs.read(&result[0], n);
return result;
}
int64_t FileStoreHandler::add(
const std::string& /* unused */,
Reported by FlawFinder.
benchmarks/distributed/rpc/parameter_server/utils.py
3 issues
Line: 1
Column: 1
import torch
RPC_SPARSE = "rpc_sparse"
RPC_DENSE = "rpc_dense"
def sparse_tensor_to_rpc_format(sparse_tensor):
r"""
A helper function creates a list containing the indices, values, and size
Reported by Pylint.
Line: 1
Column: 1
import torch
RPC_SPARSE = "rpc_sparse"
RPC_DENSE = "rpc_dense"
def sparse_tensor_to_rpc_format(sparse_tensor):
r"""
A helper function creates a list containing the indices, values, and size
Reported by Pylint.
Line: 63
Column: 12
def callback(fut):
cref.record_end("hook_future_metric", key)
tensor = fut.wait()
if type(tensor) is list:
tensor = sparse_rpc_format_to_tensor(tensor)
tensor = tensor.cuda(cref.rank)
return [tensor]
return fut.then(callback)
Reported by Pylint.
caffe2/python/control_ops_grad_test.py
3 issues
Line: 1
Column: 1
import unittest
from caffe2.python import core, test_util, workspace
from caffe2.python.control_ops_grad import disambiguate_grad_if_op_output
from caffe2.python.model_helper import ModelHelper
Reported by Pylint.
Line: 13
Column: 1
import numpy as np
class TestControl(test_util.TestCase):
def test_disambiguate_grad_if_op_output(self):
workspace.FeedBlob("cond", np.array(True))
workspace.FeedBlob("then_grad", np.array(1))
workspace.FeedBlob("else_grad", np.array(2))
Reported by Pylint.
Line: 14
Column: 5
class TestControl(test_util.TestCase):
def test_disambiguate_grad_if_op_output(self):
workspace.FeedBlob("cond", np.array(True))
workspace.FeedBlob("then_grad", np.array(1))
workspace.FeedBlob("else_grad", np.array(2))
then_model = ModelHelper(name="then_test_model")
Reported by Pylint.
aten/src/ATen/test/vec_test_all_types.h
3 issues
Line: 259
Column: 10
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
To>::type
bit_cast(const From& src) noexcept {
To dst;
std::memcpy(&dst, &src, sizeof(To));
return dst;
}
template <class To, class T>
To bit_cast_ptr(T* p, size_t N = sizeof(To)) noexcept {
Reported by FlawFinder.
Line: 265
Column: 14
CWE codes:
119
120
Suggestion:
Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length
template <class To, class T>
To bit_cast_ptr(T* p, size_t N = sizeof(To)) noexcept {
unsigned char p1[sizeof(To)] = {};
std::memcpy(p1, p, std::min(N, sizeof(To)));
return bit_cast<To>(p1);
}
template <typename T>
Reported by FlawFinder.
Line: 266
Column: 10
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
template <class To, class T>
To bit_cast_ptr(T* p, size_t N = sizeof(To)) noexcept {
unsigned char p1[sizeof(To)] = {};
std::memcpy(p1, p, std::min(N, sizeof(To)));
return bit_cast<To>(p1);
}
template <typename T>
std::enable_if_t<std::is_floating_point<T>::value, bool> check_both_nan(T x,
Reported by FlawFinder.
caffe2/contrib/prof/cuda_profile_ops.cc
3 issues
Line: 40
Column: 19
CWE codes:
377
std::string tmpl = "/tmp/cuda_profile_config.XXXXXX";
CAFFE_ENFORCE_LT(tmpl.size(), buf.size());
memcpy(buf.data(), tmpl.data(), tmpl.size());
auto result = mktemp(buf.data());
CAFFE_ENFORCE_NE(strlen(result), 0, "mktemp: ", strerror(errno));
config_ = result;
// Write configuration to temporary file
{
Reported by FlawFinder.
Line: 39
Column: 5
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
std::array<char, 128> buf;
std::string tmpl = "/tmp/cuda_profile_config.XXXXXX";
CAFFE_ENFORCE_LT(tmpl.size(), buf.size());
memcpy(buf.data(), tmpl.data(), tmpl.size());
auto result = mktemp(buf.data());
CAFFE_ENFORCE_NE(strlen(result), 0, "mktemp: ", strerror(errno));
config_ = result;
// Write configuration to temporary file
Reported by FlawFinder.
Line: 41
Column: 22
CWE codes:
126
CAFFE_ENFORCE_LT(tmpl.size(), buf.size());
memcpy(buf.data(), tmpl.data(), tmpl.size());
auto result = mktemp(buf.data());
CAFFE_ENFORCE_NE(strlen(result), 0, "mktemp: ", strerror(errno));
config_ = result;
// Write configuration to temporary file
{
std::ofstream ofs(config_, std::ios::out | std::ios::trunc);
Reported by FlawFinder.
aten/src/ATen/cpu/vec/vec_base.h
3 issues
Line: 199
Column: 10
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
}
static Vectorized<T> loadu(const void* ptr) {
Vectorized vector;
std::memcpy(vector.values, ptr, VECTOR_WIDTH);
return vector;
}
static Vectorized<T> loadu(const void* ptr, int64_t count) {
Vectorized vector;
std::memcpy(vector.values, ptr, count * sizeof(T));
Reported by FlawFinder.
Line: 204
Column: 10
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
}
static Vectorized<T> loadu(const void* ptr, int64_t count) {
Vectorized vector;
std::memcpy(vector.values, ptr, count * sizeof(T));
return vector;
}
void store(void* ptr, int count = size()) const {
std::memcpy(ptr, values, count * sizeof(T));
}
Reported by FlawFinder.
Line: 208
Column: 10
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
return vector;
}
void store(void* ptr, int count = size()) const {
std::memcpy(ptr, values, count * sizeof(T));
}
int zero_mask() const {
// returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit
int mask = 0;
for (int i = 0; i < size(); ++ i) {
Reported by FlawFinder.
aten/src/ATen/native/quantized/cpu/qupsample_nearest3d.cpp
3 issues
Line: 41
Column: 10
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
// special case: just copy
if (input_depth == output_depth && input_height == output_height && input_width == output_width) {
std::memcpy(o_p, i_p, channels * input_depth * input_height * input_width * sizeof(typename scalar_t::underlying));
return;
}
for (int64_t d2 = 0; d2 < output_depth; ++d2) {
const int64_t d1 =
Reported by FlawFinder.
Line: 94
Column: 12
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
auto* o_p = reinterpret_cast<typename scalar_t::underlying*>(odata + b * output_depth * output_height * output_width * channels);
// special case: just copy
if (input_depth == output_depth && input_height == output_height && input_width == output_width) {
std::memcpy(o_p, i_p, channels * input_depth * input_height * input_width * sizeof(typename scalar_t::underlying));
return;
}
for (int64_t d2 = 0; d2 < output_depth; ++d2) {
const int64_t d1 =
Reported by FlawFinder.
Line: 111
Column: 16
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
const auto* pos1 = &i_p[(d1 * input_height * input_width + h1 * input_width + w1)*channels];
auto* pos2 = &o_p[(d2 * output_height * output_width + h2 * output_width + w2)*channels];
std::memcpy(pos2, pos1, channels * sizeof(typename scalar_t::underlying));
}
}
}
}
}
Reported by FlawFinder.
caffe2/mobile/contrib/snpe/snpe_ffi.cc
3 issues
Line: 27
Column: 50
CWE codes:
362
struct SNPEContext {
public:
SNPEContext(const std::vector<uint8_t>& buffer, const char* input_name, bool enable_logging=false) {
container_ = zdl::DlContainer::IDlContainer::open(buffer);
SNPE_ENFORCE(container_);
zdl::SNPE::SNPEBuilder snpeBuilder(container_.get());
SNPE_ENFORCE(zdl::SNPE::SNPEFactory::isRuntimeAvailable(zdl::DlSystem::Runtime_t::GPU));
Reported by FlawFinder.
Line: 63
Column: 5
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
SNPE_ENFORCE(inputData);
// Copy input data.
memcpy(inputTensor_->begin().dataPointer(), inputData, (count * sizeof(float)));
SNPE_ENFORCE(inputTensor_.get());
// Execute graph in the SNPE runtime.
SNPE_ENFORCE(dnn_->execute(inputTensor_.get(), outputTensors_));
Reported by FlawFinder.
Line: 82
Column: 5
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
void copyOutputTo(float* outputData) {
const auto& outputTensor = outputTensors_.getTensor(*outputTensors_.getTensorNames().begin());
SNPE_ENFORCE(outputTensor);
memcpy(outputData, outputTensor->begin().dataPointer(), (outputTensor->getSize() * sizeof(float)));
}
private:
std::shared_ptr<zdl::DlContainer::IDlContainer> container_;
std::shared_ptr<zdl::SNPE::SNPE> dnn_;
Reported by FlawFinder.