The following issues were found:
benchmarks/operator_benchmark/common/repeat_benchmark.py
8 issues
Line: 2
Column: 1
import numpy as np
import torch
import time
"""Microbenchmarks for Tensor repeat operator. Supports PyTorch."""
input_shapes = (
(4, 4, 1),
Reported by Pylint.
Line: 6
Column: 1
import time
"""Microbenchmarks for Tensor repeat operator. Supports PyTorch."""
input_shapes = (
(4, 4, 1),
(16, 1, 32),
(64, 64, 1, 1),
Reported by Pylint.
Line: 31
Column: 5
DTYPE_TO_BYTES = {'float' : 4}
def generate_data_for_repeat():
input_tensors = [torch.randn(*input_shape) for input_shape in input_shapes]
total_num_elements = 0
for input_tensor, repeat in zip(input_tensors, repeats):
total_num_elements += input_tensor.numel()
total_num_elements += input_tensor.numel() * np.prod(repeat)
return input_tensors, (total_num_elements * DTYPE_TO_BYTES['float'])
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
import torch
import time
"""Microbenchmarks for Tensor repeat operator. Supports PyTorch."""
input_shapes = (
(4, 4, 1),
Reported by Pylint.
Line: 4
Column: 1
import numpy as np
import torch
import time
"""Microbenchmarks for Tensor repeat operator. Supports PyTorch."""
input_shapes = (
(4, 4, 1),
Reported by Pylint.
Line: 30
Column: 1
NUM_BENCHMARK_ITERS = 10
DTYPE_TO_BYTES = {'float' : 4}
def generate_data_for_repeat():
input_tensors = [torch.randn(*input_shape) for input_shape in input_shapes]
total_num_elements = 0
for input_tensor, repeat in zip(input_tensors, repeats):
total_num_elements += input_tensor.numel()
total_num_elements += input_tensor.numel() * np.prod(repeat)
Reported by Pylint.
Line: 41
Column: 1
input_tensors, total_bytes = generate_data_for_repeat()
BYTES_TO_MB = (1. / 1000. / 1000.)
def pt_repeat(input_tensor, repeat):
return input_tensor.repeat(repeat)
def pt_repeat_n_times(niters):
for _ in range(niters):
for input_tensor, repeat in zip(input_tensors, repeats):
Reported by Pylint.
Line: 44
Column: 1
def pt_repeat(input_tensor, repeat):
return input_tensor.repeat(repeat)
def pt_repeat_n_times(niters):
for _ in range(niters):
for input_tensor, repeat in zip(input_tensors, repeats):
pt_repeat(input_tensor, repeat)
if __name__ == "__main__":
Reported by Pylint.
.circleci/cimodel/data/simple/nightly_android.py
8 issues
Line: 1
Column: 1
from cimodel.data.simple.util.docker_constants import (
DOCKER_IMAGE_NDK,
DOCKER_REQUIREMENT_NDK
)
class AndroidNightlyJob:
def __init__(self,
variant,
Reported by Pylint.
Line: 7
Column: 1
)
class AndroidNightlyJob:
def __init__(self,
variant,
template_name,
extra_props=None,
with_docker=True,
Reported by Pylint.
Line: 7
Column: 1
)
class AndroidNightlyJob:
def __init__(self,
variant,
template_name,
extra_props=None,
with_docker=True,
Reported by Pylint.
Line: 8
Column: 5
class AndroidNightlyJob:
def __init__(self,
variant,
template_name,
extra_props=None,
with_docker=True,
requires=None,
Reported by Pylint.
Line: 23
Column: 5
self.requires = requires
self.no_build_suffix = no_build_suffix
def gen_tree(self):
base_name_parts = [
"pytorch",
"linux",
"xenial",
Reported by Pylint.
Line: 67
Column: 1
"nightly_pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_32_build",
"nightly_pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_64_build",
"nightly_pytorch_linux_xenial_py3_clang5_android_ndk_r19c_arm_v7a_build",
"nightly_pytorch_linux_xenial_py3_clang5_android_ndk_r19c_arm_v8a_build"]),
AndroidNightlyJob(["x86_32_android_publish_snapshot"], "pytorch_android_publish_snapshot",
extra_props={"context": "org-member"},
with_docker=False,
requires=["nightly_pytorch_linux_xenial_py3_clang5_android_ndk_r19c_android_gradle_build"],
no_build_suffix=True),
Reported by Pylint.
Line: 71
Column: 1
AndroidNightlyJob(["x86_32_android_publish_snapshot"], "pytorch_android_publish_snapshot",
extra_props={"context": "org-member"},
with_docker=False,
requires=["nightly_pytorch_linux_xenial_py3_clang5_android_ndk_r19c_android_gradle_build"],
no_build_suffix=True),
]
def get_workflow_jobs():
Reported by Pylint.
Line: 76
Column: 1
]
def get_workflow_jobs():
return [item.gen_tree() for item in WORKFLOW_DATA]
Reported by Pylint.
caffe2/python/examples/resnet50_trainer.py
8 issues
Line: 685
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b108_hardcoded_tmp_directory.html
help="Host of Redis server (for rendezvous)")
parser.add_argument("--redis_port", type=int, default=6379,
help="Port of Redis server (for rendezvous)")
parser.add_argument("--file_store_path", type=str, default="/tmp",
help="Path to directory to use for rendezvous")
parser.add_argument("--save_model_name", type=str, default="resnext_model",
help="Save the trained model to a given name")
parser.add_argument("--load_model_path", type=str, default=None,
help="Load previously saved model to continue training")
Reported by Bandit.
Line: 157
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
else:
init_net.RunAllOnGPU()
assert workspace.RunNetOnce(predict_init_net)
assert workspace.RunNetOnce(init_net)
# Hack: fix iteration counter which is in CUDA context after load model
itercnt = workspace.FetchBlob("optimizer_iteration")
workspace.FeedBlob(
Reported by Bandit.
Line: 158
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
init_net.RunAllOnGPU()
assert workspace.RunNetOnce(predict_init_net)
assert workspace.RunNetOnce(init_net)
# Hack: fix iteration counter which is in CUDA context after load model
itercnt = workspace.FetchBlob("optimizer_iteration")
workspace.FeedBlob(
"optimizer_iteration",
Reported by Bandit.
Line: 247
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
'top5_test_accuracy': test_accuracy_top5,
}
)
assert loss < 40, "Exploded gradients :("
# TODO: add checkpointing
return epoch + 1
Reported by Bandit.
Line: 272
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
# Verify valid batch size
total_batch_size = args.batch_size
batch_per_device = total_batch_size // num_gpus
assert \
total_batch_size % num_gpus == 0, \
"Number of GPUs must divide batch size"
# Verify valid image mean/std per channel
if args.image_mean_per_channel:
Reported by Bandit.
Line: 278
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
# Verify valid image mean/std per channel
if args.image_mean_per_channel:
assert \
len(args.image_mean_per_channel) == args.num_channels, \
"The number of channels of image mean doesn't match input"
if args.image_std_per_channel:
assert \
Reported by Bandit.
Line: 283
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
"The number of channels of image mean doesn't match input"
if args.image_std_per_channel:
assert \
len(args.image_std_per_channel) == args.num_channels, \
"The number of channels of image std doesn't match input"
# Round down epoch size to closest multiple of batch size across machines
global_batch_size = total_batch_size * args.num_shards
Reported by Bandit.
Line: 291
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
global_batch_size = total_batch_size * args.num_shards
epoch_iters = int(args.epoch_size / global_batch_size)
assert \
epoch_iters > 0, \
"Epoch size must be larger than batch size times shard count"
args.epoch_size = epoch_iters * global_batch_size
log.info("Using epoch size: {}".format(args.epoch_size))
Reported by Bandit.
benchmarks/compare-fastrnn-results.py
8 issues
Line: 13
Column: 5
return '{suite}[{test}]:{fwd_bwd}'.format(suite=suite_name, test=test_name, fwd_bwd='bwd' if bwd else 'fwd')
def get_times(json_data):
r = {}
for fwd_bwd in json_data:
for test_name in json_data[fwd_bwd]:
name = construct_name(fwd_bwd, test_name)
r[name] = json_data[fwd_bwd][test_name]
return r
Reported by Pylint.
Line: 1
Column: 1
import argparse
import json
from collections import namedtuple
Result = namedtuple("Result", ["name", "base_time", "diff_time"])
def construct_name(fwd_bwd, test_name):
bwd = 'backward' in fwd_bwd
suite_name = fwd_bwd.replace('-backward', '')
Reported by Pylint.
Line: 1
Column: 1
import argparse
import json
from collections import namedtuple
Result = namedtuple("Result", ["name", "base_time", "diff_time"])
def construct_name(fwd_bwd, test_name):
bwd = 'backward' in fwd_bwd
suite_name = fwd_bwd.replace('-backward', '')
Reported by Pylint.
Line: 7
Column: 1
Result = namedtuple("Result", ["name", "base_time", "diff_time"])
def construct_name(fwd_bwd, test_name):
bwd = 'backward' in fwd_bwd
suite_name = fwd_bwd.replace('-backward', '')
return '{suite}[{test}]:{fwd_bwd}'.format(suite=suite_name, test=test_name, fwd_bwd='bwd' if bwd else 'fwd')
def get_times(json_data):
Reported by Pylint.
Line: 10
Column: 1
def construct_name(fwd_bwd, test_name):
bwd = 'backward' in fwd_bwd
suite_name = fwd_bwd.replace('-backward', '')
return '{suite}[{test}]:{fwd_bwd}'.format(suite=suite_name, test=test_name, fwd_bwd='bwd' if bwd else 'fwd')
def get_times(json_data):
r = {}
for fwd_bwd in json_data:
for test_name in json_data[fwd_bwd]:
Reported by Pylint.
Line: 12
Column: 1
suite_name = fwd_bwd.replace('-backward', '')
return '{suite}[{test}]:{fwd_bwd}'.format(suite=suite_name, test=test_name, fwd_bwd='bwd' if bwd else 'fwd')
def get_times(json_data):
r = {}
for fwd_bwd in json_data:
for test_name in json_data[fwd_bwd]:
name = construct_name(fwd_bwd, test_name)
r[name] = json_data[fwd_bwd][test_name]
Reported by Pylint.
Line: 13
Column: 5
return '{suite}[{test}]:{fwd_bwd}'.format(suite=suite_name, test=test_name, fwd_bwd='bwd' if bwd else 'fwd')
def get_times(json_data):
r = {}
for fwd_bwd in json_data:
for test_name in json_data[fwd_bwd]:
name = construct_name(fwd_bwd, test_name)
r[name] = json_data[fwd_bwd][test_name]
return r
Reported by Pylint.
Line: 51
Column: 1
if args.format == 'md':
print(header_fmt_str.format(":---", "---:", "---:", "---:"))
for r in results:
print(data_fmt_str.format(r.name, r.base_time, r.diff_time, (r.diff_time / r.base_time - 1.0) * 100.0))
elif args.format == 'json':
print(json.dumps(results))
else:
raise ValueError('Unknown output format: ' + args.format)
Reported by Pylint.
aten/src/ATen/cpu/vec/vec256/vec256_int.h
8 issues
Line: 103
Column: 10
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
for (auto i = 0; i < size(); ++i) {
tmp_values[i] = 0;
}
std::memcpy(tmp_values, ptr, count * sizeof(int64_t));
return loadu(tmp_values);
}
void store(void* ptr, int count = size()) const {
if (count == size()) {
// ptr need not to be aligned here. See
Reported by FlawFinder.
Line: 114
Column: 12
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
} else if (count > 0) {
__at_align__ int64_t tmp_values[size()];
_mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values);
std::memcpy(ptr, tmp_values, count * sizeof(int64_t));
}
}
const int64_t& operator[](int idx) const = delete;
int64_t& operator[](int idx) = delete;
Vectorized<int64_t> abs() const {
Reported by FlawFinder.
Line: 226
Column: 10
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
for (auto i = 0; i < size(); ++i) {
tmp_values[i] = 0;
}
std::memcpy(tmp_values, ptr, count * sizeof(int32_t));
return loadu(tmp_values);
}
void store(void* ptr, int count = size()) const {
if (count == size()) {
// ptr need not to be aligned here. See
Reported by FlawFinder.
Line: 237
Column: 12
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
} else if (count > 0) {
__at_align__ int32_t tmp_values[size()];
_mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values);
std::memcpy(ptr, tmp_values, count * sizeof(int32_t));
}
}
void dump() const {
for (size_t i = 0; i < size(); ++i) {
std::cout << (int)((value_type*)&values)[i] << " ";
Reported by FlawFinder.
Line: 446
Column: 10
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
for (auto i = 0; i < size(); ++i) {
tmp_values[i] = 0;
}
std::memcpy(tmp_values, ptr, count * sizeof(int16_t));
return loadu(tmp_values);
}
void store(void* ptr, int count = size()) const {
if (count == size()) {
// ptr need not to be aligned here. See
Reported by FlawFinder.
Line: 457
Column: 12
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
} else if (count > 0) {
__at_align__ int16_t tmp_values[size()];
_mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values);
std::memcpy(ptr, tmp_values, count * sizeof(int16_t));
}
}
const int16_t& operator[](int idx) const = delete;
int16_t& operator[](int idx) = delete;
Vectorized<int16_t> abs() const {
Reported by FlawFinder.
Line: 695
Column: 10
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
for (size_t i = 0; i < size(); ++i) {
tmp_values[i] = 0;
}
std::memcpy(tmp_values, ptr, count * sizeof(int8_t));
return loadu(tmp_values);
}
void store(void* ptr, int count = size()) const {
if (count == size()) {
// ptr need not to be aligned here. See
Reported by FlawFinder.
Line: 706
Column: 12
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
} else if (count > 0) {
__at_align__ int8_t tmp_values[size()];
_mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values);
std::memcpy(ptr, tmp_values, count * sizeof(int8_t));
}
}
const int8_t& operator[](int idx) const = delete;
int8_t& operator[](int idx) = delete;
Vectorized<int8_t> abs() const {
Reported by FlawFinder.
.jenkins/pytorch/win-test-helpers/run_python_nn_smoketests.py
8 issues
Line: 1
Column: 1
#!/usr/bin/env python3
import subprocess
import os
COMMON_TESTS = [
(
"Checking that torch is available",
"import torch",
Reported by Pylint.
Line: 3
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b404-import-subprocess
#!/usr/bin/env python3
import subprocess
import os
COMMON_TESTS = [
(
"Checking that torch is available",
"import torch",
Reported by Bandit.
Line: 46
Column: 9
for description, python_commands in TESTS:
print(description)
command_args = ["python", "-c", python_commands]
command_string = " ".join(command_args)
print("Command:", command_string)
try:
subprocess.check_call(command_args)
except subprocess.CalledProcessError as e:
sdk_root = os.environ.get('WindowsSdkDir', 'C:\\Program Files (x86)\\Windows Kits\\10')
Reported by Pylint.
Line: 49
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b603_subprocess_without_shell_equals_true.html
command_string = " ".join(command_args)
print("Command:", command_string)
try:
subprocess.check_call(command_args)
except subprocess.CalledProcessError as e:
sdk_root = os.environ.get('WindowsSdkDir', 'C:\\Program Files (x86)\\Windows Kits\\10')
debugger = os.path.join(sdk_root, 'Debuggers', 'x64', 'cdb.exe')
if os.path.exists(debugger):
command_args = [debugger, "-o", "-c", "~*g; q"] + command_args
Reported by Bandit.
Line: 50
Column: 9
print("Command:", command_string)
try:
subprocess.check_call(command_args)
except subprocess.CalledProcessError as e:
sdk_root = os.environ.get('WindowsSdkDir', 'C:\\Program Files (x86)\\Windows Kits\\10')
debugger = os.path.join(sdk_root, 'Debuggers', 'x64', 'cdb.exe')
if os.path.exists(debugger):
command_args = [debugger, "-o", "-c", "~*g; q"] + command_args
command_string = " ".join(command_args)
Reported by Pylint.
Line: 55
Column: 17
debugger = os.path.join(sdk_root, 'Debuggers', 'x64', 'cdb.exe')
if os.path.exists(debugger):
command_args = [debugger, "-o", "-c", "~*g; q"] + command_args
command_string = " ".join(command_args)
print("Reruning with traceback enabled")
print("Command:", command_string)
subprocess.run(command_args, check=False)
exit(e.returncode)
Reported by Pylint.
Line: 58
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b603_subprocess_without_shell_equals_true.html
command_string = " ".join(command_args)
print("Reruning with traceback enabled")
print("Command:", command_string)
subprocess.run(command_args, check=False)
exit(e.returncode)
Reported by Bandit.
Line: 59
Column: 13
print("Reruning with traceback enabled")
print("Command:", command_string)
subprocess.run(command_args, check=False)
exit(e.returncode)
Reported by Pylint.
caffe2/contrib/playground/resnetdemo/explicit_resnet_param_update.py
8 issues
Line: 10
Column: 47
from caffe2.proto import caffe2_pb2
def gen_param_update_builder_fun(self, model, dataset, is_train):
if not is_train:
return None
else:
# from sherlok
for idx in range(self.opts['distributed']['first_xpu_id'],
Reported by Pylint.
Line: 10
Column: 40
from caffe2.proto import caffe2_pb2
def gen_param_update_builder_fun(self, model, dataset, is_train):
if not is_train:
return None
else:
# from sherlok
for idx in range(self.opts['distributed']['first_xpu_id'],
Reported by Pylint.
Line: 36
Column: 13
[], "ONE", shape=[1], value=1.0
)
'''
Add the momentum-SGD update.
'''
params = model.GetParams()
assert(len(params) > 0)
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import workspace, core
from caffe2.proto import caffe2_pb2
Reported by Pylint.
Line: 10
Column: 1
from caffe2.proto import caffe2_pb2
def gen_param_update_builder_fun(self, model, dataset, is_train):
if not is_train:
return None
else:
# from sherlok
for idx in range(self.opts['distributed']['first_xpu_id'],
Reported by Pylint.
Line: 11
Column: 5
def gen_param_update_builder_fun(self, model, dataset, is_train):
if not is_train:
return None
else:
# from sherlok
for idx in range(self.opts['distributed']['first_xpu_id'],
self.opts['distributed']['first_xpu_id'] +
Reported by Pylint.
Line: 40
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
Add the momentum-SGD update.
'''
params = model.GetParams()
assert(len(params) > 0)
for param in params:
param_grad = model.param_to_grad[param]
param_momentum = model.param_init_net.ConstantFill(
[param], param + '_momentum', value=0.0
Reported by Bandit.
Line: 40
Column: 1
Add the momentum-SGD update.
'''
params = model.GetParams()
assert(len(params) > 0)
for param in params:
param_grad = model.param_to_grad[param]
param_momentum = model.param_init_net.ConstantFill(
[param], param + '_momentum', value=0.0
Reported by Pylint.
aten/src/ATen/test/wrapdim_test.cpp
8 issues
Line: 8
Column: 26
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
using namespace at;
void TestSimpleCase(DeprecatedTypeProperties& T) {
auto a = randn({2, 3, 4, 5}, T);
ASSERT_TRUE(a.prod(-4).equal(a.prod(0)));
ASSERT_TRUE(a.prod(3).equal(a.prod(-1)));
}
void TestExpressionSpecification(DeprecatedTypeProperties& T) {
auto a = randn({2, 3, 4, 5}, T);
Reported by FlawFinder.
Line: 9
Column: 25
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
void TestSimpleCase(DeprecatedTypeProperties& T) {
auto a = randn({2, 3, 4, 5}, T);
ASSERT_TRUE(a.prod(-4).equal(a.prod(0)));
ASSERT_TRUE(a.prod(3).equal(a.prod(-1)));
}
void TestExpressionSpecification(DeprecatedTypeProperties& T) {
auto a = randn({2, 3, 4, 5}, T);
ASSERT_TRUE(a.unsqueeze(-5).equal(a.unsqueeze(0)));
Reported by FlawFinder.
Line: 14
Column: 31
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
void TestExpressionSpecification(DeprecatedTypeProperties& T) {
auto a = randn({2, 3, 4, 5}, T);
ASSERT_TRUE(a.unsqueeze(-5).equal(a.unsqueeze(0)));
ASSERT_TRUE(a.unsqueeze(4).equal(a.unsqueeze(-1)));
// can unsqueeze scalar
auto b = randn({}, T);
ASSERT_TRUE(b.unsqueeze(0).equal(b.unsqueeze(-1)));
Reported by FlawFinder.
Line: 15
Column: 30
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
void TestExpressionSpecification(DeprecatedTypeProperties& T) {
auto a = randn({2, 3, 4, 5}, T);
ASSERT_TRUE(a.unsqueeze(-5).equal(a.unsqueeze(0)));
ASSERT_TRUE(a.unsqueeze(4).equal(a.unsqueeze(-1)));
// can unsqueeze scalar
auto b = randn({}, T);
ASSERT_TRUE(b.unsqueeze(0).equal(b.unsqueeze(-1)));
}
Reported by FlawFinder.
Line: 19
Column: 30
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
// can unsqueeze scalar
auto b = randn({}, T);
ASSERT_TRUE(b.unsqueeze(0).equal(b.unsqueeze(-1)));
}
void TestEmptyTensor(DeprecatedTypeProperties& T) {
auto a = randn(0, T);
ASSERT_TRUE(a.prod(0).equal(at::ones({}, T)));
Reported by FlawFinder.
Line: 24
Column: 25
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
void TestEmptyTensor(DeprecatedTypeProperties& T) {
auto a = randn(0, T);
ASSERT_TRUE(a.prod(0).equal(at::ones({}, T)));
}
void TestScalarVs1Dim1Size(DeprecatedTypeProperties& T) {
auto a = randn(1, T);
ASSERT_TRUE(a.prod(0).equal(a.prod(-1)));
Reported by FlawFinder.
Line: 29
Column: 25
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
void TestScalarVs1Dim1Size(DeprecatedTypeProperties& T) {
auto a = randn(1, T);
ASSERT_TRUE(a.prod(0).equal(a.prod(-1)));
a.resize_({});
ASSERT_EQ(a.dim(), 0);
ASSERT_TRUE(a.prod(0).equal(a.prod(-1)));
}
Reported by FlawFinder.
Line: 32
Column: 25
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
ASSERT_TRUE(a.prod(0).equal(a.prod(-1)));
a.resize_({});
ASSERT_EQ(a.dim(), 0);
ASSERT_TRUE(a.prod(0).equal(a.prod(-1)));
}
TEST(TestWrapdim, TestWrapdim) {
manual_seed(123);
DeprecatedTypeProperties& T = CPU(kFloat);
Reported by FlawFinder.
benchmarks/instruction_counts/applications/ci.py
8 issues
Line: 44
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b303-md5
)
keys = tuple({str(work_order): None for work_order in work_orders}.keys())
md5 = hashlib.md5()
for key in keys:
md5.update(key.encode("utf-8"))
# Warn early, since collection takes a long time.
if md5.hexdigest() != MD5 and not args.subset:
Reported by Bandit.
Line: 55
Column: 3
results = Runner(work_orders, cadence=30.0).run()
# TODO: Annotate with TypedDict when 3.8 is the minimum supported verson.
grouped_results: Dict[str, Dict[str, List[Union[float, int]]]] = {
key: {"times": [], "counts": []} for key in keys}
for work_order, r in results.items():
key = str(work_order)
Reported by Pylint.
Line: 22
Column: 1
MD5 = "4d55e8abf881ad38bb617a96714c1296"
def main(argv: List[str]) -> None:
parser = argparse.ArgumentParser()
parser.add_argument("--destination", type=str, default=None)
parser.add_argument("--subset", action="store_true")
args = parser.parse_args(argv)
Reported by Pylint.
Line: 22
Column: 1
MD5 = "4d55e8abf881ad38bb617a96714c1296"
def main(argv: List[str]) -> None:
parser = argparse.ArgumentParser()
parser.add_argument("--destination", type=str, default=None)
parser.add_argument("--subset", action="store_true")
args = parser.parse_args(argv)
Reported by Pylint.
Line: 28
Column: 5
parser.add_argument("--subset", action="store_true")
args = parser.parse_args(argv)
t0 = int(time.time())
version = VERSION
benchmarks = materialize(BENCHMARKS)
# Useful for local development, since e2e time for the full suite is O(1 hour)
in_debug_mode = (args.subset or args.destination is None)
Reported by Pylint.
Line: 59
Column: 21
grouped_results: Dict[str, Dict[str, List[Union[float, int]]]] = {
key: {"times": [], "counts": []} for key in keys}
for work_order, r in results.items():
key = str(work_order)
grouped_results[key]["times"].extend(r.wall_times)
grouped_results[key]["counts"].extend(r.instructions)
final_results = {
Reported by Pylint.
Line: 73
Column: 46
}
if args.destination:
with open(args.destination, "wt") as f:
json.dump(final_results, f)
if in_debug_mode:
result_str = json.dumps(final_results)
print(f"{result_str[:30]} ... {result_str[-30:]}\n")
Reported by Pylint.
Line: 79
Column: 9
if in_debug_mode:
result_str = json.dumps(final_results)
print(f"{result_str[:30]} ... {result_str[-30:]}\n")
import pdb
pdb.set_trace()
Reported by Pylint.
.circleci/ensure-consistency.py
8 issues
Line: 1
Column: 1
#!/usr/bin/env python3
import os
import subprocess
import sys
import tempfile
import generate_config_yml
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/env python3
import os
import subprocess
import sys
import tempfile
import generate_config_yml
Reported by Pylint.
Line: 4
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b404-import-subprocess
#!/usr/bin/env python3
import os
import subprocess
import sys
import tempfile
import generate_config_yml
Reported by Bandit.
Line: 23
Column: 1
"""
def check_consistency():
_, temp_filename = tempfile.mkstemp("-generated-config.yml")
with open(temp_filename, "w") as fh:
generate_config_yml.stitch_sources(fh)
Reported by Pylint.
Line: 27
Column: 38
_, temp_filename = tempfile.mkstemp("-generated-config.yml")
with open(temp_filename, "w") as fh:
generate_config_yml.stitch_sources(fh)
try:
subprocess.check_call(["cmp", temp_filename, CHECKED_IN_FILE])
except subprocess.CalledProcessError:
Reported by Pylint.
Line: 31
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b603_subprocess_without_shell_equals_true.html
generate_config_yml.stitch_sources(fh)
try:
subprocess.check_call(["cmp", temp_filename, CHECKED_IN_FILE])
except subprocess.CalledProcessError:
sys.exit(ERROR_MESSAGE_TEMPLATE % (CHECKED_IN_FILE, REGENERATION_SCRIPT, PARENT_DIR, README_PATH))
finally:
os.remove(temp_filename)
Reported by Bandit.
Line: 31
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b607_start_process_with_partial_path.html
generate_config_yml.stitch_sources(fh)
try:
subprocess.check_call(["cmp", temp_filename, CHECKED_IN_FILE])
except subprocess.CalledProcessError:
sys.exit(ERROR_MESSAGE_TEMPLATE % (CHECKED_IN_FILE, REGENERATION_SCRIPT, PARENT_DIR, README_PATH))
finally:
os.remove(temp_filename)
Reported by Bandit.
Line: 33
Column: 1
try:
subprocess.check_call(["cmp", temp_filename, CHECKED_IN_FILE])
except subprocess.CalledProcessError:
sys.exit(ERROR_MESSAGE_TEMPLATE % (CHECKED_IN_FILE, REGENERATION_SCRIPT, PARENT_DIR, README_PATH))
finally:
os.remove(temp_filename)
if __name__ == "__main__":
Reported by Pylint.