The following issues were found:
caffe2/python/lazy.py
10 issues
Line: 14
Column: 9
def TriggerLazyImport():
global _import_lazy_calls
for lazy in _import_lazy_calls:
lazy()
Reported by Pylint.
Line: 7
Column: 5
_import_lazy_calls = []
def RegisterLazyImport(lazy):
global _import_lazy_calls
_import_lazy_calls += [lazy]
def TriggerLazyImport():
global _import_lazy_calls
Reported by Pylint.
Line: 12
Column: 5
def TriggerLazyImport():
global _import_lazy_calls
for lazy in _import_lazy_calls:
lazy()
Reported by Pylint.
Line: 1
Column: 1
## @package workspace
# Module caffe2.python.lazy
_import_lazy_calls = []
def RegisterLazyImport(lazy):
global _import_lazy_calls
_import_lazy_calls += [lazy]
Reported by Pylint.
Line: 6
Column: 1
_import_lazy_calls = []
def RegisterLazyImport(lazy):
global _import_lazy_calls
_import_lazy_calls += [lazy]
def TriggerLazyImport():
Reported by Pylint.
Line: 6
Column: 1
_import_lazy_calls = []
def RegisterLazyImport(lazy):
global _import_lazy_calls
_import_lazy_calls += [lazy]
def TriggerLazyImport():
Reported by Pylint.
Line: 7
Column: 5
_import_lazy_calls = []
def RegisterLazyImport(lazy):
global _import_lazy_calls
_import_lazy_calls += [lazy]
def TriggerLazyImport():
global _import_lazy_calls
Reported by Pylint.
Line: 11
Column: 1
_import_lazy_calls += [lazy]
def TriggerLazyImport():
global _import_lazy_calls
for lazy in _import_lazy_calls:
lazy()
Reported by Pylint.
Line: 11
Column: 1
_import_lazy_calls += [lazy]
def TriggerLazyImport():
global _import_lazy_calls
for lazy in _import_lazy_calls:
lazy()
Reported by Pylint.
Line: 12
Column: 5
def TriggerLazyImport():
global _import_lazy_calls
for lazy in _import_lazy_calls:
lazy()
Reported by Pylint.
caffe2/python/layers/sparse_itemwise_dropout_with_replacement.py
10 issues
Line: 1
Column: 1
from caffe2.python import schema
from caffe2.python.layers.layers import (
IdList,
ModelLayer,
Reported by Pylint.
Line: 32
Column: 1
# OutputLengths: [2, 3]
# where the 2nd item in 2nd IdList feature [4] was replaced with [-1].
class SparseItemwiseDropoutWithReplacement(ModelLayer):
def __init__(
self,
model,
input_record,
dropout_prob_train,
Reported by Pylint.
Line: 33
Column: 5
# where the 2nd item in 2nd IdList feature [4] was replaced with [-1].
class SparseItemwiseDropoutWithReplacement(ModelLayer):
def __init__(
self,
model,
input_record,
dropout_prob_train,
dropout_prob_eval,
Reported by Pylint.
Line: 44
Column: 1
name='sparse_itemwise_dropout',
**kwargs):
super(SparseItemwiseDropoutWithReplacement, self).__init__(model, name, input_record, **kwargs)
assert schema.equal_schemas(input_record, IdList), "Incorrect input type"
self.dropout_prob_train = float(dropout_prob_train)
self.dropout_prob_eval = float(dropout_prob_eval)
self.dropout_prob_predict = float(dropout_prob_predict)
Reported by Pylint.
Line: 44
Column: 9
name='sparse_itemwise_dropout',
**kwargs):
super(SparseItemwiseDropoutWithReplacement, self).__init__(model, name, input_record, **kwargs)
assert schema.equal_schemas(input_record, IdList), "Incorrect input type"
self.dropout_prob_train = float(dropout_prob_train)
self.dropout_prob_eval = float(dropout_prob_eval)
self.dropout_prob_predict = float(dropout_prob_predict)
Reported by Pylint.
Line: 45
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
**kwargs):
super(SparseItemwiseDropoutWithReplacement, self).__init__(model, name, input_record, **kwargs)
assert schema.equal_schemas(input_record, IdList), "Incorrect input type"
self.dropout_prob_train = float(dropout_prob_train)
self.dropout_prob_eval = float(dropout_prob_eval)
self.dropout_prob_predict = float(dropout_prob_predict)
self.replacement_value = int(replacement_value)
Reported by Bandit.
Line: 51
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
self.dropout_prob_eval = float(dropout_prob_eval)
self.dropout_prob_predict = float(dropout_prob_predict)
self.replacement_value = int(replacement_value)
assert (self.dropout_prob_train >= 0 and
self.dropout_prob_train <= 1.0), \
"Expected 0 <= dropout_prob_train <= 1, but got %s" \
% self.dropout_prob_train
assert (self.dropout_prob_eval >= 0 and
self.dropout_prob_eval <= 1.0), \
Reported by Bandit.
Line: 55
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
self.dropout_prob_train <= 1.0), \
"Expected 0 <= dropout_prob_train <= 1, but got %s" \
% self.dropout_prob_train
assert (self.dropout_prob_eval >= 0 and
self.dropout_prob_eval <= 1.0), \
"Expected 0 <= dropout_prob_eval <= 1, but got %s" \
% dropout_prob_eval
assert (self.dropout_prob_predict >= 0 and
self.dropout_prob_predict <= 1.0), \
Reported by Bandit.
Line: 59
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
self.dropout_prob_eval <= 1.0), \
"Expected 0 <= dropout_prob_eval <= 1, but got %s" \
% dropout_prob_eval
assert (self.dropout_prob_predict >= 0 and
self.dropout_prob_predict <= 1.0), \
"Expected 0 <= dropout_prob_predict <= 1, but got %s" \
% dropout_prob_predict
assert(self.dropout_prob_train > 0 or
self.dropout_prob_eval > 0 or
Reported by Bandit.
Line: 63
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
self.dropout_prob_predict <= 1.0), \
"Expected 0 <= dropout_prob_predict <= 1, but got %s" \
% dropout_prob_predict
assert(self.dropout_prob_train > 0 or
self.dropout_prob_eval > 0 or
self.dropout_prob_predict > 0), \
"Ratios all set to 0.0 for train, eval and predict"
self.output_schema = schema.NewRecord(model.net, IdList)
Reported by Bandit.
caffe2/python/layers/label_smooth.py
10 issues
Line: 75
Column: 13
float32_label = self.label()
net.StumpFunc(
float32_label,
self.output_schema(),
threshold=0.5,
low_value=self.smooth_matrix[0],
high_value=self.smooth_matrix[1],
)
Reported by Pylint.
Line: 89
Column: 57
int64_label = self.label()
one_hot_label = net.NextScopedBlob('one_hot_label')
net.OneHot([int64_label, self.len], [one_hot_label])
net.MatMul([one_hot_label, self.smooth_matrix], self.output_schema())
def add_ops(self, net):
if self.binary_prob_label:
self.add_ops_for_binary_prob_label(net)
else:
Reported by Pylint.
Line: 1
Column: 1
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
Reported by Pylint.
Line: 28
Column: 1
import numpy as np
class LabelSmooth(ModelLayer):
def __init__(
self, model, label, smooth_matrix, name='label_smooth', **kwargs
):
super(LabelSmooth, self).__init__(model, name, label, **kwargs)
self.label = label
Reported by Pylint.
Line: 32
Column: 9
def __init__(
self, model, label, smooth_matrix, name='label_smooth', **kwargs
):
super(LabelSmooth, self).__init__(model, name, label, **kwargs)
self.label = label
# shape as a list
smooth_matrix = np.array(smooth_matrix).astype(np.float32).flatten()
self.set_dim(smooth_matrix)
self.set_smooth_matrix(smooth_matrix)
Reported by Pylint.
Line: 43
Column: 5
self.get_next_blob_reference('smoothed_label')
)
def set_dim(self, smooth_matrix):
num_elements = smooth_matrix.size
self.binary_prob_label = (num_elements == 2)
if self.binary_prob_label:
self.dim = 1
else:
Reported by Pylint.
Line: 49
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
if self.binary_prob_label:
self.dim = 1
else:
assert np.sqrt(num_elements)**2 == num_elements
self.dim = int(np.sqrt(num_elements))
def set_smooth_matrix(self, smooth_matrix):
if not self.binary_prob_label:
self.smooth_matrix = self.model.add_global_constant(
Reported by Bandit.
Line: 52
Column: 5
assert np.sqrt(num_elements)**2 == num_elements
self.dim = int(np.sqrt(num_elements))
def set_smooth_matrix(self, smooth_matrix):
if not self.binary_prob_label:
self.smooth_matrix = self.model.add_global_constant(
'%s_label_smooth_matrix' % self.name,
array=smooth_matrix.reshape((self.dim, self.dim)),
dtype=np.dtype(np.float32),
Reported by Pylint.
Line: 67
Column: 5
else:
self.smooth_matrix = smooth_matrix
def add_ops_for_binary_prob_label(self, net):
if self.label.field_type().base != np.float32:
float32_label = net.NextScopedBlob('float32_label')
net.Cast([self.label()], [float32_label], to=core.DataType.FLOAT)
else:
float32_label = self.label()
Reported by Pylint.
Line: 81
Column: 5
high_value=self.smooth_matrix[1],
)
def add_ops_for_categorical_label(self, net):
if self.label.field_type().base != np.int64:
int64_label = net.NextScopedBlob('int64_label')
net.Cast([self.label()], [int64_label], to=core.DataType.INT64)
else:
int64_label = self.label()
Reported by Pylint.
caffe2/python/layers/fc_without_bias.py
10 issues
Line: 55
Column: 5
initializer=weight_init,
optimizer=weight_optim)
def _add_ops(self, net, params):
net.MatMul(
self.input_record.field_blobs() + params,
self.output_schema.field_blobs(), trans_b=1, **self.kwargs
)
Reported by Pylint.
Line: 1
Column: 1
## @package fc_without_bias
# Module caffe2.python.layers.fc_without_bias
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
Reported by Pylint.
Line: 12
Column: 1
from caffe2.python.layers.layers import ModelLayer
from caffe2.python.layers.sampling_trainable_mixin import SamplingTrainableMixin
import math
import numpy as np
class FCWithoutBias(SamplingTrainableMixin, ModelLayer):
def __init__(
Reported by Pylint.
Line: 16
Column: 1
import numpy as np
class FCWithoutBias(SamplingTrainableMixin, ModelLayer):
def __init__(
self,
model,
input_record,
output_dims,
Reported by Pylint.
Line: 17
Column: 5
class FCWithoutBias(SamplingTrainableMixin, ModelLayer):
def __init__(
self,
model,
input_record,
output_dims,
weight_init=None,
Reported by Pylint.
Line: 28
Column: 9
uniform_weight_init_scale_numerator=1.0,
**kwargs
):
super(FCWithoutBias, self).__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
assert len(input_record.field_types()[0].shape) > 0, (
"FCWithoutBias expects limited dimensions of the input tensor"
)
Reported by Pylint.
Line: 29
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
**kwargs
):
super(FCWithoutBias, self).__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
assert len(input_record.field_types()[0].shape) > 0, (
"FCWithoutBias expects limited dimensions of the input tensor"
)
input_dims = input_record.field_types()[0].shape[0]
Reported by Bandit.
Line: 30
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
):
super(FCWithoutBias, self).__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
assert len(input_record.field_types()[0].shape) > 0, (
"FCWithoutBias expects limited dimensions of the input tensor"
)
input_dims = input_record.field_types()[0].shape[0]
assert input_dims > 0, (
Reported by Bandit.
Line: 35
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
)
input_dims = input_record.field_types()[0].shape[0]
assert input_dims > 0, (
"FCWithoutBias expects input dimensions > 0, got {}".format(input_dims)
)
self.output_schema = schema.Scalar(
(np.float32, (output_dims, )),
Reported by Bandit.
Line: 50
Column: 9
'max': scale}
)
self.w = self.create_param(param_name='w',
shape=[output_dims, input_dims],
initializer=weight_init,
optimizer=weight_optim)
def _add_ops(self, net, params):
Reported by Pylint.
aten/src/ATen/test/broadcast_test.cpp
10 issues
Line: 21
Column: 15
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
auto b = randn({5}, T);
std::vector<int64_t> expanded_sizes = {3, 5};
ASSERT_TRUE(
(a + b).equal(a.expand(expanded_sizes) + b.expand(expanded_sizes)));
}
// with scalar
void TestOut2WithScalar(DeprecatedTypeProperties& T) {
auto aScalar = ones({}, T);
Reported by FlawFinder.
Line: 29
Column: 21
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
auto aScalar = ones({}, T);
auto b = randn({3, 5}, T);
ASSERT_TRUE(
(aScalar + b).equal(aScalar.expand(b.sizes()) + b.expand(b.sizes())));
}
// old fallback behavior yields error
void TestOut2OldFallback(DeprecatedTypeProperties& T) {
auto a = randn({3, 5}, T);
Reported by FlawFinder.
Line: 54
Column: 27
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
auto b = randn({1, 2, 1}, T);
auto c = randn({1, 1, 5}, T);
std::vector<int64_t> expanded_sizes = {3, 2, 5};
ASSERT_TRUE((a + b + c).equal(
a.expand(expanded_sizes) + b.expand(expanded_sizes) +
c.expand(expanded_sizes)));
}
// with scalar
Reported by FlawFinder.
Line: 65
Column: 43
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
auto b = randn({3, 2, 1}, T);
auto c = randn({1, 2, 5}, T);
std::vector<int64_t> expanded_sizes = {3, 2, 5};
ASSERT_TRUE(aTensorScalar.addcmul(b, c).equal(
aTensorScalar.expand(expanded_sizes)
.addcmul(b.expand(expanded_sizes), c.expand(expanded_sizes))));
}
// old fallback behavior yields error
Reported by FlawFinder.
Line: 92
Column: 23
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
void TestIn2Basic(DeprecatedTypeProperties& T) {
auto a = randn({3, 5}, T);
auto b = randn({3, 1}, T);
ASSERT_TRUE((a + b).equal(a + b.expand({3, 5})));
}
// with scalar
void TestIn2WithScalar(DeprecatedTypeProperties& T) {
auto a = randn({3, 5}, T);
Reported by FlawFinder.
Line: 99
Column: 29
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
void TestIn2WithScalar(DeprecatedTypeProperties& T) {
auto a = randn({3, 5}, T);
auto bScalar = ones({}, T);
ASSERT_TRUE((a + bScalar).equal(a + bScalar.expand(a.sizes())));
}
// error: would have to expand inplace arg
void TestIn2ExpandError(DeprecatedTypeProperties& T) {
auto a = randn({1, 5}, T);
Reported by FlawFinder.
Line: 116
Column: 32
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
auto b = randn({3, 1, 2}, T);
auto c = randn({1, 5, 1}, T);
auto aClone = a.clone();
ASSERT_TRUE(a.addcmul_(b, c).equal(
aClone.addcmul_(b.expand(a.sizes()), c.expand(a.sizes()))));
}
// with scalar
void TestIn3WithScalar(DeprecatedTypeProperties& T) {
Reported by FlawFinder.
Line: 128
Column: 20
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
auto aClone = a.clone();
auto bScalar = ones({}, T);
ASSERT_TRUE(a.addcmul_(bScalar, c)
.equal(aClone.addcmul_(
bScalar.expand(a.sizes()), c.expand(a.sizes()))));
}
// error: would have to expand inplace arg
void TestIn3ExpandError(DeprecatedTypeProperties& T) {
Reported by FlawFinder.
Line: 146
Column: 29
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
auto a = randn({1}, T);
auto b = randn({5, 3}, T);
auto c = randn({3, 7}, T);
ASSERT_TRUE(a.addmm(b, c).equal(a.expand({5, 7}).addmm(b, c)));
}
// with scalar
void TestExplicitDimWithScalar(DeprecatedTypeProperties& T) {
auto a = randn({1}, T);
Reported by FlawFinder.
Line: 155
Column: 35
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
auto b = randn({5, 3}, T);
auto c = randn({3, 7}, T);
Tensor aScalar = ones({}, T);
ASSERT_TRUE(aScalar.addmm(b, c).equal(aScalar.expand({5, 7}).addmm(b, c)));
}
// with mismatched sizes
void TestExplicitDimWithMismatchedSizes(DeprecatedTypeProperties& T) {
auto b = randn({5, 3}, T);
Reported by FlawFinder.
aten/src/ATen/test/cpu_generator_test.cpp
10 issues
Line: 38
Column: 13
CWE codes:
327
Suggestion:
Use a more secure technique for acquiring random values
auto gen1 = at::detail::createCPUGenerator();
auto cpu_gen1 = check_generator<CPUGeneratorImpl>(gen1);
// NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
cpu_gen1->random(); // advance gen1 state
// NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
cpu_gen1->random();
auto gen2 = at::detail::createCPUGenerator();
gen2 = gen1.clone();
auto cpu_gen2 = check_generator<CPUGeneratorImpl>(gen2);
Reported by FlawFinder.
Line: 40
Column: 13
CWE codes:
327
Suggestion:
Use a more secure technique for acquiring random values
// NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
cpu_gen1->random(); // advance gen1 state
// NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
cpu_gen1->random();
auto gen2 = at::detail::createCPUGenerator();
gen2 = gen1.clone();
auto cpu_gen2 = check_generator<CPUGeneratorImpl>(gen2);
// NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
ASSERT_EQ(cpu_gen1->random(), cpu_gen2->random());
Reported by FlawFinder.
Line: 45
Column: 43
CWE codes:
327
Suggestion:
Use a more secure technique for acquiring random values
gen2 = gen1.clone();
auto cpu_gen2 = check_generator<CPUGeneratorImpl>(gen2);
// NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
ASSERT_EQ(cpu_gen1->random(), cpu_gen2->random());
}
void thread_func_get_engine_op(CPUGeneratorImpl* generator) {
std::lock_guard<std::mutex> lock(generator->mutex_);
// NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
Reported by FlawFinder.
Line: 45
Column: 23
CWE codes:
327
Suggestion:
Use a more secure technique for acquiring random values
gen2 = gen1.clone();
auto cpu_gen2 = check_generator<CPUGeneratorImpl>(gen2);
// NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
ASSERT_EQ(cpu_gen1->random(), cpu_gen2->random());
}
void thread_func_get_engine_op(CPUGeneratorImpl* generator) {
std::lock_guard<std::mutex> lock(generator->mutex_);
// NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
Reported by FlawFinder.
Line: 51
Column: 14
CWE codes:
327
Suggestion:
Use a more secure technique for acquiring random values
void thread_func_get_engine_op(CPUGeneratorImpl* generator) {
std::lock_guard<std::mutex> lock(generator->mutex_);
// NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
generator->random();
}
TEST(CPUGeneratorImpl, TestMultithreadingGetEngineOperator) {
// Test Description:
// Check CPUGeneratorImpl is reentrant and the engine state
Reported by FlawFinder.
Line: 76
Column: 13
CWE codes:
327
Suggestion:
Use a more secure technique for acquiring random values
std::lock_guard<std::mutex> lock(gen2.mutex());
auto cpu_gen2 = check_generator<CPUGeneratorImpl>(gen2);
// NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
cpu_gen2->random();
// NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
cpu_gen2->random();
// NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
cpu_gen2->random();
// NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
Reported by FlawFinder.
Line: 78
Column: 13
CWE codes:
327
Suggestion:
Use a more secure technique for acquiring random values
// NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
cpu_gen2->random();
// NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
cpu_gen2->random();
// NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
cpu_gen2->random();
// NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
ASSERT_EQ(cpu_gen1->random(), cpu_gen2->random());
}
Reported by FlawFinder.
Line: 80
Column: 13
CWE codes:
327
Suggestion:
Use a more secure technique for acquiring random values
// NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
cpu_gen2->random();
// NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
cpu_gen2->random();
// NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
ASSERT_EQ(cpu_gen1->random(), cpu_gen2->random());
}
TEST(CPUGeneratorImpl, TestGetSetCurrentSeed) {
Reported by FlawFinder.
Line: 82
Column: 23
CWE codes:
327
Suggestion:
Use a more secure technique for acquiring random values
// NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
cpu_gen2->random();
// NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
ASSERT_EQ(cpu_gen1->random(), cpu_gen2->random());
}
TEST(CPUGeneratorImpl, TestGetSetCurrentSeed) {
// Test Description:
// Test current seed getter and setter
Reported by FlawFinder.
Line: 82
Column: 43
CWE codes:
327
Suggestion:
Use a more secure technique for acquiring random values
// NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
cpu_gen2->random();
// NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
ASSERT_EQ(cpu_gen1->random(), cpu_gen2->random());
}
TEST(CPUGeneratorImpl, TestGetSetCurrentSeed) {
// Test Description:
// Test current seed getter and setter
Reported by FlawFinder.
benchmarks/distributed/rpc/parameter_server/models/DummyModel.py
10 issues
Line: 1
Column: 1
import torch.nn as nn
import torch.nn.functional as F
class DummyModel(nn.Module):
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
Reported by Pylint.
Line: 2
Column: 1
import torch.nn as nn
import torch.nn.functional as F
class DummyModel(nn.Module):
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
Reported by Pylint.
Line: 1
Column: 1
import torch.nn as nn
import torch.nn.functional as F
class DummyModel(nn.Module):
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
Reported by Pylint.
Line: 1
Column: 1
import torch.nn as nn
import torch.nn.functional as F
class DummyModel(nn.Module):
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
Reported by Pylint.
Line: 5
Column: 1
import torch.nn.functional as F
class DummyModel(nn.Module):
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
dense_input_size: int,
Reported by Pylint.
Line: 5
Column: 1
import torch.nn.functional as F
class DummyModel(nn.Module):
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
dense_input_size: int,
Reported by Pylint.
Line: 6
Column: 5
class DummyModel(nn.Module):
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
dense_input_size: int,
dense_output_size: int,
Reported by Pylint.
Line: 29
Column: 1
self.embedding = nn.EmbeddingBag(
num_embeddings, embedding_dim, sparse=sparse
)
self.dense = nn.Sequential(*[nn.Linear(dense_input_size, dense_output_size) for _ in range(dense_layers_count)])
def forward(self, x):
x = self.embedding(x)
return F.softmax(self.dense(x), dim=1)
Reported by Pylint.
Line: 31
Column: 5
)
self.dense = nn.Sequential(*[nn.Linear(dense_input_size, dense_output_size) for _ in range(dense_layers_count)])
def forward(self, x):
x = self.embedding(x)
return F.softmax(self.dense(x), dim=1)
Reported by Pylint.
Line: 31
Column: 5
)
self.dense = nn.Sequential(*[nn.Linear(dense_input_size, dense_output_size) for _ in range(dense_layers_count)])
def forward(self, x):
x = self.embedding(x)
return F.softmax(self.dense(x), dim=1)
Reported by Pylint.
benchmarks/functional_autograd_benchmark/compare.py
10 issues
Line: 1
Column: 1
import argparse
from collections import defaultdict
from utils import to_markdown_table, from_markdown_table
def main():
parser = argparse.ArgumentParser("Main script to compare results from the benchmarks")
parser.add_argument("--before", type=str, default="before.txt", help="Text file containing the times to use as base")
parser.add_argument("--after", type=str, default="after.txt", help="Text file containing the times to use as new version")
Reported by Pylint.
Line: 6
Column: 1
from utils import to_markdown_table, from_markdown_table
def main():
parser = argparse.ArgumentParser("Main script to compare results from the benchmarks")
parser.add_argument("--before", type=str, default="before.txt", help="Text file containing the times to use as base")
parser.add_argument("--after", type=str, default="after.txt", help="Text file containing the times to use as new version")
parser.add_argument("--output", type=str, default="", help="Text file where to write the output")
args = parser.parse_args()
Reported by Pylint.
Line: 8
Column: 1
def main():
parser = argparse.ArgumentParser("Main script to compare results from the benchmarks")
parser.add_argument("--before", type=str, default="before.txt", help="Text file containing the times to use as base")
parser.add_argument("--after", type=str, default="after.txt", help="Text file containing the times to use as new version")
parser.add_argument("--output", type=str, default="", help="Text file where to write the output")
args = parser.parse_args()
with open(args.before, "r") as f:
Reported by Pylint.
Line: 9
Column: 1
def main():
parser = argparse.ArgumentParser("Main script to compare results from the benchmarks")
parser.add_argument("--before", type=str, default="before.txt", help="Text file containing the times to use as base")
parser.add_argument("--after", type=str, default="after.txt", help="Text file containing the times to use as new version")
parser.add_argument("--output", type=str, default="", help="Text file where to write the output")
args = parser.parse_args()
with open(args.before, "r") as f:
content = f.read()
Reported by Pylint.
Line: 10
Column: 1
parser = argparse.ArgumentParser("Main script to compare results from the benchmarks")
parser.add_argument("--before", type=str, default="before.txt", help="Text file containing the times to use as base")
parser.add_argument("--after", type=str, default="after.txt", help="Text file containing the times to use as new version")
parser.add_argument("--output", type=str, default="", help="Text file where to write the output")
args = parser.parse_args()
with open(args.before, "r") as f:
content = f.read()
res_before = from_markdown_table(content)
Reported by Pylint.
Line: 13
Column: 36
parser.add_argument("--output", type=str, default="", help="Text file where to write the output")
args = parser.parse_args()
with open(args.before, "r") as f:
content = f.read()
res_before = from_markdown_table(content)
with open(args.after, "r") as f:
content = f.read()
Reported by Pylint.
Line: 17
Column: 35
content = f.read()
res_before = from_markdown_table(content)
with open(args.after, "r") as f:
content = f.read()
res_after = from_markdown_table(content)
diff = defaultdict(defaultdict)
for model in res_before:
Reported by Pylint.
Line: 29
Column: 1
diff[model][task] = (None, mean_before, var_before, None, None)
else:
mean_after, var_after = res_after[model][task]
diff[model][task] = (mean_before / mean_after, mean_before, var_before, mean_after, var_after)
for model in res_after:
for task in res_after[model]:
if task not in res_before[model]:
mean_after, var_after = res_after[model][task]
diff[model][task] = (None, None, None, mean_after, var_after)
Reported by Pylint.
Line: 36
Column: 1
mean_after, var_after = res_after[model][task]
diff[model][task] = (None, None, None, mean_after, var_after)
header = ("model", "task", "speedup", "mean (before)", "var (before)", "mean (after)", "var (after)")
out = to_markdown_table(diff, header=header)
print(out)
if args.output:
with open(args.output, "w") as f:
Reported by Pylint.
Line: 41
Column: 40
print(out)
if args.output:
with open(args.output, "w") as f:
f.write(out)
if __name__ == "__main__":
main()
Reported by Pylint.
benchmarks/operator_benchmark/common/tests/add_ops_list_test.py
10 issues
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
# Configs for pointwise unary ops
unary_ops_configs = op_bench.config_list(
attrs=[
[128, 128],
],
Reported by Pylint.
Line: 2
Column: 1
import operator_benchmark as op_bench
import torch
# Configs for pointwise unary ops
unary_ops_configs = op_bench.config_list(
attrs=[
[128, 128],
],
Reported by Pylint.
Line: 26
Column: 9
class UnaryOpBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, op_func):
self.input_one = torch.rand(M, N)
self.op_func = op_func
def forward(self):
return self.op_func(self.input_one)
Reported by Pylint.
Line: 27
Column: 9
class UnaryOpBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, op_func):
self.input_one = torch.rand(M, N)
self.op_func = op_func
def forward(self):
return self.op_func(self.input_one)
Reported by Pylint.
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
# Configs for pointwise unary ops
unary_ops_configs = op_bench.config_list(
attrs=[
[128, 128],
],
Reported by Pylint.
Line: 24
Column: 1
)
class UnaryOpBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, op_func):
self.input_one = torch.rand(M, N)
self.op_func = op_func
def forward(self):
Reported by Pylint.
Line: 25
Column: 5
class UnaryOpBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, op_func):
self.input_one = torch.rand(M, N)
self.op_func = op_func
def forward(self):
return self.op_func(self.input_one)
Reported by Pylint.
Line: 25
Column: 5
class UnaryOpBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, op_func):
self.input_one = torch.rand(M, N)
self.op_func = op_func
def forward(self):
return self.op_func(self.input_one)
Reported by Pylint.
Line: 25
Column: 5
class UnaryOpBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, op_func):
self.input_one = torch.rand(M, N)
self.op_func = op_func
def forward(self):
return self.op_func(self.input_one)
Reported by Pylint.
Line: 29
Column: 5
self.input_one = torch.rand(M, N)
self.op_func = op_func
def forward(self):
return self.op_func(self.input_one)
op_bench.generate_pt_tests_from_op_list(unary_ops_list, unary_ops_configs, UnaryOpBenchmark)
Reported by Pylint.
benchmarks/operator_benchmark/operator_benchmark.py
10 issues
Line: 2
Column: 1
# TODO (mingzhe09088): get rid of noqa
import benchmark_runner # noqa: F401
from benchmark_pytorch import TorchBenchmarkBase # noqa: F401
from benchmark_test_generator import * # noqa: F401,F403
from benchmark_utils import * # noqa: F401,F403
Reported by Pylint.
Line: 3
Column: 1
# TODO (mingzhe09088): get rid of noqa
import benchmark_runner # noqa: F401
from benchmark_pytorch import TorchBenchmarkBase # noqa: F401
from benchmark_test_generator import * # noqa: F401,F403
from benchmark_utils import * # noqa: F401,F403
Reported by Pylint.
Line: 4
Column: 1
# TODO (mingzhe09088): get rid of noqa
import benchmark_runner # noqa: F401
from benchmark_pytorch import TorchBenchmarkBase # noqa: F401
from benchmark_test_generator import * # noqa: F401,F403
from benchmark_utils import * # noqa: F401,F403
Reported by Pylint.
Line: 5
Column: 1
import benchmark_runner # noqa: F401
from benchmark_pytorch import TorchBenchmarkBase # noqa: F401
from benchmark_test_generator import * # noqa: F401,F403
from benchmark_utils import * # noqa: F401,F403
Reported by Pylint.
Line: 1
Column: 3
# TODO (mingzhe09088): get rid of noqa
import benchmark_runner # noqa: F401
from benchmark_pytorch import TorchBenchmarkBase # noqa: F401
from benchmark_test_generator import * # noqa: F401,F403
from benchmark_utils import * # noqa: F401,F403
Reported by Pylint.
Line: 2
Column: 1
# TODO (mingzhe09088): get rid of noqa
import benchmark_runner # noqa: F401
from benchmark_pytorch import TorchBenchmarkBase # noqa: F401
from benchmark_test_generator import * # noqa: F401,F403
from benchmark_utils import * # noqa: F401,F403
Reported by Pylint.
Line: 3
Column: 1
# TODO (mingzhe09088): get rid of noqa
import benchmark_runner # noqa: F401
from benchmark_pytorch import TorchBenchmarkBase # noqa: F401
from benchmark_test_generator import * # noqa: F401,F403
from benchmark_utils import * # noqa: F401,F403
Reported by Pylint.
Line: 4
Column: 1
# TODO (mingzhe09088): get rid of noqa
import benchmark_runner # noqa: F401
from benchmark_pytorch import TorchBenchmarkBase # noqa: F401
from benchmark_test_generator import * # noqa: F401,F403
from benchmark_utils import * # noqa: F401,F403
Reported by Pylint.
Line: 5
Column: 1
import benchmark_runner # noqa: F401
from benchmark_pytorch import TorchBenchmarkBase # noqa: F401
from benchmark_test_generator import * # noqa: F401,F403
from benchmark_utils import * # noqa: F401,F403
Reported by Pylint.
Line: 1
Column: 1
# TODO (mingzhe09088): get rid of noqa
import benchmark_runner # noqa: F401
from benchmark_pytorch import TorchBenchmarkBase # noqa: F401
from benchmark_test_generator import * # noqa: F401,F403
from benchmark_utils import * # noqa: F401,F403
Reported by Pylint.