The following issues were found
.circleci/cimodel/data/simple/binary_smoketest.py
6 issues
Line: 25
Column: 1
import cimodel.data.simple.util.branch_filters
class SmoketestJob:
def __init__(self,
template_name,
build_env_parts,
docker_image,
job_name,
Reported by Pylint.
Line: 25
Column: 1
import cimodel.data.simple.util.branch_filters
class SmoketestJob:
def __init__(self,
template_name,
build_env_parts,
docker_image,
job_name,
Reported by Pylint.
Line: 25
Column: 1
import cimodel.data.simple.util.branch_filters
class SmoketestJob:
def __init__(self,
template_name,
build_env_parts,
docker_image,
job_name,
Reported by Pylint.
Line: 26
Column: 5
class SmoketestJob:
def __init__(self,
template_name,
build_env_parts,
docker_image,
job_name,
is_master_only=False,
Reported by Pylint.
Line: 45
Column: 5
self.has_libtorch_variant = has_libtorch_variant
self.extra_props = extra_props or {}
def gen_tree(self):
props_dict = {
"build_environment": " ".join(self.build_env_parts),
"name": self.job_name,
"requires": self.requires,
Reported by Pylint.
Line: 192
Column: 1
]
def get_workflow_jobs():
return [item.gen_tree() for item in WORKFLOW_DATA]
Reported by Pylint.
caffe2/python/helpers/tools.py
6 issues
Line: 18
Column: 13
kwargs['use_gpu_transform'] = 1 if use_gpu_transform else 0
# GPU transform will handle NHWC -> NCHW
outputs = model.net.ImageInput(blob_in, blob_out, **kwargs)
pass
else:
outputs = model.net.ImageInput(
blob_in, [blob_out[0] + '_nhwc'] + blob_out[1:], **kwargs
)
outputs_list = list(outputs)
Reported by Pylint.
Line: 1
Column: 1
## @package tools
# Module caffe2.python.helpers.tools
def image_input(
Reported by Pylint.
Line: 9
Column: 1
def image_input(
model, blob_in, blob_out, order="NCHW", use_gpu_transform=False, **kwargs
):
assert 'is_test' in kwargs, "Argument 'is_test' is required"
if order == "NCHW":
if (use_gpu_transform):
Reported by Pylint.
Line: 12
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
def image_input(
model, blob_in, blob_out, order="NCHW", use_gpu_transform=False, **kwargs
):
assert 'is_test' in kwargs, "Argument 'is_test' is required"
if order == "NCHW":
if (use_gpu_transform):
kwargs['use_gpu_transform'] = 1 if use_gpu_transform else 0
# GPU transform will handle NHWC -> NCHW
outputs = model.net.ImageInput(blob_in, blob_out, **kwargs)
Reported by Bandit.
Line: 14
Column: 1
):
assert 'is_test' in kwargs, "Argument 'is_test' is required"
if order == "NCHW":
if (use_gpu_transform):
kwargs['use_gpu_transform'] = 1 if use_gpu_transform else 0
# GPU transform will handle NHWC -> NCHW
outputs = model.net.ImageInput(blob_in, blob_out, **kwargs)
pass
else:
Reported by Pylint.
Line: 31
Column: 1
return outputs
def video_input(model, blob_in, blob_out, **kwargs):
# size of outputs can vary depending on kwargs
outputs = model.net.VideoInput(blob_in, blob_out, **kwargs)
return outputs
Reported by Pylint.
caffe2/contrib/playground/resnetdemo/caffe2_resnet50_default_forward.py
6 issues
Line: 9
Column: 47
import caffe2.python.models.resnet as resnet
def gen_forward_pass_builder_fun(self, model, dataset, is_train):
def create_resnet50_model_ops(model, loss_scale):
[softmax, loss] = resnet.create_resnet50(
model,
"data",
num_input_channels=3,
Reported by Pylint.
Line: 9
Column: 56
import caffe2.python.models.resnet as resnet
def gen_forward_pass_builder_fun(self, model, dataset, is_train):
def create_resnet50_model_ops(model, loss_scale):
[softmax, loss] = resnet.create_resnet50(
model,
"data",
num_input_channels=3,
Reported by Pylint.
Line: 9
Column: 40
import caffe2.python.models.resnet as resnet
def gen_forward_pass_builder_fun(self, model, dataset, is_train):
def create_resnet50_model_ops(model, loss_scale):
[softmax, loss] = resnet.create_resnet50(
model,
"data",
num_input_channels=3,
Reported by Pylint.
Line: 10
Column: 42
def gen_forward_pass_builder_fun(self, model, dataset, is_train):
def create_resnet50_model_ops(model, loss_scale):
[softmax, loss] = resnet.create_resnet50(
model,
"data",
num_input_channels=3,
num_labels=1000,
Reported by Pylint.
Line: 1
Column: 1
import caffe2.python.models.resnet as resnet
def gen_forward_pass_builder_fun(self, model, dataset, is_train):
Reported by Pylint.
Line: 9
Column: 1
import caffe2.python.models.resnet as resnet
def gen_forward_pass_builder_fun(self, model, dataset, is_train):
def create_resnet50_model_ops(model, loss_scale):
[softmax, loss] = resnet.create_resnet50(
model,
"data",
num_input_channels=3,
Reported by Pylint.
aten/src/ATen/native/LinearAlgebra.cpp
6 issues
Line: 369
Column: 81
CWE codes:
190
Suggestion:
If source untrusted, check both minimum and maximum, even if the input had no minus sign (large numbers can roll over into negative number; consider saving to an unsigned value if that is intended)
// 'hermitian' controls whether SVD or eigendecomposition is used for computing the singular values
// 'atol' and 'rtol' are the absolute and relative tolerances, respectively.
// TODO: this function can be made public, see: https://github.com/pytorch/pytorch/issues/54151
static Tensor& linalg_matrix_rank_out_helper(const Tensor& input, const Tensor& atol, const Tensor& rtol, bool hermitian, Tensor& result) {
checkSameDevice("torch.linalg.matrix_rank", result, input);
checkSameDevice("torch.linalg.matrix_rank", atol, input, "atol");
checkSameDevice("torch.linalg.matrix_rank", rtol, input, "rtol");
ScalarType output_type = ScalarType::Long;
checkLinalgCompatibleDtype("torch.linalg.matrix_rank", result.scalar_type(), output_type);
Reported by FlawFinder.
Line: 371
Column: 47
CWE codes:
190
Suggestion:
If source untrusted, check both minimum and maximum, even if the input had no minus sign (large numbers can roll over into negative number; consider saving to an unsigned value if that is intended)
// TODO: this function can be made public, see: https://github.com/pytorch/pytorch/issues/54151
static Tensor& linalg_matrix_rank_out_helper(const Tensor& input, const Tensor& atol, const Tensor& rtol, bool hermitian, Tensor& result) {
checkSameDevice("torch.linalg.matrix_rank", result, input);
checkSameDevice("torch.linalg.matrix_rank", atol, input, "atol");
checkSameDevice("torch.linalg.matrix_rank", rtol, input, "rtol");
ScalarType output_type = ScalarType::Long;
checkLinalgCompatibleDtype("torch.linalg.matrix_rank", result.scalar_type(), output_type);
// Matrices or batch of matrices are allowed
Reported by FlawFinder.
Line: 379
Column: 34
CWE codes:
190
Suggestion:
If source untrusted, check both minimum and maximum, even if the input had no minus sign (large numbers can roll over into negative number; consider saving to an unsigned value if that is intended)
// Matrices or batch of matrices are allowed
TORCH_CHECK(input.dim() >= 2, "torch.linalg.matrix_rank: Expected as input a matrix or a batch of matrices, but got a tensor of size: ", input.sizes());
TORCH_CHECK(!at::isComplexType(atol.scalar_type()),
"torch.linalg.matrix_rank: atol tensor of complex type is not supported.");
TORCH_CHECK(!at::isComplexType(rtol.scalar_type()),
"torch.linalg.matrix_rank: rtol tensor of complex type is not supported.");
// matrix_rank assigns a scalar value for each matrix in the batch so
Reported by FlawFinder.
Line: 411
Column: 24
CWE codes:
190
Suggestion:
If source untrusted, check both minimum and maximum, even if the input had no minus sign (large numbers can roll over into negative number; consider saving to an unsigned value if that is intended)
max_S = S.amax(/*dim=*/-1, /*keepdim=*/true);
}
Tensor tol = at::max(atol.unsqueeze(-1), rtol * max_S);
result = at::sum_out(result, S > tol, /*dim=*/-1);
return result;
}
Reported by FlawFinder.
Line: 427
Column: 10
CWE codes:
190
Suggestion:
If source untrusted, check both minimum and maximum, even if the input had no minus sign (large numbers can roll over into negative number; consider saving to an unsigned value if that is intended)
Tensor& linalg_matrix_rank_out(const Tensor& input, optional<double> tol, bool hermitian, Tensor& result) {
double tol_value;
Tensor atol, rtol;
if (tol.has_value()) {
tol_value = tol.value();
// For NumPy compatibility tol is not scaled with max(singular_value) if the value for tol is provided
// It is assumed that the provided value is the absolute tolerance
atol = at::full({}, tol_value, input.options().dtype(ScalarType::Double));
Reported by FlawFinder.
Line: 443
Column: 49
CWE codes:
190
Suggestion:
If source untrusted, check both minimum and maximum, even if the input had no minus sign (large numbers can roll over into negative number; consider saving to an unsigned value if that is intended)
rtol = at::full({}, tol_value, input.options().dtype(ScalarType::Double));
}
result = linalg_matrix_rank_out_helper(input, atol, rtol, hermitian, result);
return result;
}
Tensor linalg_matrix_rank(const Tensor& input, const Tensor& tol, bool hermitian) {
Tensor result = at::empty({0}, input.options().dtype(ScalarType::Long));
Reported by FlawFinder.
caffe2/python/hsm_util.py
6 issues
Line: 8
Column: 1
from caffe2.proto import hsm_pb2
'''
Hierarchical softmax utility methods that can be used to:
1) create TreeProto structure given list of word_ids or NodeProtos
2) create HierarchyProto structure using the user-inputted TreeProto
Reported by Pylint.
Line: 10
Column: 1
from caffe2.proto import hsm_pb2
'''
Hierarchical softmax utility methods that can be used to:
1) create TreeProto structure given list of word_ids or NodeProtos
2) create HierarchyProto structure using the user-inputted TreeProto
'''
Reported by Pylint.
Line: 1
Column: 1
## @package hsm_util
# Module caffe2.python.hsm_util
from caffe2.proto import hsm_pb2
Reported by Pylint.
Line: 17
Column: 1
'''
def create_node_with_words(words, name='node'):
node = hsm_pb2.NodeProto()
node.name = name
for word in words:
node.word_ids.append(word)
return node
Reported by Pylint.
Line: 25
Column: 1
return node
def create_node_with_nodes(nodes, name='node'):
node = hsm_pb2.NodeProto()
node.name = name
for child_node in nodes:
new_child_node = node.children.add()
new_child_node.MergeFrom(child_node)
Reported by Pylint.
Line: 34
Column: 1
return node
def create_hierarchy(tree_proto):
max_index = 0
def create_path(path, word):
path_proto = hsm_pb2.PathProto()
path_proto.word_id = word
Reported by Pylint.
aten/src/ATen/cuda/CUDAGeneratorImpl.cpp
6 issues
Line: 143
Column: 26
CWE codes:
327
Suggestion:
Use a more secure technique for acquiring random values
uint64_t CUDAGeneratorImpl::seed() {
at::cuda::assertNotCapturing("Cannot call CUDAGeneratorImpl::seed");
auto random = c10::detail::getNonDeterministicRandom(true);
this->set_current_seed(random);
return random;
}
/**
* Gets the current internal state of CUDAGeneratorImpl. The internal
Reported by FlawFinder.
Line: 144
Column: 10
CWE codes:
327
Suggestion:
Use a more secure technique for acquiring random values
at::cuda::assertNotCapturing("Cannot call CUDAGeneratorImpl::seed");
auto random = c10::detail::getNonDeterministicRandom(true);
this->set_current_seed(random);
return random;
}
/**
* Gets the current internal state of CUDAGeneratorImpl. The internal
* state is returned as a CPU byte tensor.
Reported by FlawFinder.
Line: 169
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
memset(rng_state, -1, states_size);
auto current_seed = this->current_seed();
auto offset = static_cast<int64_t>(this->philox_offset_per_thread()); // Note that old THCGeneratorState had offset as std::atomic<int64_t>
memcpy(rng_state + states_size, &current_seed, seed_size);
memcpy(rng_state + states_size + seed_size, &offset, offset_size);
return state_tensor.getIntrusivePtr();
}
Reported by FlawFinder.
Line: 170
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
auto current_seed = this->current_seed();
auto offset = static_cast<int64_t>(this->philox_offset_per_thread()); // Note that old THCGeneratorState had offset as std::atomic<int64_t>
memcpy(rng_state + states_size, &current_seed, seed_size);
memcpy(rng_state + states_size + seed_size, &offset, offset_size);
return state_tensor.getIntrusivePtr();
}
/**
Reported by FlawFinder.
Line: 199
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
uint64_t input_seed;
auto new_rng_state = new_state.data<uint8_t>();
memcpy(&input_seed, new_rng_state + states_size, seed_size);
this->set_current_seed(input_seed);
int64_t philox_offset = 0;
if (!no_philox_seed) {
memcpy(&philox_offset, new_rng_state + states_size + seed_size, offset_size);
}
Reported by FlawFinder.
Line: 203
Column: 5
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
this->set_current_seed(input_seed);
int64_t philox_offset = 0;
if (!no_philox_seed) {
memcpy(&philox_offset, new_rng_state + states_size + seed_size, offset_size);
}
this->set_philox_offset_per_thread(static_cast<uint64_t>(philox_offset));
}
/**
Reported by FlawFinder.
caffe2/python/crf_viterbi_test.py
6 issues
Line: 10
Column: 1
from caffe2.python.cnn import CNNModelHelper
from caffe2.python.crf_predict import crf_update_predictions
from caffe2.python.test_util import TestCase
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
class TestCrfDecode(TestCase):
Reported by Pylint.
Line: 11
Column: 1
from caffe2.python.crf_predict import crf_update_predictions
from caffe2.python.test_util import TestCase
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
class TestCrfDecode(TestCase):
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import workspace, crf
from caffe2.python.cnn import CNNModelHelper
from caffe2.python.crf_predict import crf_update_predictions
from caffe2.python.test_util import TestCase
Reported by Pylint.
Line: 15
Column: 1
import numpy as np
class TestCrfDecode(TestCase):
@given(num_tags=st.integers(2, 4), num_words=st.integers(2, 15))
@settings(deadline=2000)
def test_crf_viterbi(self, num_tags, num_words):
model = CNNModelHelper(name='external')
Reported by Pylint.
Line: 19
Column: 5
@given(num_tags=st.integers(2, 4), num_words=st.integers(2, 15))
@settings(deadline=2000)
def test_crf_viterbi(self, num_tags, num_words):
model = CNNModelHelper(name='external')
predictions = np.random.randn(num_words, num_tags).astype(np.float32)
transitions = np.random.uniform(
low=-1, high=1, size=(num_tags + 2, num_tags + 2)
).astype(np.float32)
Reported by Pylint.
Line: 19
Column: 5
@given(num_tags=st.integers(2, 4), num_words=st.integers(2, 15))
@settings(deadline=2000)
def test_crf_viterbi(self, num_tags, num_words):
model = CNNModelHelper(name='external')
predictions = np.random.randn(num_words, num_tags).astype(np.float32)
transitions = np.random.uniform(
low=-1, high=1, size=(num_tags + 2, num_tags + 2)
).astype(np.float32)
Reported by Pylint.
torch/nn/quantized/_reference/modules/linear.py
6 issues
Line: 13
Column: 24
this is useful when user want to use this module in other backends like Glow.
"""
def __init__(self, in_features, out_features, bias_=True,
dtype=torch.qint8):
super().__init__(in_features, out_features, bias_, dtype)
self._qweight, self._bias = self._packed_params._weight_bias()
del self._packed_params
def _get_name(self):
Reported by Pylint.
Line: 26
Column: 18
weight_dequant = self._qweight.dequantize()
float_result = F.linear(x_dequant, weight_dequant, self._bias)
# NEEDFIX: we don't have dtype in the Linear module APIs right now!
result = torch.quantize_per_tensor(
float_result, self.scale, self.zero_point, torch.quint8)
return result
def _save_to_state_dict(self, destination, prefix, keep_vars):
super()._save_to_state_dict(destination, prefix, keep_vars)
Reported by Pylint.
Line: 27
Column: 56
float_result = F.linear(x_dequant, weight_dequant, self._bias)
# NEEDFIX: we don't have dtype in the Linear module APIs right now!
result = torch.quantize_per_tensor(
float_result, self.scale, self.zero_point, torch.quint8)
return result
def _save_to_state_dict(self, destination, prefix, keep_vars):
super()._save_to_state_dict(destination, prefix, keep_vars)
destination[prefix + '_qweight'] = self._qweight
Reported by Pylint.
Line: 1
Column: 1
import torch
import torch.nn.quantized as nnq
import torch.nn.functional as F
from typing import Optional
class Linear(nnq.Linear):
""" A backend independent version of nn.quantized.Linear
we will not pack the parameters in this module, since weight packing is an
optimization for quantized backends supported in PyTorch (fbgemm/qnnpack),
Reported by Pylint.
Line: 4
Column: 1
import torch
import torch.nn.quantized as nnq
import torch.nn.functional as F
from typing import Optional
class Linear(nnq.Linear):
""" A backend independent version of nn.quantized.Linear
we will not pack the parameters in this module, since weight packing is an
optimization for quantized backends supported in PyTorch (fbgemm/qnnpack),
Reported by Pylint.
Line: 35
Column: 5
destination[prefix + '_qweight'] = self._qweight
destination[prefix + '_bias'] = self._bias
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
self._qweight = state_dict[prefix + '_qweight']
self._bias = state_dict[prefix + '_bias']
state_dict.pop(prefix + '_qweight')
state_dict.pop(prefix + '_bias')
Reported by Pylint.
torch/package/_importlib.py
6 issues
Line: 53
Column: 23
raise ValueError("Empty module name")
def _calc___package__(globals):
"""Calculate what __package__ should be.
__package__ is not guaranteed to be defined or could be set to None
to represent that its proper value is unknown.
Reported by Pylint.
Line: 1
Column: 1
import _warnings
import os.path
# note: implementations
# copied from cpython's import code
# _zip_searchorder defines how we search for a module in the Zip
# archive: we first search for a package __init__, then for
Reported by Pylint.
Line: 2
Column: 1
import _warnings
import os.path
# note: implementations
# copied from cpython's import code
# _zip_searchorder defines how we search for a module in the Zip
# archive: we first search for a package __init__, then for
Reported by Pylint.
Line: 43
Column: 9
if level < 0:
raise ValueError("level must be >= 0")
if level > 0:
if not isinstance(package, str):
raise TypeError("__package__ not set to a string")
elif not package:
raise ImportError(
"attempted relative import with no known parent " "package"
)
Reported by Pylint.
Line: 62
Column: 5
"""
package = globals.get("__package__")
spec = globals.get("__spec__")
if package is not None:
if spec is not None and package != spec.parent:
_warnings.warn(
"__package__ != __spec__.parent " f"({package!r} != {spec.parent!r})",
ImportWarning,
stacklevel=3,
Reported by Pylint.
Line: 91
Column: 5
If the resulting string contains path separators, an exception is raised.
"""
parent, file_name = os.path.split(path)
if parent:
raise ValueError("{!r} must be only a file name".format(path))
else:
return file_name
Reported by Pylint.
torch/torch_version.py
6 issues
Line: 9
Column: 1
Version = packaging.version.Version
InvalidVersion = packaging.version.InvalidVersion
from .version import __version__ as internal_version
@total_ordering
class TorchVersion(str):
"""A string with magic powers to compare to both Version and iterables!
Reported by Pylint.
Line: 1
Column: 1
from functools import total_ordering
from typing import Iterable, Union
from pkg_resources import packaging # type: ignore[attr-defined]
Version = packaging.version.Version
InvalidVersion = packaging.version.InvalidVersion
from .version import __version__ as internal_version
Reported by Pylint.
Line: 9
Column: 1
Version = packaging.version.Version
InvalidVersion = packaging.version.InvalidVersion
from .version import __version__ as internal_version
@total_ordering
class TorchVersion(str):
"""A string with magic powers to compare to both Version and iterables!
Reported by Pylint.
Line: 35
Column: 5
TorchVersion('1.10.0a') > '1.2.1'
"""
# fully qualified type names here to appease mypy
def _convert_to_version(self, inp: Union[packaging.version.Version, str, Iterable]) -> packaging.version.Version:
if isinstance(inp, Version):
return inp
elif isinstance(inp, str):
return Version(inp)
elif isinstance(inp, Iterable):
Reported by Pylint.
Line: 35
Column: 1
TorchVersion('1.10.0a') > '1.2.1'
"""
# fully qualified type names here to appease mypy
def _convert_to_version(self, inp: Union[packaging.version.Version, str, Iterable]) -> packaging.version.Version:
if isinstance(inp, Version):
return inp
elif isinstance(inp, str):
return Version(inp)
elif isinstance(inp, Iterable):
Reported by Pylint.
Line: 36
Column: 9
"""
# fully qualified type names here to appease mypy
def _convert_to_version(self, inp: Union[packaging.version.Version, str, Iterable]) -> packaging.version.Version:
if isinstance(inp, Version):
return inp
elif isinstance(inp, str):
return Version(inp)
elif isinstance(inp, Iterable):
# Ideally this should work for most cases by attempting to group
Reported by Pylint.