The following issues were found:
torch/fx/annotate.py
5 issues
Line: 4
Column: 19
from torch.fx.proxy import Proxy
def annotate(val, type):
# val could be either a regular value (not tracing)
# or fx.Proxy (tracing)
if isinstance(val, Proxy):
if val.node.type:
raise RuntimeError(f"Tried to annotate a value that already had a type on it!"
Reported by Pylint.
Line: 1
Column: 1
from torch.fx.proxy import Proxy
def annotate(val, type):
# val could be either a regular value (not tracing)
# or fx.Proxy (tracing)
if isinstance(val, Proxy):
if val.node.type:
raise RuntimeError(f"Tried to annotate a value that already had a type on it!"
Reported by Pylint.
Line: 4
Column: 1
from torch.fx.proxy import Proxy
def annotate(val, type):
# val could be either a regular value (not tracing)
# or fx.Proxy (tracing)
if isinstance(val, Proxy):
if val.node.type:
raise RuntimeError(f"Tried to annotate a value that already had a type on it!"
Reported by Pylint.
Line: 7
Column: 5
def annotate(val, type):
# val could be either a regular value (not tracing)
# or fx.Proxy (tracing)
if isinstance(val, Proxy):
if val.node.type:
raise RuntimeError(f"Tried to annotate a value that already had a type on it!"
f" Existing type is {val.node.type} "
f"and new type is {type}. "
f"This could happen if you tried to annotate a function parameter "
Reported by Pylint.
Line: 8
Column: 9
# val could be either a regular value (not tracing)
# or fx.Proxy (tracing)
if isinstance(val, Proxy):
if val.node.type:
raise RuntimeError(f"Tried to annotate a value that already had a type on it!"
f" Existing type is {val.node.type} "
f"and new type is {type}. "
f"This could happen if you tried to annotate a function parameter "
f"value (in which case you should use the type slot "
Reported by Pylint.
torch/csrc/jit/tensorexpr/ir_simplifier.cpp
5 issues
Line: 1354
Column: 22
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it.
}
bool equal = immediateEquals(diff, 0);
bool lhsSmaller = !equal && !immediateIsNegative(diff);
switch (v->compare_select_op()) {
case CompareSelectOperation::kEQ:
return equal ? true_branch : false_branch;
case CompareSelectOperation::kGT:
Reported by FlawFinder.
Line: 1358
Column: 14
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it.
switch (v->compare_select_op()) {
case CompareSelectOperation::kEQ:
return equal ? true_branch : false_branch;
case CompareSelectOperation::kGT:
return (lhsSmaller || equal) ? false_branch : true_branch;
case CompareSelectOperation::kGE:
return lhsSmaller ? false_branch : true_branch;
case CompareSelectOperation::kLT:
Reported by FlawFinder.
Line: 1360
Column: 29
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it.
case CompareSelectOperation::kEQ:
return equal ? true_branch : false_branch;
case CompareSelectOperation::kGT:
return (lhsSmaller || equal) ? false_branch : true_branch;
case CompareSelectOperation::kGE:
return lhsSmaller ? false_branch : true_branch;
case CompareSelectOperation::kLT:
return lhsSmaller ? true_branch : false_branch;
case CompareSelectOperation::kLE:
Reported by FlawFinder.
Line: 1366
Column: 29
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it.
case CompareSelectOperation::kLT:
return lhsSmaller ? true_branch : false_branch;
case CompareSelectOperation::kLE:
return (lhsSmaller || equal) ? true_branch : false_branch;
case CompareSelectOperation::kNE:
return equal ? false_branch : true_branch;
}
// should not be possible but just in case.
Reported by FlawFinder.
Line: 1368
Column: 14
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it.
case CompareSelectOperation::kLE:
return (lhsSmaller || equal) ? true_branch : false_branch;
case CompareSelectOperation::kNE:
return equal ? false_branch : true_branch;
}
// should not be possible but just in case.
return alloc<CompareSelect>(
lhs_new,
Reported by FlawFinder.
torch/__future__.py
5 issues
Line: 15
Column: 5
_overwrite_module_params_on_conversion = False
def set_overwrite_module_params_on_conversion(value):
global _overwrite_module_params_on_conversion
_overwrite_module_params_on_conversion = value
def get_overwrite_module_params_on_conversion():
return _overwrite_module_params_on_conversion
Reported by Pylint.
Line: 12
Column: 1
Default: False
"""
_overwrite_module_params_on_conversion = False
def set_overwrite_module_params_on_conversion(value):
global _overwrite_module_params_on_conversion
_overwrite_module_params_on_conversion = value
Reported by Pylint.
Line: 14
Column: 1
"""
_overwrite_module_params_on_conversion = False
def set_overwrite_module_params_on_conversion(value):
global _overwrite_module_params_on_conversion
_overwrite_module_params_on_conversion = value
def get_overwrite_module_params_on_conversion():
return _overwrite_module_params_on_conversion
Reported by Pylint.
Line: 15
Column: 5
_overwrite_module_params_on_conversion = False
def set_overwrite_module_params_on_conversion(value):
global _overwrite_module_params_on_conversion
_overwrite_module_params_on_conversion = value
def get_overwrite_module_params_on_conversion():
return _overwrite_module_params_on_conversion
Reported by Pylint.
Line: 18
Column: 1
global _overwrite_module_params_on_conversion
_overwrite_module_params_on_conversion = value
def get_overwrite_module_params_on_conversion():
return _overwrite_module_params_on_conversion
Reported by Pylint.
torch/fx/experimental/fx2trt/converters/adaptive_avgpool.py
5 issues
Line: 2
Column: 1
import torch
import tensorrt as trt
from torch.fx.experimental.fx2trt.fx2trt import tensorrt_converter
from .helper_functions import mark_as_int8_layer, extend_attr_to_tuple
@tensorrt_converter(torch.nn.modules.pooling.AdaptiveAvgPool2d)
def adaptive_avgpool2d(network, submod, args, kwargs, name):
# args/kwargs should have already been normalized to kwargs
Reported by Pylint.
Line: 5
Column: 1
import tensorrt as trt
from torch.fx.experimental.fx2trt.fx2trt import tensorrt_converter
from .helper_functions import mark_as_int8_layer, extend_attr_to_tuple
@tensorrt_converter(torch.nn.modules.pooling.AdaptiveAvgPool2d)
def adaptive_avgpool2d(network, submod, args, kwargs, name):
# args/kwargs should have already been normalized to kwargs
assert len(args) == 0
Reported by Pylint.
Line: 1
Column: 1
import torch
import tensorrt as trt
from torch.fx.experimental.fx2trt.fx2trt import tensorrt_converter
from .helper_functions import mark_as_int8_layer, extend_attr_to_tuple
@tensorrt_converter(torch.nn.modules.pooling.AdaptiveAvgPool2d)
def adaptive_avgpool2d(network, submod, args, kwargs, name):
# args/kwargs should have already been normalized to kwargs
Reported by Pylint.
Line: 8
Column: 1
from .helper_functions import mark_as_int8_layer, extend_attr_to_tuple
@tensorrt_converter(torch.nn.modules.pooling.AdaptiveAvgPool2d)
def adaptive_avgpool2d(network, submod, args, kwargs, name):
# args/kwargs should have already been normalized to kwargs
assert len(args) == 0
input_val = kwargs["input"]
if not isinstance(input_val, trt.tensorrt.ITensor):
Reported by Pylint.
Line: 10
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
@tensorrt_converter(torch.nn.modules.pooling.AdaptiveAvgPool2d)
def adaptive_avgpool2d(network, submod, args, kwargs, name):
# args/kwargs should have already been normalized to kwargs
assert len(args) == 0
input_val = kwargs["input"]
if not isinstance(input_val, trt.tensorrt.ITensor):
raise RuntimeError(f"AdaptiveAvgPool2d received input {input_val} that is not part "
"of the TensorRT region!")
Reported by Bandit.
torch/csrc/distributed/c10d/GlooDeviceFactory.cpp
5 issues
Line: 87
Column: 45
CWE codes:
807
20
Suggestion:
Check environment variables carefully before using them.
} else {
attr.hostname = hostname;
}
const auto pkey = cstr_to_std_string(std::getenv("GLOO_DEVICE_TRANSPORT_TCP_TLS_PKEY"));
const auto cert = cstr_to_std_string(std::getenv("GLOO_DEVICE_TRANSPORT_TCP_TLS_CERT"));
const auto caFile = cstr_to_std_string(std::getenv("GLOO_DEVICE_TRANSPORT_TCP_TLS_CA_FILE"));
const auto caPath = cstr_to_std_string(std::getenv("GLOO_DEVICE_TRANSPORT_TCP_TLS_CA_PATH"));
return ::gloo::transport::tcp::tls::CreateDevice(attr, pkey, cert, caFile, caPath);
}
Reported by FlawFinder.
Line: 88
Column: 45
CWE codes:
807
20
Suggestion:
Check environment variables carefully before using them.
attr.hostname = hostname;
}
const auto pkey = cstr_to_std_string(std::getenv("GLOO_DEVICE_TRANSPORT_TCP_TLS_PKEY"));
const auto cert = cstr_to_std_string(std::getenv("GLOO_DEVICE_TRANSPORT_TCP_TLS_CERT"));
const auto caFile = cstr_to_std_string(std::getenv("GLOO_DEVICE_TRANSPORT_TCP_TLS_CA_FILE"));
const auto caPath = cstr_to_std_string(std::getenv("GLOO_DEVICE_TRANSPORT_TCP_TLS_CA_PATH"));
return ::gloo::transport::tcp::tls::CreateDevice(attr, pkey, cert, caFile, caPath);
}
Reported by FlawFinder.
Line: 89
Column: 47
CWE codes:
807
20
Suggestion:
Check environment variables carefully before using them.
}
const auto pkey = cstr_to_std_string(std::getenv("GLOO_DEVICE_TRANSPORT_TCP_TLS_PKEY"));
const auto cert = cstr_to_std_string(std::getenv("GLOO_DEVICE_TRANSPORT_TCP_TLS_CERT"));
const auto caFile = cstr_to_std_string(std::getenv("GLOO_DEVICE_TRANSPORT_TCP_TLS_CA_FILE"));
const auto caPath = cstr_to_std_string(std::getenv("GLOO_DEVICE_TRANSPORT_TCP_TLS_CA_PATH"));
return ::gloo::transport::tcp::tls::CreateDevice(attr, pkey, cert, caFile, caPath);
}
C10_REGISTER_CREATOR(GlooDeviceRegistry, TCP_TLS, makeTCPTLSDevice);
Reported by FlawFinder.
Line: 90
Column: 47
CWE codes:
807
20
Suggestion:
Check environment variables carefully before using them.
const auto pkey = cstr_to_std_string(std::getenv("GLOO_DEVICE_TRANSPORT_TCP_TLS_PKEY"));
const auto cert = cstr_to_std_string(std::getenv("GLOO_DEVICE_TRANSPORT_TCP_TLS_CERT"));
const auto caFile = cstr_to_std_string(std::getenv("GLOO_DEVICE_TRANSPORT_TCP_TLS_CA_FILE"));
const auto caPath = cstr_to_std_string(std::getenv("GLOO_DEVICE_TRANSPORT_TCP_TLS_CA_PATH"));
return ::gloo::transport::tcp::tls::CreateDevice(attr, pkey, cert, caFile, caPath);
}
C10_REGISTER_CREATOR(GlooDeviceRegistry, TCP_TLS, makeTCPTLSDevice);
#endif
Reported by FlawFinder.
Line: 127
Column: 31
CWE codes:
807
20
Suggestion:
Check environment variables carefully before using them.
std::shared_ptr<::gloo::transport::Device>
makeGlooDevice(const std::string& interfaceName, const std::string& hostName)
{
static auto transportName = getenv("GLOO_DEVICE_TRANSPORT");
if (transportName) {
return GlooDeviceRegistry()->Create(transportName, interfaceName, hostName);
}
#ifdef __linux__
Reported by FlawFinder.
test/package/package_b/subpackage_0/subsubpackage_0/__init__.py
5 issues
Line: 1
Column: 1
__import__("subpackage_1", globals(), locals(), ["PackageBSubpackage1Object_0"], 3)
result = "subsubpackage_0"
class PackageBSubsubpackage0Object_0:
pass
Reported by Pylint.
Line: 3
Column: 1
__import__("subpackage_1", globals(), locals(), ["PackageBSubpackage1Object_0"], 3)
result = "subsubpackage_0"
class PackageBSubsubpackage0Object_0:
pass
Reported by Pylint.
Line: 6
Column: 1
result = "subsubpackage_0"
class PackageBSubsubpackage0Object_0:
pass
Reported by Pylint.
Line: 6
Column: 1
result = "subsubpackage_0"
class PackageBSubsubpackage0Object_0:
pass
Reported by Pylint.
Line: 6
Column: 1
result = "subsubpackage_0"
class PackageBSubsubpackage0Object_0:
pass
Reported by Pylint.
torch/ao/sparsity/scheduler/lambda_scheduler.py
5 issues
Line: 3
Column: 1
import warnings
from .base_scheduler import BaseScheduler
class LambdaSL(BaseScheduler):
"""Sets the sparsity level of each parameter group to the final sl
times a given function. When last_epoch=-1, sets initial sl as zero.
Args:
sparsifier (BaseSparsifier): Wrapped sparsifier.
Reported by Pylint.
Line: 1
Column: 1
import warnings
from .base_scheduler import BaseScheduler
class LambdaSL(BaseScheduler):
"""Sets the sparsity level of each parameter group to the final sl
times a given function. When last_epoch=-1, sets initial sl as zero.
Args:
sparsifier (BaseSparsifier): Wrapped sparsifier.
Reported by Pylint.
Line: 5
Column: 1
from .base_scheduler import BaseScheduler
class LambdaSL(BaseScheduler):
"""Sets the sparsity level of each parameter group to the final sl
times a given function. When last_epoch=-1, sets initial sl as zero.
Args:
sparsifier (BaseSparsifier): Wrapped sparsifier.
sl_lambda (function or list): A function which computes a multiplicative
Reported by Pylint.
Line: 37
Column: 9
raise ValueError("Expected {} lr_lambdas, but got {}".format(
len(sparsifier.module_groups), len(sl_lambda)))
self.sl_lambdas = list(sl_lambda)
super(LambdaSL, self).__init__(sparsifier, last_epoch, verbose)
def get_sl(self):
if not self._get_sl_called_within_step:
warnings.warn(
"To get the last sparsity level computed by the scheduler, "
Reported by Pylint.
Line: 39
Column: 5
self.sl_lambdas = list(sl_lambda)
super(LambdaSL, self).__init__(sparsifier, last_epoch, verbose)
def get_sl(self):
if not self._get_sl_called_within_step:
warnings.warn(
"To get the last sparsity level computed by the scheduler, "
"please use `get_last_sl()`.")
return [base_sl * lmbda(self.last_epoch)
Reported by Pylint.
torch/csrc/jit/codegen/fuser/cpu/fused_kernel.cpp
5 issues
Line: 59
Column: 11
CWE codes:
78
Suggestion:
Try using a library call that implements the same functionality, if one is available.
#ifdef _MSC_VER
return (run(cmd.c_str()) == 0);
#else
return (system(cmd.c_str()) == 0);
#endif
}
#ifdef _MSC_VER
c10::optional<std::wstring> exec(const std::wstring& cmd) {
Reported by FlawFinder.
Line: 279
Column: 11
CWE codes:
78
Suggestion:
Try using a library call that implements the same functionality, if one is available.
#ifdef _MSC_VER
intptr_t r = run(result);
#else
int r = system(result.c_str());
#endif
if (config.openmp && r != 0) {
std::cerr
<< "warning: pytorch jit fuser failed to compile with openmp, trying without it...\n";
config.openmp = false; // disable for future compiles
Reported by FlawFinder.
Line: 300
Column: 11
CWE codes:
78
Suggestion:
Try using a library call that implements the same functionality, if one is available.
TemplateEnv env;
env.s("so_file", so_file);
std::string cmd = format(disas_string, env);
int r = system(cmd.c_str());
AT_ASSERT(r == 0);
}
FusedKernelCPU::FusedKernelCPU(
std::string name,
Reported by FlawFinder.
Line: 181
Column: 27
CWE codes:
807
20
Suggestion:
Check environment variables carefully before using them
// of compilation attempts.
struct CompilerConfig {
CompilerConfig() {
const char* cxx_env = getenv("CXX");
if (cxx_env != nullptr) {
cxx = cxx_env;
}
#ifdef _MSC_VER
Reported by FlawFinder.
Line: 24
Column: 3
CWE codes:
119
120
Suggestion:
Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length.
#ifdef _MSC_VER
static const std::string getTempPath() {
wchar_t lpTempPathBuffer[MAX_PATH];
DWORD dwRetVal = GetTempPathW(
MAX_PATH, // length of the buffer
lpTempPathBuffer); // buffer for path
Reported by FlawFinder.
caffe2/python/layers/batch_huber_loss.py
5 issues
Line: 1
Column: 1
# @package batch_huber_loss
# Module caffe2.python.layers.batch_huber_loss
from caffe2.python import core, schema
from caffe2.python.layers.layers import (
Reported by Pylint.
Line: 18
Column: 1
import numpy as np
class BatchHuberLoss(ModelLayer):
def __init__(self, model, input_record, name='batch_huber_loss', delta=1.0, **kwargs):
super(BatchHuberLoss, self).__init__(model, name, input_record, **kwargs)
assert delta > 0
Reported by Pylint.
Line: 21
Column: 9
class BatchHuberLoss(ModelLayer):
def __init__(self, model, input_record, name='batch_huber_loss', delta=1.0, **kwargs):
super(BatchHuberLoss, self).__init__(model, name, input_record, **kwargs)
assert delta > 0
self._delta = delta
Reported by Pylint.
Line: 23
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
def __init__(self, model, input_record, name='batch_huber_loss', delta=1.0, **kwargs):
super(BatchHuberLoss, self).__init__(model, name, input_record, **kwargs)
assert delta > 0
self._delta = delta
assert schema.is_schema_subset(
schema.Struct(
Reported by Bandit.
Line: 27
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
self._delta = delta
assert schema.is_schema_subset(
schema.Struct(
('label', schema.Scalar()),
('prediction', schema.Scalar())
),
input_record
Reported by Bandit.
aten/src/ATen/native/TensorCompare.cpp
5 issues
Line: 92
Column: 76
CWE codes:
190
Suggestion:
If the source is untrusted, check both the minimum and the maximum, even if the input had no minus sign (large numbers can roll over into a negative number; consider saving to an unsigned value if that is intended).
DEFINE_DISPATCH(clamp_max_scalar_stub); // NOLINT(cppcoreguidelines-avoid-non-const-global-variables)
DEFINE_DISPATCH(isin_default_stub); // NOLINT(cppcoreguidelines-avoid-non-const-global-variables)
bool allclose(const Tensor& self, const Tensor& other, double rtol, double atol, bool equal_nan) {
return at::isclose(self, other, rtol, atol, equal_nan).all().item<uint8_t>();
}
// Note [closeness]
// A number A is close to B when either:
Reported by FlawFinder.
Line: 93
Column: 41
CWE codes:
190
Suggestion:
If the source is untrusted, check both the minimum and the maximum, even if the input had no minus sign (large numbers can roll over into a negative number; consider saving to an unsigned value if that is intended).
DEFINE_DISPATCH(isin_default_stub); // NOLINT(cppcoreguidelines-avoid-non-const-global-variables)
bool allclose(const Tensor& self, const Tensor& other, double rtol, double atol, bool equal_nan) {
return at::isclose(self, other, rtol, atol, equal_nan).all().item<uint8_t>();
}
// Note [closeness]
// A number A is close to B when either:
//
Reported by FlawFinder.
Line: 109
Column: 77
CWE codes:
190
Suggestion:
If the source is untrusted, check both the minimum and the maximum, even if the input had no minus sign (large numbers can roll over into a negative number; consider saving to an unsigned value if that is intended).
// TODO: use bitwise operator overloads once we add them
// TODO: revisit complex inputs and equal_nan=true after
// https://github.com/numpy/numpy/issues/15959 is resolved
Tensor isclose(const Tensor& self, const Tensor& other, double rtol, double atol, bool equal_nan) {
TORCH_CHECK(self.scalar_type() == other.scalar_type(), self.scalar_type(), " did not match ", other.scalar_type());
TORCH_CHECK(!(self.is_complex() && equal_nan),
"isclose with equal_nan=True is not supported for complex inputs.");
TORCH_CHECK(!(self.is_quantized() || other.is_quantized()),
"isclose is not supported for quantized inputs.");
Reported by FlawFinder.
Line: 120
Column: 82
CWE codes:
190
Suggestion:
If the source is untrusted, check both the minimum and the maximum, even if the input had no minus sign (large numbers can roll over into a negative number; consider saving to an unsigned value if that is intended).
// Note: consistent with Python's isclose but divergent from NumPy's, which
// allows negative atol and rtol.
TORCH_CHECK(rtol >= 0, "rtol must be greater than or equal to zero, but got ", rtol);
TORCH_CHECK(atol >= 0, "atol must be greater than or equal to zero, but got ", atol);
// Computes equality closeness
Tensor close = self == other;
if (equal_nan && self.is_floating_point()) {
close.__ior__((self != self).__iand__(other != other));
Reported by FlawFinder.
Line: 120
Column: 15
CWE codes:
190
Suggestion:
If the source is untrusted, check both the minimum and the maximum, even if the input had no minus sign (large numbers can roll over into a negative number; consider saving to an unsigned value if that is intended).
// Note: consistent with Python's isclose but divergent from NumPy's, which
// allows negative atol and rtol.
TORCH_CHECK(rtol >= 0, "rtol must be greater than or equal to zero, but got ", rtol);
TORCH_CHECK(atol >= 0, "atol must be greater than or equal to zero, but got ", atol);
// Computes equality closeness
Tensor close = self == other;
if (equal_nan && self.is_floating_point()) {
close.__ior__((self != self).__iand__(other != other));
Reported by FlawFinder.