The following issues were found:
caffe2/python/normalizer_test.py
5 issues
Line: 15
Column: 13
bn = BatchNormalizer(momentum=0.1)
with UseNormalizer({'BATCH': bn}):
normalizer = NormalizerContext.current().get_normalizer('BATCH')
self.assertEquals(bn, normalizer)
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python.normalizer_context import UseNormalizer, NormalizerContext
from caffe2.python.normalizer import BatchNormalizer
from caffe2.python.layer_test_util import LayersTestCase
Reported by Pylint.
Line: 10
Column: 1
from caffe2.python.layer_test_util import LayersTestCase
class TestNormalizerContext(LayersTestCase):
def test_normalizer_context(self):
bn = BatchNormalizer(momentum=0.1)
with UseNormalizer({'BATCH': bn}):
normalizer = NormalizerContext.current().get_normalizer('BATCH')
self.assertEquals(bn, normalizer)
Reported by Pylint.
Line: 11
Column: 5
class TestNormalizerContext(LayersTestCase):
def test_normalizer_context(self):
bn = BatchNormalizer(momentum=0.1)
with UseNormalizer({'BATCH': bn}):
normalizer = NormalizerContext.current().get_normalizer('BATCH')
self.assertEquals(bn, normalizer)
Reported by Pylint.
Line: 12
Column: 9
class TestNormalizerContext(LayersTestCase):
def test_normalizer_context(self):
bn = BatchNormalizer(momentum=0.1)
with UseNormalizer({'BATCH': bn}):
normalizer = NormalizerContext.current().get_normalizer('BATCH')
self.assertEquals(bn, normalizer)
Reported by Pylint.
test/mobile/nnc/test_nnc_backend.cpp
5 issues
Line: 110
std::vector<int64_t> data_;
};
TORCH_LIBRARY(_TorchScriptTesting, m) {
m.class_<FakeTensor>("_MobileNNCFakeTensor")
.def(torch::init<std::vector<int64_t>>())
.def("get", &FakeTensor::get)
.def_pickle(
[](c10::intrusive_ptr<FakeTensor> self) { // __getstate__
Reported by Cppcheck.
Line: 186
Column: 33
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
// Load and run the saved model.
auto loaded_module = _load_for_mobile(ss);
auto result = loaded_module.forward(inputs);
EXPECT_TRUE(result.toTensor().equal(3.0 * torch::ones({4, 4})));
EXPECT_TRUE(result.toTensor().equal(reference.toTensor()));
}
TEST(NNCBackendTest, FakeTensor) {
script::Module m("m");
Reported by FlawFinder.
Line: 187
Column: 33
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
auto loaded_module = _load_for_mobile(ss);
auto result = loaded_module.forward(inputs);
EXPECT_TRUE(result.toTensor().equal(3.0 * torch::ones({4, 4})));
EXPECT_TRUE(result.toTensor().equal(reference.toTensor()));
}
TEST(NNCBackendTest, FakeTensor) {
script::Module m("m");
auto param_cls = getCustomClass(
Reported by FlawFinder.
Line: 229
Column: 33
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
// Load and run the saved model.
auto loaded_module = _load_for_mobile(ss);
auto result = loaded_module.forward(inputs);
EXPECT_TRUE(result.toTensor().equal(5.0 * torch::ones({4, 4})));
EXPECT_TRUE(result.toTensor().equal(reference.toTensor()));
}
} // namespace nnc
} // namespace mobile
Reported by FlawFinder.
Line: 230
Column: 33
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
auto loaded_module = _load_for_mobile(ss);
auto result = loaded_module.forward(inputs);
EXPECT_TRUE(result.toTensor().equal(5.0 * torch::ones({4, 4})));
EXPECT_TRUE(result.toTensor().equal(reference.toTensor()));
}
} // namespace nnc
} // namespace mobile
} // namespace jit
Reported by FlawFinder.
caffe2/python/normalizer_context.py
5 issues
Line: 1
Column: 1
# @package regularizer_context
# Module caffe2.python.normalizer_context
from caffe2.python import context
from caffe2.python.modifier_context import (
Reported by Pylint.
Line: 18
Column: 5
provide context to allow param_info to have different normalizers
"""
def has_normalizer(self, name):
return self._has_modifier(name)
def get_normalizer(self, name):
assert self.has_normalizer(name), (
"{} normalizer is not provided!".format(name))
Reported by Pylint.
Line: 21
Column: 5
def has_normalizer(self, name):
return self._has_modifier(name)
def get_normalizer(self, name):
assert self.has_normalizer(name), (
"{} normalizer is not provided!".format(name))
return self._get_modifier(name)
Reported by Pylint.
Line: 22
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
return self._has_modifier(name)
def get_normalizer(self, name):
assert self.has_normalizer(name), (
"{} normalizer is not provided!".format(name))
return self._get_modifier(name)
class UseNormalizer(UseModifierBase):
Reported by Bandit.
Line: 27
Column: 1
return self._get_modifier(name)
class UseNormalizer(UseModifierBase):
'''
context class to allow setting the current context.
Example usage with layer:
normalizers = {'norm1': norm1, 'norm2': norm2}
with UseNormalizer(normalizers):
Reported by Pylint.
caffe2/python/optimizer_context.py
5 issues
Line: 1
Column: 1
## @package optimizer_context
# Module caffe2.python.optimizer_context
from caffe2.python import context
from caffe2.python.modifier_context import (
Reported by Pylint.
Line: 21
Column: 5
provide context to allow param_info to have different optimizers
"""
def has_optimizer(self, name):
return self._has_modifier(name)
def get_optimizer(self, name):
assert self.has_optimizer(name), (
"{} optimizer is not provided!".format(name))
Reported by Pylint.
Line: 24
Column: 5
def has_optimizer(self, name):
return self._has_modifier(name)
def get_optimizer(self, name):
assert self.has_optimizer(name), (
"{} optimizer is not provided!".format(name))
return self._get_modifier(name)
Reported by Pylint.
Line: 25
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
return self._has_modifier(name)
def get_optimizer(self, name):
assert self.has_optimizer(name), (
"{} optimizer is not provided!".format(name))
return self._get_modifier(name)
class UseOptimizer(UseModifierBase):
Reported by Bandit.
Line: 30
Column: 1
return self._get_modifier(name)
class UseOptimizer(UseModifierBase):
'''
context class to allow setting the current context.
Example usage with brew:
- with UseOptimizer(optim):
brew.func
Reported by Pylint.
test/cpp/api/imethod.cpp
5 issues
Line: 13
Column: 40
CWE codes:
807
20
Suggestion:
Check environment variables carefully before using them
// TODO(T96218435): Enable the following tests in OSS.
TEST(IMethodTest, CallMethod) {
auto script_model = torch::jit::load(getenv("SIMPLE_JIT"));
auto script_method = script_model.get_method("forward");
torch::deploy::InterpreterManager manager(3);
torch::deploy::Package p = manager.load_package(getenv("SIMPLE"));
auto py_model = p.load_pickle("model", "model.pkl");
Reported by FlawFinder.
Line: 17
Column: 51
CWE codes:
807
20
Suggestion:
Check environment variables carefully before using them
auto script_method = script_model.get_method("forward");
torch::deploy::InterpreterManager manager(3);
torch::deploy::Package p = manager.load_package(getenv("SIMPLE"));
auto py_model = p.load_pickle("model", "model.pkl");
torch::deploy::PythonMethodWrapper py_method(py_model, "forward");
auto input = torch::ones({10, 20});
auto output_py = py_method({input});
Reported by FlawFinder.
Line: 34
Column: 39
CWE codes:
807
20
Suggestion:
Check environment variables carefully before using them
}
TEST(IMethodTest, GetArgumentNames) {
auto scriptModel = torch::jit::load(getenv("SIMPLE_JIT"));
auto scriptMethod = scriptModel.get_method("forward");
auto& scriptNames = scriptMethod.getArgumentNames();
EXPECT_EQ(scriptNames.size(), 1);
EXPECT_STREQ(scriptNames[0].c_str(), "input");
Reported by FlawFinder.
Line: 42
Column: 57
CWE codes:
807
20
Suggestion:
Check environment variables carefully before using them
EXPECT_STREQ(scriptNames[0].c_str(), "input");
torch::deploy::InterpreterManager manager(3);
torch::deploy::Package package = manager.load_package(getenv("SIMPLE"));
auto pyModel = package.load_pickle("model", "model.pkl");
torch::deploy::PythonMethodWrapper pyMethod(pyModel, "forward");
auto& pyNames = pyMethod.getArgumentNames();
EXPECT_EQ(pyNames.size(), 1);
Reported by FlawFinder.
Line: 29
Column: 32
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
auto output_py_tensor = output_py.toTensor();
auto output_script_tensor = output_script.toTensor();
EXPECT_TRUE(output_py_tensor.equal(output_script_tensor));
EXPECT_EQ(output_py_tensor.numel(), 200);
}
TEST(IMethodTest, GetArgumentNames) {
auto scriptModel = torch::jit::load(getenv("SIMPLE_JIT"));
Reported by FlawFinder.
test/typing/pass/creation_ops.py
5 issues
Line: 2
Column: 1
# flake8: noqa
import torch
from torch.testing._internal.common_utils import TEST_NUMPY
if TEST_NUMPY:
import numpy as np
# From the docs, there are quite a few ways to create a tensor:
# https://pytorch.org/docs/stable/tensors.html
Reported by Pylint.
Line: 3
Column: 1
# flake8: noqa
import torch
from torch.testing._internal.common_utils import TEST_NUMPY
if TEST_NUMPY:
import numpy as np
# From the docs, there are quite a few ways to create a tensor:
# https://pytorch.org/docs/stable/tensors.html
Reported by Pylint.
Line: 110
Column: 1
torch.complex(real, imag)
# torch.polar
abs = torch.tensor([1, 2], dtype=torch.float64)
pi = torch.acos(torch.zeros(1)).item() * 2
angle = torch.tensor([pi / 2, 5 * pi / 4], dtype=torch.float64)
torch.polar(abs, angle)
# torch.heaviside
Reported by Pylint.
Line: 1
Column: 1
# flake8: noqa
import torch
from torch.testing._internal.common_utils import TEST_NUMPY
if TEST_NUMPY:
import numpy as np
# From the docs, there are quite a few ways to create a tensor:
# https://pytorch.org/docs/stable/tensors.html
Reported by Pylint.
Line: 99
Column: 1
# torch.quantize_per_channel
x = torch.tensor([[-1.0, 0.0], [1.0, 2.0]])
quant = torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8)
# torch.dequantize
torch.dequantize(x)
# torch.complex
Reported by Pylint.
torch/distributed/argparse_util.py
5 issues
Line: 1
Column: 1
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
from argparse import Action
Reported by Pylint.
Line: 12
Column: 1
from argparse import Action
class env(Action):
"""
Gets argument values from ``PET_{dest}`` before defaulting
to the given ``default`` value. For flags (e.g. ``--standalone``)
use ``check_env`` instead.
Reported by Pylint.
Line: 12
Column: 1
from argparse import Action
class env(Action):
"""
Gets argument values from ``PET_{dest}`` before defaulting
to the given ``default`` value. For flags (e.g. ``--standalone``)
use ``check_env`` instead.
Reported by Pylint.
Line: 60
Column: 1
setattr(namespace, self.dest, values)
class check_env(Action):
"""
For flags, checks whether the env var ``PET_{dest}`` exists
before defaulting to the given ``default`` value. Equivalent to
``store_true`` argparse built-in action except that the argument can
be omitted from the commandline if the env var is present and has a
Reported by Pylint.
Line: 60
Column: 1
setattr(namespace, self.dest, values)
class check_env(Action):
"""
For flags, checks whether the env var ``PET_{dest}`` exists
before defaulting to the given ``default`` value. Equivalent to
``store_true`` argparse built-in action except that the argument can
be omitted from the commandline if the env var is present and has a
Reported by Pylint.
torch/autograd/_functions/utils.py
5 issues
Line: 37
Column: 5
supported = True
len1 = len(dims1)
len2 = len(dims2)
numel1 = reduce(lambda x, y: x * y, dims1)
numel2 = reduce(lambda x, y: x * y, dims2)
if len1 < len2:
broadcast = True
if numel2 != 1:
supported = False
Reported by Pylint.
Line: 1
Column: 1
from functools import reduce
def maybe_view(tensor, size, check_same_size=True):
if check_same_size and tensor.size() == size:
return tensor
return tensor.contiguous().view(size)
Reported by Pylint.
Line: 4
Column: 1
from functools import reduce
def maybe_view(tensor, size, check_same_size=True):
if check_same_size and tensor.size() == size:
return tensor
return tensor.contiguous().view(size)
Reported by Pylint.
Line: 10
Column: 1
return tensor.contiguous().view(size)
def maybe_unexpand(tensor, old_size, check_same_size=True):
if check_same_size and tensor.size() == old_size:
return tensor
num_unsqueezed = tensor.dim() - len(old_size)
expanded_dims = [dim for dim, (expanded, original)
in enumerate(zip(tensor.size()[num_unsqueezed:], old_size))
Reported by Pylint.
Line: 32
Column: 1
# 1) Only one element in dims2, such as dims2 = [1, 1]
# 2) dims2 is suffix of dims1, such as dims1 = [2, 3, 4], and dims2 = [3, 4]
# Details can be found here: https://github.com/onnx/onnx/blob/master/docs/Operators.md#Gemm
def check_onnx_broadcast(dims1, dims2):
broadcast = False
supported = True
len1 = len(dims1)
len2 = len(dims2)
numel1 = reduce(lambda x, y: x * y, dims1)
Reported by Pylint.
torch/distributed/elastic/rendezvous/c10d_rendezvous_backend.py
5 issues
Line: 21
Column: 1
construct_and_record_rdzv_event,
)
from .api import (
RendezvousConnectionError,
RendezvousError,
RendezvousParameters,
RendezvousStateError,
)
Reported by Pylint.
Line: 27
Column: 1
RendezvousParameters,
RendezvousStateError,
)
from .dynamic_rendezvous import RendezvousBackend, Token
from .utils import _matches_machine_hostname, parse_rendezvous_endpoint
log = logging.getLogger(__name__)
Reported by Pylint.
Line: 28
Column: 1
RendezvousStateError,
)
from .dynamic_rendezvous import RendezvousBackend, Token
from .utils import _matches_machine_hostname, parse_rendezvous_endpoint
log = logging.getLogger(__name__)
class C10dRendezvousBackend(RendezvousBackend):
Reported by Pylint.
Line: 1
Column: 1
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import binascii
import logging
import os
Reported by Pylint.
Line: 256
Column: 5
backend = C10dRendezvousBackend(store, params.run_id)
except Exception as e:
construct_and_record_rdzv_event(
message=f"{type(e).__name__}: {str(e)}",
run_id=params.run_id,
node_state=NodeState.FAILED,
)
Reported by Pylint.
torch/csrc/distributed/c10d/Utils.cpp
5 issues
Line: 28
Column: 30
CWE codes:
807
20
Suggestion:
Check environment variables carefully before using them
const char* kDistDebugOffLogLevel = "OFF";
std::string parse_env(const char* env_var_name) {
char* stringValue = std::getenv(env_var_name);
std::string res = "N/A";
if (stringValue != nullptr) {
res = stringValue;
}
return res;
Reported by FlawFinder.
Line: 129
Column: 3
CWE codes:
119
120
Suggestion:
Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length
} // namespace
std::string sockaddrToString(struct ::sockaddr* addr) {
char address[INET6_ADDRSTRLEN + 1];
if (addr->sa_family == AF_INET) {
struct ::sockaddr_in* s = reinterpret_cast<struct ::sockaddr_in*>(addr);
SYSCHECK(
::inet_ntop(AF_INET, &(s->sin_addr), address, INET_ADDRSTRLEN),
__output != nullptr)
Reported by FlawFinder.
Line: 44
Column: 53
CWE codes:
126
} else {
levelStr = debugLevel.c_str();
TORCH_CHECK(
strncmp(levelStr, kDistDebugDetailLogLevel, strlen(kDistDebugDetailLogLevel)) == 0
|| strncmp(levelStr, kDistDebugInfoLogLevel, strlen(kDistDebugInfoLogLevel)) == 0
|| strncmp(levelStr, kDistDebugOffLogLevel, strlen(kDistDebugOffLogLevel)) == 0,
c10::str(
"Expected environment variable TORCH_DISTRIBUTED_DEBUG to be one of ",
kDistDebugDetailLogLevel,
Reported by FlawFinder.
Line: 45
Column: 54
CWE codes:
126
levelStr = debugLevel.c_str();
TORCH_CHECK(
strncmp(levelStr, kDistDebugDetailLogLevel, strlen(kDistDebugDetailLogLevel)) == 0
|| strncmp(levelStr, kDistDebugInfoLogLevel, strlen(kDistDebugInfoLogLevel)) == 0
|| strncmp(levelStr, kDistDebugOffLogLevel, strlen(kDistDebugOffLogLevel)) == 0,
c10::str(
"Expected environment variable TORCH_DISTRIBUTED_DEBUG to be one of ",
kDistDebugDetailLogLevel,
" ",
Reported by FlawFinder.
Line: 46
Column: 53
CWE codes:
126
TORCH_CHECK(
strncmp(levelStr, kDistDebugDetailLogLevel, strlen(kDistDebugDetailLogLevel)) == 0
|| strncmp(levelStr, kDistDebugInfoLogLevel, strlen(kDistDebugInfoLogLevel)) == 0
|| strncmp(levelStr, kDistDebugOffLogLevel, strlen(kDistDebugOffLogLevel)) == 0,
c10::str(
"Expected environment variable TORCH_DISTRIBUTED_DEBUG to be one of ",
kDistDebugDetailLogLevel,
" ",
kDistDebugInfoLogLevel,
Reported by FlawFinder.