The following issues were found by static analysis (Pylint, Bandit, Cppcheck, and FlawFinder), grouped by file:
torch/distributed/elastic/events/handlers.py
4 issues
Line: 21
Column: 5
def get_logging_handler(destination: str = "null") -> logging.Handler:
global _log_handlers
return _log_handlers[destination]
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
Reported by Pylint.
Line: 20
Column: 1
}
def get_logging_handler(destination: str = "null") -> logging.Handler:
global _log_handlers
return _log_handlers[destination]
Reported by Pylint.
Line: 21
Column: 5
def get_logging_handler(destination: str = "null") -> logging.Handler:
global _log_handlers
return _log_handlers[destination]
Reported by Pylint.
tools/vscode_settings.py
4 issues
Line: 13
Column: 12
path = folder / 'settings.json'
try:
current = json.loads(path.read_text())
except Exception:
current = {}
with open(path, 'w') as f:
json.dump({**current, **recommended}, f, indent=2)
f.write('\n')
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/env python3
import json
from pathlib import Path
def main() -> None:
folder = Path('.vscode')
recommended = json.loads((folder / 'settings_recommended.json').read_text())
Reported by Pylint.
Line: 7
Column: 1
from pathlib import Path
def main() -> None:
folder = Path('.vscode')
recommended = json.loads((folder / 'settings_recommended.json').read_text())
path = folder / 'settings.json'
try:
current = json.loads(path.read_text())
Reported by Pylint.
Line: 15
Column: 29
current = json.loads(path.read_text())
except Exception:
current = {}
with open(path, 'w') as f:
json.dump({**current, **recommended}, f, indent=2)
f.write('\n')
if __name__ == '__main__':
Reported by Pylint.
torch/distributed/elastic/rendezvous/utils.py
4 issues
Line: 1
Column: 1
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import ipaddress
import random
import re
Reported by Pylint.
Line: 32
Column: 9
return config
key_values = config_str.split(",")
for kv in key_values:
key, *values = kv.split("=", 1)
key = key.strip()
if not key:
raise ValueError(
Reported by Pylint.
Line: 157
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b311-random
bound within which a random delay will be picked.
"""
if isinstance(seconds, tuple):
seconds = random.uniform(*seconds)
# Ignore delay requests that are less than 10 milliseconds.
if seconds >= 0.01:
time.sleep(seconds)
Reported by Bandit.
Line: 175
Column: 5
# The state of the timer is hold in a separate context object to avoid a
# reference cycle between the timer and the background thread.
class _Context:
interval: float
function: Callable[..., None]
args: Tuple[Any, ...]
kwargs: Dict[str, Any]
stop_event: Event
Reported by Pylint.
torch/distributed/elastic/rendezvous/static_tcp_rendezvous.py
4 issues
Line: 1
Column: 1
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
Reported by Pylint.
Line: 21
Column: 1
log = logging.getLogger(__name__)
_default_timeout_seconds = 600
class StaticTCPRendezvous(RendezvousHandler):
"""
Static rendezvous that is a wrapper around the TCPStore.
Reported by Pylint.
Line: 31
Column: 5
listener on the agent with group_rank=0
"""
def __init__(
self,
master_addr: str,
master_port: int,
rank: int,
world_size: int,
Reported by Pylint.
Line: 82
Column: 1
return True
def create_rdzv_handler(params: RendezvousParameters) -> RendezvousHandler:
if "rank" not in params.config:
raise ValueError(
"rank is absent in RendezvousParameters."
"Try add --node_rank to the cmd request"
)
Reported by Pylint.
torch/csrc/jit/frontend/ir_emitter.cpp
4 issues
Line: 1747
CWE codes:
908
if (true_exits && false_exits) {
continue;
} else if (true_exits) {
tv = graph->createUninitialized(fv->type())
->insertBefore(true_block->return_node())
->output();
graph->createStore(x, tv)->insertBefore(true_block->return_node());
} else if (false_exits) {
fv = graph->createUninitialized(tv->type())
Reported by Cppcheck.
Line: 1752
CWE codes:
908
->output();
graph->createStore(x, tv)->insertBefore(true_block->return_node());
} else if (false_exits) {
fv = graph->createUninitialized(tv->type())
->insertBefore(false_block->return_node())
->output();
graph->createStore(x, fv)->insertBefore(false_block->return_node());
}
Reported by Cppcheck.
Line: 1758
CWE codes:
908
graph->createStore(x, fv)->insertBefore(false_block->return_node());
}
auto unified = unifyTypes(tv->type(), fv->type());
// attempt to unify the types. we allow variables to be set to different
// types in each branch as long as that variable is not already in scope,
// or if that variable does not get used later. here, we save the error
// so that the error message will be more informative in the case that is
Reported by Cppcheck.
Line: 1758
CWE codes:
908
graph->createStore(x, fv)->insertBefore(false_block->return_node());
}
auto unified = unifyTypes(tv->type(), fv->type());
// attempt to unify the types. we allow variables to be set to different
// types in each branch as long as that variable is not already in scope,
// or if that variable does not get used later. here, we save the error
// so that the error message will be more informative in the case that is
Reported by Cppcheck.
torch/ao/sparsity/sparsifier/utils.py
4 issues
Line: 1
Column: 1
from torch import nn
# Parametrizations
class FakeSparsity(nn.Module):
r"""Parametrization for the weights. Should be attached to the 'weight' or
any other parmeter that requires a mask applied to it.
Note::
Reported by Pylint.
Line: 18
Column: 5
super().__init__()
self.register_buffer('mask', mask)
def forward(self, x):
assert self.mask.shape == x.shape
return self.mask * x
Reported by Pylint.
Line: 18
Column: 5
super().__init__()
self.register_buffer('mask', mask)
def forward(self, x):
assert self.mask.shape == x.shape
return self.mask * x
Reported by Pylint.
Line: 19
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
self.register_buffer('mask', mask)
def forward(self, x):
assert self.mask.shape == x.shape
return self.mask * x
Reported by Bandit.
torch/csrc/deploy/example/fx/some_dependency.py
4 issues
Line: 1
Column: 1
# dependency for torch package
def a_non_torch_leaf(a: int, b):
return a * b
Reported by Pylint.
Line: 3
Column: 1
# dependency for torch package
def a_non_torch_leaf(a: int, b):
return a * b
Reported by Pylint.
Line: 3
Column: 1
# dependency for torch package
def a_non_torch_leaf(a: int, b):
return a * b
Reported by Pylint.
Line: 3
Column: 1
# dependency for torch package
def a_non_torch_leaf(a: int, b):
return a * b
Reported by Pylint.
torch/csrc/deploy/interpreter/interpreter_impl.cpp
4 issues
Line: 93
Column: 5
CWE codes:
134
Suggestion:
Use a constant format string for syslog
_(spwd) \
_(_ssl) \
_(_struct) \
_(syslog) \
_(termios) \
_(_testbuffer) \
_(_testcapi) \
_(_testimportmultiple) \
_(_testmultiphase) \
Reported by FlawFinder.
Line: 235
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
}
/* Copy the tables into the new memory */
memcpy(p, PyImport_FrozenModules, (c + 1) * sizeof(struct _frozen));
memcpy(p + c, frozenpython, (a + 1) * sizeof(struct _frozen));
memcpy(p + a + c, frozentorch, (b + 1) * sizeof(struct _frozen));
PyImport_FrozenModules = p;
return res;
}
Reported by FlawFinder.
Line: 236
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
/* Copy the tables into the new memory */
memcpy(p, PyImport_FrozenModules, (c + 1) * sizeof(struct _frozen));
memcpy(p + c, frozenpython, (a + 1) * sizeof(struct _frozen));
memcpy(p + a + c, frozentorch, (b + 1) * sizeof(struct _frozen));
PyImport_FrozenModules = p;
return res;
}
Reported by FlawFinder.
Line: 237
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
/* Copy the tables into the new memory */
memcpy(p, PyImport_FrozenModules, (c + 1) * sizeof(struct _frozen));
memcpy(p + c, frozenpython, (a + 1) * sizeof(struct _frozen));
memcpy(p + a + c, frozentorch, (b + 1) * sizeof(struct _frozen));
PyImport_FrozenModules = p;
return res;
}
static py::object global_impl(const char* module, const char* name) {
Reported by FlawFinder.
torch/csrc/autograd/FunctionsManual.cpp
3 issues
Line: 84
Column: 7
CWE codes:
126
template <typename T>
T not_implemented_base(const char* name, const char* reason) {
std::string msg = c10::str("the derivative for '", name, "' is not implemented.");
if (strlen(reason) > 0) {
msg = c10::str(msg, " ", reason);
};
TORCH_CHECK_NOT_IMPLEMENTED(false, msg);
}
Reported by FlawFinder.
Line: 225
Column: 16
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
}
Tensor pow_backward(Tensor grad, const Tensor & self, const Scalar & exponent) {
if (exponent.equal(0.0)) {
return at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
} else {
auto grad_lambda = [&](auto exp) { return grad * (exp * self.pow(exp - 1)).conj(); };
Tensor out = (exponent.isComplex()) ? grad_lambda(exponent.toComplexDouble()) : grad_lambda(exponent.toDouble());
return handle_r_to_c(self, out);
Reported by FlawFinder.
Line: 263
Column: 12
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
Tensor pow_backward_exponent(Tensor grad, const Scalar & base, const Tensor& exponent, Tensor result) {
auto grad_lambda = [](Tensor a, Scalar b) { return (a * b.log()).conj(); };
if (base.equal(0.0)) {
auto cond = [](auto exp) {
if (exp.is_complex()) {
return at::logical_and(at::imag(exp) == 0, at::real(exp) >= 0);
} else {
return exp >=0;
Reported by FlawFinder.
tools/codegen/api/meta.py
3 issues
Line: 1
Column: 1
from tools.codegen.model import NativeFunctionsGroup
# Follows dispatcher calling convention, but:
# - Mutable arguments not allowed. Meta functions are always
# written in functional form. Look at FunctionSchema.signature()
# - No tensor returns; instead we return a TensorMeta describing
# the tensor in question
def name(g: NativeFunctionsGroup) -> str:
Reported by Pylint.
Line: 9
Column: 1
# - No tensor returns; instead we return a TensorMeta describing
# the tensor in question
def name(g: NativeFunctionsGroup) -> str:
# use the overload name from the functional version
return str(g.functional.func.name).replace('.', '_')
Reported by Pylint.
Line: 9
Column: 1
# - No tensor returns; instead we return a TensorMeta describing
# the tensor in question
def name(g: NativeFunctionsGroup) -> str:
# use the overload name from the functional version
return str(g.functional.func.name).replace('.', '_')
Reported by Pylint.