The following issues were found:
torch/distributed/elastic/rendezvous/api.py
4 issues
Line: 199
Column: 13
try:
return int(value)
except ValueError:
raise ValueError(
f"The rendezvous configuration option '{key}' does not represent a valid integer "
"value."
)
Reported by Pylint.
Line: 248
Column: 13
try:
creator = self._registry[params.backend]
except KeyError:
raise ValueError(
f"The rendezvous backend '{params.backend}' is not registered. Did you forget "
f"to call `{self.register.__name__}`?"
)
handler = creator(params)
Reported by Pylint.
Line: 1
Column: 1
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, Optional, Tuple
Reported by Pylint.
Line: 139
Column: 5
Additional parameters for the specified backend.
"""
def __init__(
self,
backend: str,
endpoint: str,
run_id: str,
min_nodes: int,
Reported by Pylint.
torch/csrc/api/include/torch/serialize/input-archive.h
4 issues
Line: 47
Column: 8
CWE codes:
120
20
~InputArchive() = default;
/// Reads an `IValue` associated with a given `key`.
void read(const std::string& key, c10::IValue& ivalue);
/// Reads an `IValue` associated with a given `key`. If there is no `IValue`
/// associated with the `key`, this returns false, otherwise it returns true.
bool try_read(const std::string& key, c10::IValue& ivalue);
Reported by FlawFinder.
Line: 62
Column: 8
CWE codes:
120
20
/// Reads a `tensor` associated with a given `key`.
/// If the tensor is expected to be a buffer (not differentiable), `is_buffer`
/// must be `true`.
void read(const std::string& key, Tensor& tensor, bool is_buffer = false);
/// Reads a `InputArchive` associated with a given `key`. If there is no
/// `InputArchive` associated with the `key`, this returns false, otherwise
/// it returns true.
bool try_read(const std::string& key, InputArchive& archive);
Reported by FlawFinder.
Line: 72
Column: 8
CWE codes:
120
20
/// Reads an `InputArchive` associated with a given `key`.
/// The archive can thereafter be used for further deserialization of the
/// nested data.
void read(const std::string& key, InputArchive& archive);
/// Loads the `InputArchive` from a serialized representation stored in the
/// file at `filename`. Storage are remapped using device option. If device
/// is not specified, the module is loaded to the original device.
void load_from(const std::string& filename,
Reported by FlawFinder.
Line: 105
Column: 5
CWE codes:
120
20
/// `OutputArchive` (where `operator()` forwards to `write()`).
template <typename... Ts>
void operator()(Ts&&... ts) {
read(std::forward<Ts>(ts)...);
}
private:
jit::Module module_;
std::string hierarchy_prefix_;
Reported by FlawFinder.
torch/cuda/_utils.py
4 issues
Line: 24
Column: 18
device if :attr:`optional` is ``True``.
"""
if isinstance(device, str):
device = torch.device(device)
if isinstance(device, torch.device):
if allow_cpu:
if device.type not in ['cuda', 'cpu']:
raise ValueError('Expected a cuda or cpu device, but got: {}'.format(device))
elif device.type != 'cuda':
Reported by Pylint.
Line: 25
Column: 27
"""
if isinstance(device, str):
device = torch.device(device)
if isinstance(device, torch.device):
if allow_cpu:
if device.type not in ['cuda', 'cpu']:
raise ValueError('Expected a cuda or cpu device, but got: {}'.format(device))
elif device.type != 'cuda':
raise ValueError('Expected a cuda device, but got: {}'.format(device))
Reported by Pylint.
Line: 1
Column: 1
import torch
from typing import Any
# The _get_device_index has been moved to torch.utils._get_device_index
from torch._utils import _get_device_index as _torch_get_device_index
def _get_device_index(device: Any, optional: bool = False,
allow_cpu: bool = False) -> int:
r"""Gets the device index from :attr:`device`, which can be a torch.device
Reported by Pylint.
Line: 2
Column: 1
import torch
from typing import Any
# The _get_device_index has been moved to torch.utils._get_device_index
from torch._utils import _get_device_index as _torch_get_device_index
def _get_device_index(device: Any, optional: bool = False,
allow_cpu: bool = False) -> int:
r"""Gets the device index from :attr:`device`, which can be a torch.device
Reported by Pylint.
torch/csrc/jit/passes/concat_opt.cpp
4 issues
Line: 31
Column: 6
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
}
}
bool equal(at::ArrayRef<Value*> list1, at::ArrayRef<Value*> list2) {
return list1.size() == list2.size() &&
std::equal(list1.begin(), list1.end(), list2.begin());
}
class ConcatCommonInputsEliminator {
Reported by FlawFinder.
Line: 33
Column: 12
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
bool equal(at::ArrayRef<Value*> list1, at::ArrayRef<Value*> list2) {
return list1.size() == list2.size() &&
std::equal(list1.begin(), list1.end(), list2.begin());
}
class ConcatCommonInputsEliminator {
public:
explicit ConcatCommonInputsEliminator(std::shared_ptr<Graph> graph)
Reported by FlawFinder.
Line: 108
Column: 11
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
auto prev_tensor_inputs =
prev_all_inputs.slice(0, prev_all_inputs.size() - 1);
auto prev_dim = prev_all_inputs.back();
if (equal(curr_tensor_inputs_prefix, prev_tensor_inputs) &&
curr_dim == prev_dim) {
if (!node->isDominatedBy(prev)) {
// We can't use the previous concatenated output if it does not
// dominate the current concat node.
continue;
Reported by FlawFinder.
Line: 153
Column: 11
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
auto prev_tensor_inputs =
prev_all_inputs.slice(0, prev_all_inputs.size() - 1);
auto prev_dim = prev_all_inputs.back();
if (equal(curr_tensor_inputs_suffix, prev_tensor_inputs) &&
curr_dim == prev_dim) {
if (!node->isDominatedBy(prev)) {
// We can't use the previous concatenated list if it does not
// dominate the current list.
continue;
Reported by FlawFinder.
torch/distributed/pipeline/sync/phony.py
4 issues
Line: 13
Column: 1
import torch
from torch import Tensor
from .stream import default_stream, use_stream
__all__: List[str] = []
_phonies: Dict[Tuple[torch.device, bool], Tensor] = {}
Reported by Pylint.
Line: 18
Column: 22
__all__: List[str] = []
_phonies: Dict[Tuple[torch.device, bool], Tensor] = {}
def get_phony(device: torch.device, *, requires_grad: bool) -> Tensor:
"""Gets a phony. Phony is tensor without space. It is useful to make
arbitrary dependency in a autograd graph because it doesn't require any
Reported by Pylint.
Line: 21
Column: 23
_phonies: Dict[Tuple[torch.device, bool], Tensor] = {}
def get_phony(device: torch.device, *, requires_grad: bool) -> Tensor:
"""Gets a phony. Phony is tensor without space. It is useful to make
arbitrary dependency in a autograd graph because it doesn't require any
gradient accumulation.
.. note::
Reported by Pylint.
Line: 45
Column: 21
phony = _phonies[key]
except KeyError:
with use_stream(default_stream(device)):
phony = torch.empty(0, device=device, requires_grad=requires_grad)
_phonies[key] = phony
return phony
Reported by Pylint.
tools/code_coverage/package/tool/parser/gcov_coverage_parser.py
4 issues
Line: 3
Column: 1
from typing import Any, Dict, List, Set
from .coverage_record import CoverageRecord
class GcovCoverageParser:
"""
Accepts a parsed json produced by gcov --json-format -- typically,
representing a single C++ test and produces a list
Reported by Pylint.
Line: 1
Column: 1
from typing import Any, Dict, List, Set
from .coverage_record import CoverageRecord
class GcovCoverageParser:
"""
Accepts a parsed json produced by gcov --json-format -- typically,
representing a single C++ test and produces a list
Reported by Pylint.
Line: 6
Column: 1
from .coverage_record import CoverageRecord
class GcovCoverageParser:
"""
Accepts a parsed json produced by gcov --json-format -- typically,
representing a single C++ test and produces a list
of CoverageRecord(s).
"""
Reported by Pylint.
Line: 27
Column: 5
return True
return False
def parse(self) -> List[CoverageRecord]:
# The JSON format is described in the gcov source code
# https://gcc.gnu.org/onlinedocs/gcc/Invoking-Gcov.html
records: List[CoverageRecord] = []
for file_info in self._llvm_coverage["files"]:
filepath = file_info["file"]
Reported by Pylint.
torch/csrc/jit/tensorexpr/external_functions.cpp
4 issues
Line: 98
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
}
// TODO: can i haz an out version of the conv2d?
memcpy(buf_data[0], r.data_ptr(), r.element_size() * r.numel());
}
void nnc_aten_adaptive_avg_pool2d(
int64_t bufs_num,
void** buf_data,
Reported by FlawFinder.
Line: 140
Column: 5
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
const at::Tensor& x = tensors[1];
std::vector<int64_t> mean_dims(args_num);
if (args_num > 0) {
memcpy(mean_dims.data(), extra_args, sizeof(int64_t) * args_num);
}
try {
at::mean_out(r, x, mean_dims);
} catch (...) {
}
Reported by FlawFinder.
Line: 213
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
const at::Tensor& x = tensors[1];
auto context = reinterpret_cast<LinearOpContext*>(buf_data[2]);
at::Tensor output = context->run(x);
memcpy(
buf_data[0], output.data_ptr(), output.element_size() * output.numel());
}
void nnc_prepacked_conv2d_clamp_run(
int64_t bufs_num,
Reported by FlawFinder.
Line: 233
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
const at::Tensor& x = tensors[1];
auto context = reinterpret_cast<Conv2dOpContext*>(buf_data[2]);
at::Tensor output = context->run(x);
memcpy(
buf_data[0], output.data_ptr(), output.element_size() * output.numel());
}
#endif // USE_XNNPACK
Reported by FlawFinder.
torch/distributed/algorithms/model_averaging/utils.py
4 issues
Line: 23
Column: 19
return
params_it1, params_it2 = itertools.tee(params)
flat_params = torch.cat([p.data.view(-1) for p in params_it1])
flat_params /= dist.get_world_size(group_to_use)
# Make sure the allreduce will not conflict with any other ongoing process group.
if torch.cuda.is_available():
torch.cuda.synchronize()
dist.all_reduce(flat_params, group=group_to_use)
Reported by Pylint.
Line: 19
Column: 8
"""
group_to_use = process_group if process_group is not None else dist.group.WORLD
# Do not update any parameter if not in the process group.
if dist._rank_not_in_group(group_to_use):
return
params_it1, params_it2 = itertools.tee(params)
flat_params = torch.cat([p.data.view(-1) for p in params_it1])
flat_params /= dist.get_world_size(group_to_use)
Reported by Pylint.
Line: 1
Column: 1
# flake8: noqa C101
import itertools
from typing import Iterator
import torch
import torch.distributed as dist
def average_parameters(
Reported by Pylint.
Line: 31
Column: 9
dist.all_reduce(flat_params, group=group_to_use)
offset = 0
for p in params_it2:
p.data = flat_params[offset : offset + p.numel()].view_as(p)
offset += p.numel()
Reported by Pylint.
torch/cpu/amp/autocast_mode.py
4 issues
Line: 8
Column: 49
See :class:`torch.autocast`.
``torch.cpu.amp.autocast(args...)`` is equivalent to ``torch.autocast("cpu", args...)``
"""
def __init__(self, enabled=True, fast_dtype=torch.float16):
super().__init__("cpu", enabled=enabled, fast_dtype=fast_dtype)
Reported by Pylint.
Line: 1
Column: 1
import torch
class autocast(torch.autocast_mode.autocast):
r"""
See :class:`torch.autocast`.
``torch.cpu.amp.autocast(args...)`` is equivalent to ``torch.autocast("cpu", args...)``
"""
def __init__(self, enabled=True, fast_dtype=torch.float16):
super().__init__("cpu", enabled=enabled, fast_dtype=fast_dtype)
Reported by Pylint.
Line: 3
Column: 1
import torch
class autocast(torch.autocast_mode.autocast):
r"""
See :class:`torch.autocast`.
``torch.cpu.amp.autocast(args...)`` is equivalent to ``torch.autocast("cpu", args...)``
"""
def __init__(self, enabled=True, fast_dtype=torch.float16):
super().__init__("cpu", enabled=enabled, fast_dtype=fast_dtype)
Reported by Pylint.
Line: 3
Column: 1
import torch
class autocast(torch.autocast_mode.autocast):
r"""
See :class:`torch.autocast`.
``torch.cpu.amp.autocast(args...)`` is equivalent to ``torch.autocast("cpu", args...)``
"""
def __init__(self, enabled=True, fast_dtype=torch.float16):
super().__init__("cpu", enabled=enabled, fast_dtype=fast_dtype)
Reported by Pylint.
torch/csrc/jit/tensorexpr/mem_dependency_checker.cpp
4 issues
Line: 559
Column: 16
CWE codes:
362/367!
Suggestion:
Set up the correct permissions (e.g., using setuid()) and try to open the file directly
// If there were loads in the indices, this load depends on them, and merge
// them in.
if (!indicesScope->accesses_.empty()) {
for (auto& access : indicesScope->accesses_) {
load->addDependency(access);
access->addDependent(load);
}
mergeScope(indicesScope, indicesScope->parent, false);
}
Reported by FlawFinder.
Line: 560
Column: 27
CWE codes:
362/367!
Suggestion:
Set up the correct permissions (e.g., using setuid()) and try to open the file directly
// them in.
if (!indicesScope->accesses_.empty()) {
for (auto& access : indicesScope->accesses_) {
load->addDependency(access);
access->addDependent(load);
}
mergeScope(indicesScope, indicesScope->parent, false);
}
Reported by FlawFinder.
Line: 82
Column: 66
CWE codes:
120
20
TORCH_INTERNAL_ASSERT(res.second);
}
void AccessInfo::addDependent(const std::shared_ptr<AccessInfo>& read) {
auto res = dependents_.emplace(read->id(), read);
TORCH_INTERNAL_ASSERT(res.second);
}
bool AccessInfo::hasDependency(const std::shared_ptr<AccessInfo>& info) const {
Reported by FlawFinder.
Line: 83
Column: 46
CWE codes:
120
20
}
void AccessInfo::addDependent(const std::shared_ptr<AccessInfo>& read) {
auto res = dependents_.emplace(read->id(), read);
TORCH_INTERNAL_ASSERT(res.second);
}
bool AccessInfo::hasDependency(const std::shared_ptr<AccessInfo>& info) const {
return dependencies_.count(info->id()) != 0;
Reported by FlawFinder.