The following issues were found:
torch/distributed/elastic/multiprocessing/errors/handlers.py
2 issues
Line: 1
Column: 1
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Multiprocessing error-reporting module
Reported by Pylint.
Line: 14
Column: 1
from torch.distributed.elastic.multiprocessing.errors.error_handler import ErrorHandler
def get_error_handler():
    return ErrorHandler()
Reported by Pylint.
torch/distributed/constants.py
1 issue
Line: 1
Column: 1
from torch._C._distributed_c10d import _DEFAULT_PG_TIMEOUT
# Default process group wide timeout, if applicable.
# This only applies to the gloo and nccl backends
# (only if NCCL_BLOCKING_WAIT or NCCL_ASYNC_ERROR_HANDLING is set to 1).
# To make an attempt at backwards compatibility with THD, we use an
# extraordinarily high default timeout, given that THD did not have timeouts.
default_pg_timeout = _DEFAULT_PG_TIMEOUT
Reported by Pylint.
torch/distributed/_sharding_spec/__init__.py
1 issue
Line: 1
Column: 1
from .api import (
    ChunkShardingSpec,
    DevicePlacementSpec,
    EnumerableShardingSpec,
    PlacementSpec,
    ShardMetadata,
    ShardingSpec,
)
Reported by Pylint.
torch/cuda/comm.py
2 issues
Line: 1
Column: 1
# The functions here have been moved to torch.nn.parallel.comm
from torch.nn.parallel.comm import broadcast, broadcast_coalesced, reduce_add, \
    reduce_add_coalesced, scatter, gather
__all__ = ['broadcast', 'broadcast_coalesced', 'reduce_add', 'reduce_add_coalesced', 'scatter', 'gather']
Reported by Pylint.
Line: 5
Column: 1
from torch.nn.parallel.comm import broadcast, broadcast_coalesced, reduce_add, \
    reduce_add_coalesced, scatter, gather
__all__ = ['broadcast', 'broadcast_coalesced', 'reduce_add', 'reduce_add_coalesced', 'scatter', 'gather']
Reported by Pylint.
torch/csrc/serialization.cpp
2 issues
Line: 73
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
  }
  // Slurp it into the buffer we actually want
  memcpy(buf, py_buf, size);
  return size;
}
// Either does fildes.readinto(buf) or fildes.write(buf)
Reported by FlawFinder.
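The suggestion amounts to verifying the destination's capacity before copying. A minimal sketch of such a check, assuming the caller can supply the destination size; the buf_capacity parameter and the copyPyBuffer name are hypothetical, not part of serialization.cpp:

#include <cstring>
#include <stdexcept>
#include <sys/types.h>

// Hypothetical hardened copy: refuse to write past the destination.
ssize_t copyPyBuffer(void* buf, size_t buf_capacity,
                     const void* py_buf, size_t size) {
  if (size > buf_capacity) {
    throw std::runtime_error("source data larger than destination buffer");
  }
  std::memcpy(buf, py_buf, size);  // safe: size <= buf_capacity
  return static_cast<ssize_t>(size);
}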
Line: 19
Column: 10
CWE codes:
120
20
template <>
ssize_t doPartialRead<int>(int fildes, void* buf, size_t nbytes) {
  return read(fildes, buf, nbytes);
}

template <>
ssize_t doPartialRead<PyObject*>(PyObject* fildes, void* buf, size_t nbytes) {
  // Try to use fildes.readinto() instead of fildes.read()
Reported by FlawFinder.
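read() may legitimately return fewer bytes than requested, which is why the function is named doPartialRead; the CWE-120/20 concern arises when a caller assumes the buffer was filled completely. A minimal sketch of a retry loop that reads exactly nbytes or fails, assuming buf holds at least nbytes; the doFullRead name is hypothetical:

#include <cerrno>
#include <unistd.h>

// Hypothetical full-read wrapper around the flagged read() call.
ssize_t doFullRead(int fildes, void* buf, size_t nbytes) {
  size_t total = 0;
  while (total < nbytes) {
    ssize_t n = read(fildes, static_cast<char*>(buf) + total, nbytes - total);
    if (n < 0) {
      if (errno == EINTR) continue;  // retry on interrupted syscall
      return -1;                     // hard error
    }
    if (n == 0) break;               // EOF before nbytes arrived
    total += static_cast<size_t>(n);
  }
  return static_cast<ssize_t>(total);
}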
torch/distributed/elastic/timer/__init__.py
2 issues
Line: 42
Column: 1
complete, then the worker process is killed and the agent retries the worker group.
"""
from .api import TimerClient, TimerRequest, TimerServer, configure, expires # noqa: F401
from .local_timer import LocalTimerClient, LocalTimerServer # noqa: F401
Reported by Pylint.
Line: 43
Column: 1
"""
from .api import TimerClient, TimerRequest, TimerServer, configure, expires # noqa: F401
from .local_timer import LocalTimerClient, LocalTimerServer # noqa: F401
Reported by Pylint.
c10/cuda/CUDAMiscFunctions.cpp
2 issues
Line: 8
Column: 39
CWE codes:
807
20
Suggestion:
Check environment variables carefully before using them
namespace cuda {
const char* get_cuda_check_suffix() noexcept {
  static char* device_blocking_flag = getenv("CUDA_LAUNCH_BLOCKING");
  static bool blocking_enabled =
      (device_blocking_flag && atoi(device_blocking_flag));
  if (blocking_enabled) {
    return "";
  } else {
Reported by FlawFinder.
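CWE-807 flags branching on an untrusted environment variable. A minimal sketch of the stricter validation the suggestion asks for, accepting only the documented value "1"; the helper name is hypothetical:

#include <cstdlib>
#include <cstring>

// Hypothetical stricter check: only an explicit "1" enables blocking;
// unset, empty, or malformed values are treated as disabled.
static bool cudaLaunchBlockingEnabled() {
  const char* flag = std::getenv("CUDA_LAUNCH_BLOCKING");
  return flag != nullptr && std::strcmp(flag, "1") == 0;
}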
Line: 10
Column: 32
CWE codes:
190
Suggestion:
If source untrusted, check both minimum and maximum, even if the input had no minus sign (large numbers can roll over into negative number; consider saving to an unsigned value if that is intended)
const char* get_cuda_check_suffix() noexcept {
  static char* device_blocking_flag = getenv("CUDA_LAUNCH_BLOCKING");
  static bool blocking_enabled =
      (device_blocking_flag && atoi(device_blocking_flag));
  if (blocking_enabled) {
    return "";
  } else {
    return "\nCUDA kernel errors might be asynchronously reported at some"
           " other API call,so the stacktrace below might be incorrect."
Reported by FlawFinder.
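The CWE-190 warning targets atoi(), which reports neither overflow nor malformed input. A minimal sketch using strtol() with the explicit range and format checks the suggestion describes; the helper name is hypothetical:

#include <cerrno>
#include <cstdlib>

// Hypothetical bounds-checked replacement for atoi() on env input.
static bool parseBlockingFlag(const char* s) {
  if (s == nullptr) return false;
  errno = 0;
  char* end = nullptr;
  long v = std::strtol(s, &end, 10);
  // Reject overflow/underflow, empty input, and trailing garbage.
  if (errno == ERANGE || end == s || *end != '\0') return false;
  return v != 0;  // same truthiness as atoi(), but validated
}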
.circleci/cimodel/data/simple/docker_definitions.py
2 issues
Line: 7
Column: 3
from cimodel.data.simple.util.branch_filters import gen_filter_dict, RC_PATTERN
# TODO: make this generated from a matrix rather than just a static list
IMAGE_NAMES = [
"pytorch-linux-bionic-cuda10.2-cudnn7-py3.9-gcc7",
"pytorch-linux-bionic-py3.6-clang9",
"pytorch-linux-bionic-cuda10.2-cudnn7-py3.6-clang9",
"pytorch-linux-bionic-py3.8-gcc9",
Reported by Pylint.
Line: 1
Column: 1
from collections import OrderedDict
from cimodel.lib.miniutils import quote
from cimodel.data.simple.util.branch_filters import gen_filter_dict, RC_PATTERN
# TODO: make this generated from a matrix rather than just a static list
IMAGE_NAMES = [
"pytorch-linux-bionic-cuda10.2-cudnn7-py3.9-gcc7",
Reported by Pylint.
.circleci/cimodel/data/simple/util/branch_filters.py
2 issues
Line: 14
Column: 1
RC_PATTERN = r"/v[0-9]+(\.[0-9]+)*-rc[0-9]+/"
def gen_filter_dict(
    branches_list=NON_PR_BRANCH_LIST,
    tags_list=None
):
    """Generates a filter dictionary for use with CircleCI's job filter"""
    filter_dict = {
Reported by Pylint.
Line: 1
Column: 1
NON_PR_BRANCH_LIST = [
"master",
r"/ci-all\/.*/",
r"/release\/.*/",
]
PR_BRANCH_LIST = [
r"/gh\/.*\/head/",
r"/pull\/.*/",
Reported by Pylint.
.jenkins/pytorch/print_sccache_log.py
2 issues
Line: 1
Column: 1
import sys
log_file_path = sys.argv[1]
with open(log_file_path) as f:
    lines = f.readlines()
for line in lines:
    # Ignore errors from CPU instruction set, symbol existing testing,
Reported by Pylint.
Line: 16
Column: 8
        'CheckSymbolExists.c',
        'test_compilation_error_formatting',
    ]
    if all([keyword not in line for keyword in ignored_keywords]):
        print(line)
Reported by Pylint.