The following issues were found:
torch/cuda/amp/common.py
3 issues
Line: 1
Column: 1
import torch
from importlib.util import find_spec
def amp_definitely_not_available():
return not (torch.cuda.is_available() or find_spec('torch_xla'))
Reported by Pylint.
Line: 2
Column: 1
import torch
from importlib.util import find_spec
def amp_definitely_not_available():
return not (torch.cuda.is_available() or find_spec('torch_xla'))
Reported by Pylint.
Line: 5
Column: 1
from importlib.util import find_spec
def amp_definitely_not_available():
return not (torch.cuda.is_available() or find_spec('torch_xla'))
Reported by Pylint.
torch/distributed/elastic/multiprocessing/redirects.py
3 issues
Line: 1
Column: 1
# !/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Taken and modified from original source:
Reported by Pylint.
Line: 25
Column: 1
logger = logging.getLogger(__name__)
def get_libc():
if IS_WINDOWS or IS_MACOS:
logger.warning(
"NOTE: Redirects are currently not supported in Windows or MacOs."
)
return None
Reported by Pylint.
Line: 26
Column: 5
def get_libc():
if IS_WINDOWS or IS_MACOS:
logger.warning(
"NOTE: Redirects are currently not supported in Windows or MacOs."
)
return None
else:
Reported by Pylint.
torch/distributed/elastic/utils/data/__init__.py
3 issues
Line: 9
Column: 1
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from .cycling_iterator import CyclingIterator # noqa: F401
from .elastic_distributed_sampler import ElasticDistributedSampler # noqa: F401
Reported by Pylint.
Line: 10
Column: 1
# LICENSE file in the root directory of this source tree.
from .cycling_iterator import CyclingIterator # noqa: F401
from .elastic_distributed_sampler import ElasticDistributedSampler # noqa: F401
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from .cycling_iterator import CyclingIterator # noqa: F401
Reported by Pylint.
torch/distributed/launch.py
3 issues
Line: 153
Column: 1
logger = logging.getLogger(__name__)
def parse_args(args):
parser = get_args_parser()
parser.add_argument(
"--use_env",
default=False,
action="store_true",
Reported by Pylint.
Line: 167
Column: 1
return parser.parse_args(args)
def launch(args):
if args.no_python and not args.use_env:
raise ValueError(
"When using the '--no_python' flag,"
" you must also set the '--use_env' flag."
)
Reported by Pylint.
Line: 176
Column: 1
run(args)
def main(args=None):
warnings.warn(
"The module torch.distributed.launch is deprecated\n"
"and will be removed in future. Use torch.distributed.run.\n"
"Note that --use_env is set by default in torch.distributed.run.\n"
"If your script expects `--local_rank` argument to be set, please\n"
Reported by Pylint.
torch/distributed/nn/__init__.py
3 issues
Line: 1
Column: 1
from .api.remote_module import RemoteModule
from .functional import * # noqa: F403
Reported by Pylint.
Line: 2
Column: 1
from .api.remote_module import RemoteModule
from .functional import * # noqa: F403
Reported by Pylint.
Line: 1
Column: 1
from .api.remote_module import RemoteModule
from .functional import * # noqa: F403
Reported by Pylint.
torch/distributed/nn/jit/templates/remote_module_template.py
3 issues
Line: 60
Column: 3
# This template may cause typing error (the mismatch between ``Tuple[()]`` and ``Tuple[Any]``)
# even if the code is only used for instaniation but not execution.
# Therefore, only include handling moving CPU tensors to a cuda device if necessary.
# TODO: Merge these two templates together in the future once TorchScript syntax is improved.
_REMOTE_FORWARD_TEMPLATE_ENABLE_MOVING_CPU_TENSORS_TO_CUDA = """
def _remote_forward(
module_rref: RRef[module_interface_cls], device: str, is_device_map_set: bool, {arg_types}){arrow_and_return_type}:
module = module_rref.local_value()
device = torch.device(device)
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/python3
def get_remote_module_template(enable_moving_cpu_tensors_to_cuda: bool):
return _TEMPLATE_PREFIX + (
_REMOTE_FORWARD_TEMPLATE_ENABLE_MOVING_CPU_TENSORS_TO_CUDA
if enable_moving_cpu_tensors_to_cuda
else _REMOTE_FORWARD_TEMPLATE
)
Reported by Pylint.
Line: 4
Column: 1
#!/usr/bin/python3
def get_remote_module_template(enable_moving_cpu_tensors_to_cuda: bool):
return _TEMPLATE_PREFIX + (
_REMOTE_FORWARD_TEMPLATE_ENABLE_MOVING_CPU_TENSORS_TO_CUDA
if enable_moving_cpu_tensors_to_cuda
else _REMOTE_FORWARD_TEMPLATE
)
Reported by Pylint.
torch/distributed/pipeline/sync/__init__.py
3 issues
Line: 8
Column: 1
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""A Pipe implementation in PyTorch."""
from .checkpoint import is_checkpointing, is_recomputing
from .pipe import Pipe
from .microbatch import NoChunk
__all__ = ["Pipe", "is_checkpointing", "is_recomputing"]
Reported by Pylint.
Line: 9
Column: 1
# LICENSE file in the root directory of this source tree.
"""A Pipe implementation in PyTorch."""
from .checkpoint import is_checkpointing, is_recomputing
from .pipe import Pipe
from .microbatch import NoChunk
__all__ = ["Pipe", "is_checkpointing", "is_recomputing"]
Reported by Pylint.
Line: 10
Column: 1
"""A Pipe implementation in PyTorch."""
from .checkpoint import is_checkpointing, is_recomputing
from .pipe import Pipe
from .microbatch import NoChunk
__all__ = ["Pipe", "is_checkpointing", "is_recomputing"]
Reported by Pylint.
torch/distributed/pipeline/sync/utils.py
3 issues
Line: 27
Column: 13
balanced_pipe = []
for num_layers in balance:
layers = []
for i in range(num_layers):
layers.append(module[pipe_idx])
pipe_idx += 1
device = device_idx if devices is None else devices[device_idx]
balanced_pipe.append(nn.Sequential(*layers).to(device))
device_idx += 1
Reported by Pylint.
Line: 1
Column: 1
from torch import nn
from typing import List
def partition_model(
module: nn.Sequential,
balance: List[int],
devices: List[int] = None):
"""
Given an :class:`nn.Sequential <torch.nn.Sequential>` module, partitions
Reported by Pylint.
Line: 2
Column: 1
from torch import nn
from typing import List
def partition_model(
module: nn.Sequential,
balance: List[int],
devices: List[int] = None):
"""
Given an :class:`nn.Sequential <torch.nn.Sequential>` module, partitions
Reported by Pylint.
torch/distributed/rpc/functions.py
3 issues
Line: 164
Column: 5
def wrapper(*args, **kwargs):
return fn(*args, **kwargs)
# Can't declare and use attributes of function objects (mypy#2087)
wrapper._wrapped_async_rpc_function = fn # type: ignore[attr-defined]
return wrapper
Reported by Pylint.
Line: 1
Column: 1
import functools
def async_execution(fn):
r"""
A decorator for a function indicating that the return value of the function
is guaranteed to be a :class:`~torch.futures.Future` object and this
function can run asynchronously on the RPC callee. More specifically, the
callee extracts the :class:`~torch.futures.Future` returned by the wrapped
Reported by Pylint.
Line: 4
Column: 1
import functools
def async_execution(fn):
r"""
A decorator for a function indicating that the return value of the function
is guaranteed to be a :class:`~torch.futures.Future` object and this
function can run asynchronously on the RPC callee. More specifically, the
callee extracts the :class:`~torch.futures.Future` returned by the wrapped
Reported by Pylint.
test/package/package_a/use_dunder_package.py
3 issues
Line: 1
Column: 1
if "__torch_package__" in dir():
def is_from_package():
return True
else:
def is_from_package():
Reported by Pylint.
Line: 3
Column: 5
if "__torch_package__" in dir():
def is_from_package():
return True
else:
def is_from_package():
Reported by Pylint.
Line: 9
Column: 5
else:
def is_from_package():
return False
Reported by Pylint.