The following issues were found:
torch/distributions/poisson.py
15 issues
Line: 41
Column: 27
def __init__(self, rate, validate_args=None):
self.rate, = broadcast_all(rate)
if isinstance(rate, Number):
batch_shape = torch.Size()
else:
batch_shape = self.rate.size()
super(Poisson, self).__init__(batch_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
Reported by Pylint.
Line: 48
Column: 23
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Poisson, _instance)
batch_shape = torch.Size(batch_shape)
new.rate = self.rate.expand(batch_shape)
super(Poisson, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
Reported by Pylint.
Line: 54
Column: 35
new._validate_args = self._validate_args
return new
def sample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
with torch.no_grad():
return torch.poisson(self.rate.expand(shape))
def log_prob(self, value):
Reported by Pylint.
Line: 57
Column: 20
def sample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
with torch.no_grad():
return torch.poisson(self.rate.expand(shape))
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
rate, value = broadcast_all(self.rate, value)
Reported by Pylint.
Line: 67
Column: 17
@property
def _natural_params(self):
return (torch.log(self.rate), )
def _log_normalizer(self, x):
return torch.exp(x)
Reported by Pylint.
Line: 70
Column: 16
return (torch.log(self.rate), )
def _log_normalizer(self, x):
return torch.exp(x)
Reported by Pylint.
Line: 9
Column: 1
from torch.distributions.utils import broadcast_all
class Poisson(ExponentialFamily):
r"""
Creates a Poisson distribution parameterized by :attr:`rate`, the rate parameter.
Samples are nonnegative integers, with a pmf given by
Reported by Pylint.
Line: 9
Column: 1
from torch.distributions.utils import broadcast_all
class Poisson(ExponentialFamily):
r"""
Creates a Poisson distribution parameterized by :attr:`rate`, the rate parameter.
Samples are nonnegative integers, with a pmf given by
Reported by Pylint.
Line: 9
Column: 1
from torch.distributions.utils import broadcast_all
class Poisson(ExponentialFamily):
r"""
Creates a Poisson distribution parameterized by :attr:`rate`, the rate parameter.
Samples are nonnegative integers, with a pmf given by
Reported by Pylint.
Line: 9
Column: 1
from torch.distributions.utils import broadcast_all
class Poisson(ExponentialFamily):
r"""
Creates a Poisson distribution parameterized by :attr:`rate`, the rate parameter.
Samples are nonnegative integers, with a pmf given by
Reported by Pylint.
torch/distributed/elastic/timer/local_timer.py
15 issues
Line: 14
Column: 1
from queue import Empty
from typing import Any, Dict, List, Set, Tuple
from .api import RequestQueue, TimerClient, TimerRequest, TimerServer
class LocalTimerClient(TimerClient):
"""
Client side of ``LocalTimerServer``. This client is meant to be used
Reported by Pylint.
Line: 118
Column: 13
os.kill(worker_id, signal.SIGKILL)
return True
except ProcessLookupError:
logging.info(f"Process with pid={worker_id} does not exist. Skipping")
return True
except Exception as e:
logging.error(f"Error terminating pid={worker_id}", exc_info=e)
return False
Reported by Pylint.
Line: 120
Column: 16
except ProcessLookupError:
logging.info(f"Process with pid={worker_id} does not exist. Skipping")
return True
except Exception as e:
logging.error(f"Error terminating pid={worker_id}", exc_info=e)
return False
Reported by Pylint.
Line: 121
Column: 13
logging.info(f"Process with pid={worker_id} does not exist. Skipping")
return True
except Exception as e:
logging.error(f"Error terminating pid={worker_id}", exc_info=e)
return False
Reported by Pylint.
Line: 1
Column: 1
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import multiprocessing as mp
import os
import signal
Reported by Pylint.
Line: 30
Column: 5
super().__init__()
self._mp_queue = mp_queue
def acquire(self, scope_id, expiration_time):
pid = os.getpid()
acquire_request = TimerRequest(pid, scope_id, expiration_time)
self._mp_queue.put(acquire_request)
def release(self, scope_id):
Reported by Pylint.
Line: 35
Column: 5
acquire_request = TimerRequest(pid, scope_id, expiration_time)
self._mp_queue.put(acquire_request)
def release(self, scope_id):
pid = os.getpid()
release_request = TimerRequest(pid, scope_id, -1)
self._mp_queue.put(release_request)
Reported by Pylint.
Line: 50
Column: 5
super().__init__()
self._mp_queue = mp_queue
def size(self) -> int:
return self._mp_queue.qsize()
def get(self, size, timeout: float) -> List[TimerRequest]:
requests = []
wait = timeout
Reported by Pylint.
Line: 53
Column: 5
def size(self) -> int:
return self._mp_queue.qsize()
def get(self, size, timeout: float) -> List[TimerRequest]:
requests = []
wait = timeout
for _ in range(0, size):
start = time.time()
Reported by Pylint.
Line: 60
Column: 17
start = time.time()
try:
r = self._mp_queue.get(block=True, timeout=wait)
except Empty:
break
requests.append(r)
wait = wait - (time.time() - start)
Reported by Pylint.
torch/csrc/jit/tensorexpr/codegen_external.py
15 issues
Line: 3
Column: 1
#!/usr/bin/env python3
import argparse
from tools.codegen.gen import parse_native_yaml, FileManager
import tools.codegen.model as model
def num_leading_spaces(line: str) -> int:
return len(line) - len(line.lstrip())
def deindent(code: str) -> str:
lines = code.split('\n')
Reported by Pylint.
Line: 4
Column: 1
#!/usr/bin/env python3
import argparse
from tools.codegen.gen import parse_native_yaml, FileManager
import tools.codegen.model as model
def num_leading_spaces(line: str) -> int:
return len(line) - len(line.lstrip())
def deindent(code: str) -> str:
lines = code.split('\n')
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/env python3
import argparse
from tools.codegen.gen import parse_native_yaml, FileManager
import tools.codegen.model as model
def num_leading_spaces(line: str) -> int:
return len(line) - len(line.lstrip())
def deindent(code: str) -> str:
lines = code.split('\n')
Reported by Pylint.
Line: 6
Column: 1
from tools.codegen.gen import parse_native_yaml, FileManager
import tools.codegen.model as model
def num_leading_spaces(line: str) -> int:
return len(line) - len(line.lstrip())
def deindent(code: str) -> str:
lines = code.split('\n')
min_leading_spaces = min(map(num_leading_spaces, lines))
lines = [line[min_leading_spaces:] for line in lines]
Reported by Pylint.
Line: 8
Column: 1
def num_leading_spaces(line: str) -> int:
return len(line) - len(line.lstrip())
def deindent(code: str) -> str:
lines = code.split('\n')
min_leading_spaces = min(map(num_leading_spaces, lines))
lines = [line[min_leading_spaces:] for line in lines]
return '\n'.join(lines)
Reported by Pylint.
Line: 15
Column: 1
return '\n'.join(lines)
def gen_external(native_functions_path, external_path):
native_functions = parse_native_yaml(native_functions_path)
func_decls = []
func_registrations = []
for func in native_functions:
schema = func.func
Reported by Pylint.
Line: 15
Column: 1
return '\n'.join(lines)
def gen_external(native_functions_path, external_path):
native_functions = parse_native_yaml(native_functions_path)
func_decls = []
func_registrations = []
for func in native_functions:
schema = func.func
Reported by Pylint.
Line: 32
Column: 1
continue
# Doesn't currently support kwarg arguments
if len(args.pre_tensor_options_kwarg_only) > 0 or len(args.post_tensor_options_kwarg_only) > 0:
continue
self_arg = [args.self_arg.argument] if args.self_arg is not None else []
args = list(args.pre_self_positional) + self_arg + list(args.post_self_positional)
tensor_args = [arg for arg in args if isinstance(arg.type, model.BaseType) and arg.type.name == model.BaseTy.Tensor]
if len(tensor_args) != len(args):
Reported by Pylint.
Line: 36
Column: 1
continue
self_arg = [args.self_arg.argument] if args.self_arg is not None else []
args = list(args.pre_self_positional) + self_arg + list(args.post_self_positional)
tensor_args = [arg for arg in args if isinstance(arg.type, model.BaseType) and arg.type.name == model.BaseTy.Tensor]
if len(tensor_args) != len(args):
continue
arg_names = [None] * len(args)
Reported by Pylint.
Line: 44
Column: 13
tensor_decls = []
for idx, arg in enumerate(tensor_args):
s = f"const at::Tensor& {arg.name} = tensors[{idx + 1}];"
tensor_decls.append(s)
arg_names[idx] = arg.name
nl = '\n'
# print(tensor_decls, name, arg_names)
Reported by Pylint.
tools/autograd/gen_autograd.py
15 issues
Line: 34
Column: 1
from tools.codegen.gen import parse_native_yaml
from tools.codegen.selective_build.selector import SelectiveBuilder
from typing import List
from . import gen_python_functions
from .gen_autograd_functions import gen_autograd_functions_lib, gen_autograd_functions_python
from .gen_trace_type import gen_trace_type
from .gen_variable_type import gen_variable_type
from .gen_inplace_or_view_type import gen_inplace_or_view_type
from .gen_variable_factories import gen_variable_factories
Reported by Pylint.
Line: 35
Column: 1
from tools.codegen.selective_build.selector import SelectiveBuilder
from typing import List
from . import gen_python_functions
from .gen_autograd_functions import gen_autograd_functions_lib, gen_autograd_functions_python
from .gen_trace_type import gen_trace_type
from .gen_variable_type import gen_variable_type
from .gen_inplace_or_view_type import gen_inplace_or_view_type
from .gen_variable_factories import gen_variable_factories
from .load_derivatives import load_derivatives
Reported by Pylint.
Line: 36
Column: 1
from typing import List
from . import gen_python_functions
from .gen_autograd_functions import gen_autograd_functions_lib, gen_autograd_functions_python
from .gen_trace_type import gen_trace_type
from .gen_variable_type import gen_variable_type
from .gen_inplace_or_view_type import gen_inplace_or_view_type
from .gen_variable_factories import gen_variable_factories
from .load_derivatives import load_derivatives
Reported by Pylint.
Line: 37
Column: 1
from . import gen_python_functions
from .gen_autograd_functions import gen_autograd_functions_lib, gen_autograd_functions_python
from .gen_trace_type import gen_trace_type
from .gen_variable_type import gen_variable_type
from .gen_inplace_or_view_type import gen_inplace_or_view_type
from .gen_variable_factories import gen_variable_factories
from .load_derivatives import load_derivatives
def gen_autograd(
Reported by Pylint.
Line: 38
Column: 1
from .gen_autograd_functions import gen_autograd_functions_lib, gen_autograd_functions_python
from .gen_trace_type import gen_trace_type
from .gen_variable_type import gen_variable_type
from .gen_inplace_or_view_type import gen_inplace_or_view_type
from .gen_variable_factories import gen_variable_factories
from .load_derivatives import load_derivatives
def gen_autograd(
aten_path: str,
Reported by Pylint.
Line: 39
Column: 1
from .gen_trace_type import gen_trace_type
from .gen_variable_type import gen_variable_type
from .gen_inplace_or_view_type import gen_inplace_or_view_type
from .gen_variable_factories import gen_variable_factories
from .load_derivatives import load_derivatives
def gen_autograd(
aten_path: str,
native_functions_path: str,
Reported by Pylint.
Line: 40
Column: 1
from .gen_variable_type import gen_variable_type
from .gen_inplace_or_view_type import gen_inplace_or_view_type
from .gen_variable_factories import gen_variable_factories
from .load_derivatives import load_derivatives
def gen_autograd(
aten_path: str,
native_functions_path: str,
out: str,
Reported by Pylint.
Line: 43
Column: 5
from .load_derivatives import load_derivatives
def gen_autograd(
aten_path: str,
native_functions_path: str,
out: str,
autograd_dir: str,
operator_selector: SelectiveBuilder,
disable_autograd: bool = False,
Reported by Pylint.
Line: 79
Column: 5
def gen_autograd_python(
aten_path: str,
native_functions_path: str,
out: str,
autograd_dir: str,
) -> None:
differentiability_infos = load_derivatives(
Reported by Pylint.
Line: 33
Column: 1
)
from tools.codegen.gen import parse_native_yaml
from tools.codegen.selective_build.selector import SelectiveBuilder
from typing import List
from . import gen_python_functions
from .gen_autograd_functions import gen_autograd_functions_lib, gen_autograd_functions_python
from .gen_trace_type import gen_trace_type
from .gen_variable_type import gen_variable_type
from .gen_inplace_or_view_type import gen_inplace_or_view_type
Reported by Pylint.
torch/distributed/pipeline/sync/_balance/profile.py
15 issues
Line: 16
Column: 1
from torch import Tensor
import torch.nn as nn
from ..microbatch import Batch
__all__: List[str] = []
Device = Union[torch.device, int, str]
Reported by Pylint.
Line: 21
Column: 16
__all__: List[str] = []
Device = Union[torch.device, int, str]
Tensors = Sequence[Tensor]
TensorOrTensors = Union[Tensor, Tensors]
Reported by Pylint.
Line: 27
Column: 54
TensorOrTensors = Union[Tensor, Tensors]
def layerwise_sandbox(module: nn.Sequential, device: torch.device,) -> Generator[nn.Module, None, None]:
"""Copies layers for ease to profile. It doesn't modify the given
module.
"""
for layer in module:
layer_copy = copy.deepcopy(layer)
Reported by Pylint.
Line: 44
Column: 100
batch[i] = x.detach().requires_grad_(x.requires_grad)
def profile_times(module: nn.Sequential, sample: Union[List[Any], Tensor], timeout: float, device: torch.device,) -> List[int]:
"""Profiles elapsed times per layer."""
if any(p.grad is not None for p in module.parameters()):
raise ValueError("some parameter already has gradient")
_batch = Batch(sample)
Reported by Pylint.
Line: 85
Column: 102
def profile_sizes(
module: nn.Sequential, input: Union[List[Any], Tensor], chunks: int, param_scale: float, device: torch.device,
) -> List[int]:
"""Profiles CUDA memory usage per layer."""
if device.type != "cuda":
raise ValueError("size profiler supports only CUDA device")
Reported by Pylint.
Line: 85
Column: 28
def profile_sizes(
module: nn.Sequential, input: Union[List[Any], Tensor], chunks: int, param_scale: float, device: torch.device,
) -> List[int]:
"""Profiles CUDA memory usage per layer."""
if device.type != "cuda":
raise ValueError("size profiler supports only CUDA device")
Reported by Pylint.
Line: 27
Column: 1
TensorOrTensors = Union[Tensor, Tensors]
def layerwise_sandbox(module: nn.Sequential, device: torch.device,) -> Generator[nn.Module, None, None]:
"""Copies layers for ease to profile. It doesn't modify the given
module.
"""
for layer in module:
layer_copy = copy.deepcopy(layer)
Reported by Pylint.
Line: 40
Column: 12
def detach(batch: Batch) -> None:
"""Detaches from autograd graph."""
for i, x in enumerate(batch):
batch[i] = x.detach().requires_grad_(x.requires_grad)
def profile_times(module: nn.Sequential, sample: Union[List[Any], Tensor], timeout: float, device: torch.device,) -> List[int]:
"""Profiles elapsed times per layer."""
Reported by Pylint.
Line: 44
Column: 1
batch[i] = x.detach().requires_grad_(x.requires_grad)
def profile_times(module: nn.Sequential, sample: Union[List[Any], Tensor], timeout: float, device: torch.device,) -> List[int]:
"""Profiles elapsed times per layer."""
if any(p.grad is not None for p in module.parameters()):
raise ValueError("some parameter already has gradient")
_batch = Batch(sample)
Reported by Pylint.
Line: 50
Column: 12
raise ValueError("some parameter already has gradient")
_batch = Batch(sample)
for i, x in enumerate(_batch):
_batch[i] = x.detach().to(device).requires_grad_(x.requires_grad)
time_bufs: List[List[float]] = [[] for _ in module]
begun_at = time.time()
Reported by Pylint.
tools/stats/upload_binary_size_to_scuba.py
15 issues
Line: 16
Column: 14
from tools.stats.scribe import send_to_scribe
def get_size(file_dir: str) -> int:
try:
# we should only expect one file, if no, something is wrong
file_name = glob.glob(os.path.join(file_dir, "*"))[0]
return os.stat(file_name).st_size
except Exception:
Reported by Pylint.
Line: 21
Column: 12
# we should only expect one file, if no, something is wrong
file_name = glob.glob(os.path.join(file_dir, "*"))[0]
return os.stat(file_name).st_size
except Exception:
logging.exception(f"error getting file from: {file_dir}")
return 0
def build_message(size: int) -> Dict[str, Any]:
Reported by Pylint.
Line: 22
Column: 9
file_name = glob.glob(os.path.join(file_dir, "*"))[0]
return os.stat(file_name).st_size
except Exception:
logging.exception(f"error getting file from: {file_dir}")
return 0
def build_message(size: int) -> Dict[str, Any]:
build_env_split: List[Any] = os.environ.get("BUILD_ENVIRONMENT", "").split()
Reported by Pylint.
Line: 26
Column: 19
return 0
def build_message(size: int) -> Dict[str, Any]:
build_env_split: List[Any] = os.environ.get("BUILD_ENVIRONMENT", "").split()
pkg_type, py_ver, cu_ver, *_ = build_env_split + [None, None, None]
os_name = os.uname()[0].lower()
if os_name == "darwin":
os_name = "macos"
Reported by Pylint.
Line: 68
Column: 26
print(res)
def report_android_sizes(file_dir: str) -> None:
def gen_sizes() -> Generator[List[Any], None, None]:
# we should only expect one file, if no, something is wrong
aar_files = list(pathlib.Path(file_dir).rglob("pytorch_android-*.aar"))
if len(aar_files) != 1:
logging.exception(f"error getting aar files from: {file_dir} / {aar_files}")
Reported by Pylint.
Line: 73
Column: 13
# we should only expect one file, if no, something is wrong
aar_files = list(pathlib.Path(file_dir).rglob("pytorch_android-*.aar"))
if len(aar_files) != 1:
logging.exception(f"error getting aar files from: {file_dir} / {aar_files}")
return
aar_file = aar_files[0]
zf = zipfile.ZipFile(aar_file)
for info in zf.infolist():
Reported by Pylint.
Line: 99
Column: 3
yield {
"normal": {
"os": "android",
# TODO: create dedicated columns
"pkg_type": "{}/{}/{}".format(android_build_type, arch, lib),
"cu_ver": "", # dummy value for derived field `build_name`
"py_ver": "", # dummy value for derived field `build_name`
"pr": os.environ.get("CIRCLE_PR_NUMBER"),
"build_num": os.environ.get("CIRCLE_BUILD_NUM"),
Reported by Pylint.
Line: 136
Column: 16
# Sending the message anyway if no size info is collected.
try:
send_message([build_message(size)])
except Exception:
logging.exception("can't send message")
Reported by Pylint.
Line: 1
Column: 1
import glob
import json
import logging
import os
import os.path
import pathlib
import re
import sys
import time
Reported by Pylint.
Line: 16
Column: 1
from tools.stats.scribe import send_to_scribe
def get_size(file_dir: str) -> int:
try:
# we should only expect one file, if no, something is wrong
file_name = glob.glob(os.path.join(file_dir, "*"))[0]
return os.stat(file_name).st_size
except Exception:
Reported by Pylint.
torch/distributed/remote_device.py
15 issues
Line: 24
Column: 50
and "cuda:1", just represent local devices.
"""
def __init__(self, remote_device: Union[str, torch.device]):
PARSE_ERROR = (
f"Could not parse remote_device: {remote_device}. The valid format is "
"'<workername>/<device>' or 'rank:<rank>/<device>' or '<device>'"
)
self._worker_name = None
Reported by Pylint.
Line: 31
Column: 48
)
self._worker_name = None
self._rank = None
self._device: Optional[Union[str, int, torch.device]] = None
if isinstance(remote_device, torch.device):
self._device = remote_device
elif isinstance(remote_device, str):
fields = remote_device.split("/")
Reported by Pylint.
Line: 33
Column: 38
self._rank = None
self._device: Optional[Union[str, int, torch.device]] = None
if isinstance(remote_device, torch.device):
self._device = remote_device
elif isinstance(remote_device, str):
fields = remote_device.split("/")
if len(fields) == 2:
self._worker_name, self._device = fields
Reported by Pylint.
Line: 56
Column: 24
raise ValueError(PARSE_ERROR)
# Validate the device.
self._device = torch.device(self._device)
# Check for rank based format.
if self._worker_name is not None:
fields = self._worker_name.split(":")
if len(fields) == 2:
Reported by Pylint.
Line: 75
Column: 13
def _is_valid_local_device(device):
# Check for torch.device
try:
torch.device(device)
return True
except Exception:
return False
def worker_name(self) -> Optional[str]:
Reported by Pylint.
Line: 94
Column: 25
"""
return self._rank
def device(self) -> torch.device:
"""
Returns the local device on the remote worker.
"""
return self._device # type: ignore[return-value]
Reported by Pylint.
Line: 77
Column: 16
try:
torch.device(device)
return True
except Exception:
return False
def worker_name(self) -> Optional[str]:
"""
Returns the name of remote worker representing the remote device.
Reported by Pylint.
Line: 1
Column: 1
from typing import Optional, Union
import torch
class _remote_device(object):
"""
Represents a device on a remote worker.
Reported by Pylint.
Line: 6
Column: 1
import torch
class _remote_device(object):
"""
Represents a device on a remote worker.
Args:
remote_device (str or torch.device): Represents a device on a remote worker.
Reported by Pylint.
Line: 6
Column: 1
import torch
class _remote_device(object):
"""
Represents a device on a remote worker.
Args:
remote_device (str or torch.device): Represents a device on a remote worker.
Reported by Pylint.
torch/_tensor_docs.py
15 issues
Line: 5
Column: 1
import torch._C
from torch._C import _add_docstr as add_docstr
from ._torch_docs import parse_kwargs
from ._torch_docs import reproducibility_notes
def add_docstr_all(method, docstr):
add_docstr(getattr(torch._C._TensorBase, method), docstr)
Reported by Pylint.
Line: 6
Column: 1
import torch._C
from torch._C import _add_docstr as add_docstr
from ._torch_docs import parse_kwargs
from ._torch_docs import reproducibility_notes
def add_docstr_all(method, docstr):
add_docstr(getattr(torch._C._TensorBase, method), docstr)
Reported by Pylint.
Line: 10
Column: 24
def add_docstr_all(method, docstr):
add_docstr(getattr(torch._C._TensorBase, method), docstr)
common_args = parse_kwargs("""
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned Tensor. Default: ``torch.preserve_format``.
""")
Reported by Pylint.
Line: 10
Column: 24
def add_docstr_all(method, docstr):
add_docstr(getattr(torch._C._TensorBase, method), docstr)
common_args = parse_kwargs("""
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned Tensor. Default: ``torch.preserve_format``.
""")
Reported by Pylint.
Line: 1770
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b608_hardcoded_sql_expressions.html
""")
add_docstr_all('index_add_',
r"""
index_add_(dim, index, tensor, *, alpha=1) -> Tensor
Accumulate the elements of :attr:`alpha` times :attr:`tensor` into the :attr:`self`
tensor by adding to the indices in the order given in :attr:`index`. For example,
if ``dim == 0``, ``index[i] == j``, and ``alpha=-1``, then the ``i``\ th row of
Reported by Bandit.
Line: 1
Column: 1
"""Adds docstrings to Tensor functions"""
import torch._C
from torch._C import _add_docstr as add_docstr
from ._torch_docs import parse_kwargs
from ._torch_docs import reproducibility_notes
def add_docstr_all(method, docstr):
Reported by Pylint.
Line: 9
Column: 1
from ._torch_docs import reproducibility_notes
def add_docstr_all(method, docstr):
add_docstr(getattr(torch._C._TensorBase, method), docstr)
common_args = parse_kwargs("""
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned Tensor. Default: ``torch.preserve_format``.
Reported by Pylint.
Line: 49
Column: 1
.. warning::
When data is a tensor `x`, :func:`new_tensor()` reads out 'the data' from whatever it is passed,
and constructs a leaf variable. Therefore ``tensor.new_tensor(x)`` is equivalent to ``x.clone().detach()``
and ``tensor.new_tensor(x, requires_grad=True)`` is equivalent to ``x.clone().detach().requires_grad_(True)``.
The equivalents using ``clone()`` and ``detach()`` are recommended.
Args:
data (array_like): The returned Tensor copies :attr:`data`.
Reported by Pylint.
Line: 50
Column: 1
When data is a tensor `x`, :func:`new_tensor()` reads out 'the data' from whatever it is passed,
and constructs a leaf variable. Therefore ``tensor.new_tensor(x)`` is equivalent to ``x.clone().detach()``
and ``tensor.new_tensor(x, requires_grad=True)`` is equivalent to ``x.clone().detach().requires_grad_(True)``.
The equivalents using ``clone()`` and ``detach()`` are recommended.
Args:
data (array_like): The returned Tensor copies :attr:`data`.
{dtype}
Reported by Pylint.
Line: 1686
Column: 1
>>> x = torch.randn(3, 4, 5, device='cuda:0')
>>> x.get_device()
0
>>> x.cpu().get_device() # RuntimeError: get_device is not implemented for type torch.FloatTensor
""")
add_docstr_all('values',
r"""
values() -> Tensor
Reported by Pylint.
tools/codegen/api/dispatcher.py
15 issues
Line: 1
Column: 1
from tools.codegen.model import (Argument, FunctionSchema, Return,
SelfArgument, TensorOptionsArguments, Type,
assert_never)
from tools.codegen.api.types import ArgName, Binding, NamedCType, CType
from tools.codegen.api import cpp
from tools.codegen.utils import concatMap
import itertools
Reported by Pylint.
Line: 9
Column: 1
from tools.codegen.api import cpp
from tools.codegen.utils import concatMap
import itertools
from typing import Sequence, List, Union
# This file describes the translation of JIT schema to the dispatcher
# API, the *unboxed* calling convention by which invocations through
# the dispatcher are made. Historically, the dispatcher API matched
Reported by Pylint.
Line: 10
Column: 1
from tools.codegen.utils import concatMap
import itertools
from typing import Sequence, List, Union
# This file describes the translation of JIT schema to the dispatcher
# API, the *unboxed* calling convention by which invocations through
# the dispatcher are made. Historically, the dispatcher API matched
# the C++ API, but with the establishment of the boxed API, we've
Reported by Pylint.
Line: 27
Column: 1
# arguments.
#
def name(func: FunctionSchema) -> str:
return cpp.name(func)
def argumenttype_type(t: Type, *, mutable: bool, binds: ArgName) -> NamedCType:
# This is a faux amis. If it makes sense in the future to add
# more special cases here, or invert things so cpp.argument_type
Reported by Pylint.
Line: 30
Column: 1
def name(func: FunctionSchema) -> str:
return cpp.name(func)
def argumenttype_type(t: Type, *, mutable: bool, binds: ArgName) -> NamedCType:
# This is a faux amis. If it makes sense in the future to add
# more special cases here, or invert things so cpp.argument_type
# calls this, or just completely inline the function, please do
# it.
return cpp.argumenttype_type(t, mutable=mutable, binds=binds)
Reported by Pylint.
Line: 30
Column: 1
def name(func: FunctionSchema) -> str:
return cpp.name(func)
def argumenttype_type(t: Type, *, mutable: bool, binds: ArgName) -> NamedCType:
# This is a faux amis. If it makes sense in the future to add
# more special cases here, or invert things so cpp.argument_type
# calls this, or just completely inline the function, please do
# it.
return cpp.argumenttype_type(t, mutable=mutable, binds=binds)
Reported by Pylint.
Line: 37
Column: 1
# it.
return cpp.argumenttype_type(t, mutable=mutable, binds=binds)
def argument_type(a: Argument, *, binds: ArgName) -> NamedCType:
return argumenttype_type(a.type, mutable=a.is_write, binds=binds)
def returns_type(rs: Sequence[Return]) -> CType:
# At present, there is no difference. But there could be!
return cpp.returns_type(rs)
Reported by Pylint.
Line: 37
Column: 1
# it.
return cpp.argumenttype_type(t, mutable=mutable, binds=binds)
def argument_type(a: Argument, *, binds: ArgName) -> NamedCType:
return argumenttype_type(a.type, mutable=a.is_write, binds=binds)
def returns_type(rs: Sequence[Return]) -> CType:
# At present, there is no difference. But there could be!
return cpp.returns_type(rs)
Reported by Pylint.
Line: 40
Column: 1
def argument_type(a: Argument, *, binds: ArgName) -> NamedCType:
return argumenttype_type(a.type, mutable=a.is_write, binds=binds)
def returns_type(rs: Sequence[Return]) -> CType:
# At present, there is no difference. But there could be!
return cpp.returns_type(rs)
def jit_arguments(func: FunctionSchema) -> List[Argument]:
def to_argument(a: Union[Argument, TensorOptionsArguments, SelfArgument]) -> List[Argument]:
Reported by Pylint.
Line: 40
Column: 1
def argument_type(a: Argument, *, binds: ArgName) -> NamedCType:
return argumenttype_type(a.type, mutable=a.is_write, binds=binds)
def returns_type(rs: Sequence[Return]) -> CType:
# At present, there is no difference. But there could be!
return cpp.returns_type(rs)
def jit_arguments(func: FunctionSchema) -> List[Argument]:
def to_argument(a: Union[Argument, TensorOptionsArguments, SelfArgument]) -> List[Argument]:
Reported by Pylint.
test/test_typing.py
15 issues
Line: 10
Column: 1
from collections import defaultdict
from typing import IO, Dict, List, Optional
import pytest
try:
from mypy import api
except ImportError:
NO_MYPY = True
Reported by Pylint.
Line: 107
Column: 5
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(FAIL_DIR))
def test_fail(path):
__tracebackhide__ = True
with open(path) as fin:
lines = fin.readlines()
errors = defaultdict(lambda: "")
Reported by Pylint.
Line: 151
Column: 16
"""
def _test_fail(path: str, error: str, expected_error: Optional[str], lineno: int) -> None:
if expected_error is None:
raise AssertionError(_FAIL_MSG1.format(lineno, error))
elif error not in expected_error:
raise AssertionError(_FAIL_MSG2.format(lineno, expected_error, error))
Reported by Pylint.
Line: 201
Column: 5
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(REVEAL_DIR))
def test_reveal(path):
__tracebackhide__ = True
with open(path) as fin:
lines = _parse_reveals(fin)
output_mypy = OUTPUT_MYPY
Reported by Pylint.
Line: 229
Column: 18
"""
def _test_reveal(path: str, reveal: str, expected_reveal: str, lineno: int) -> None:
if reveal not in expected_reveal:
raise AssertionError(_REVEAL_MSG.format(lineno, expected_reveal, reveal))
if __name__ == '__main__':
Reported by Pylint.
Line: 1
Column: 1
# based on NumPy numpy/typing/tests/test_typing.py
import itertools
import os
import re
import shutil
from collections import defaultdict
from typing import IO, Dict, List, Optional
Reported by Pylint.
Line: 70
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
directory,
]
)
assert not stderr, directory
stdout = stdout.replace("*", "")
# Parse the output
iterator = itertools.groupby(stdout.split("\n"), key=_key_func)
OUTPUT_MYPY.update((k, list(v)) for k, v in iterator if k)
Reported by Bandit.
Line: 78
Column: 1
OUTPUT_MYPY.update((k, list(v)) for k, v in iterator if k)
def get_test_cases(directory):
for root, _, files in os.walk(directory):
for fname in files:
if os.path.splitext(fname)[-1] == ".py":
fullpath = os.path.join(root, fname)
# Use relative path for nice py.test name
Reported by Pylint.
Line: 95
Column: 1
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(PASS_DIR))
def test_success(path):
# Alias `OUTPUT_MYPY` so that it appears in the local namespace
output_mypy = OUTPUT_MYPY
if path in output_mypy:
msg = "Unexpected mypy output\n\n"
msg += "\n".join(_strip_filename(v) for v in output_mypy[path])
Reported by Pylint.
Line: 106
Column: 1
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(FAIL_DIR))
def test_fail(path):
__tracebackhide__ = True
with open(path) as fin:
lines = fin.readlines()
Reported by Pylint.