The following issues were found:
torch/utils/mobile_optimizer.py
33 issues
Line: 62
Column: 32
backend = backend.lower()
if backend == 'cpu':
optimized_cpp_module = torch._C._jit_pass_optimize_for_mobile(
script_module._c,
optimization_blocklist,
preserved_methods_str)
elif backend == 'vulkan':
optimized_cpp_module = torch._C._jit_pass_vulkan_optimize_for_mobile(script_module._c, preserved_methods_str)
Reported by Pylint.
Line: 63
Column: 13
backend = backend.lower()
if backend == 'cpu':
optimized_cpp_module = torch._C._jit_pass_optimize_for_mobile(
script_module._c,
optimization_blocklist,
preserved_methods_str)
elif backend == 'vulkan':
optimized_cpp_module = torch._C._jit_pass_vulkan_optimize_for_mobile(script_module._c, preserved_methods_str)
elif backend == 'metal':
Reported by Pylint.
Line: 67
Column: 78
optimization_blocklist,
preserved_methods_str)
elif backend == 'vulkan':
optimized_cpp_module = torch._C._jit_pass_vulkan_optimize_for_mobile(script_module._c, preserved_methods_str)
elif backend == 'metal':
optimized_cpp_module = torch._C._jit_pass_metal_optimize_for_mobile(script_module._c, preserved_methods_str)
else:
raise TypeError("Unknown backend, must be one of 'CPU', 'Vulkan' or 'Metal'")
Reported by Pylint.
Line: 67
Column: 32
optimization_blocklist,
preserved_methods_str)
elif backend == 'vulkan':
optimized_cpp_module = torch._C._jit_pass_vulkan_optimize_for_mobile(script_module._c, preserved_methods_str)
elif backend == 'metal':
optimized_cpp_module = torch._C._jit_pass_metal_optimize_for_mobile(script_module._c, preserved_methods_str)
else:
raise TypeError("Unknown backend, must be one of 'CPU', 'Vulkan' or 'Metal'")
Reported by Pylint.
Line: 69
Column: 77
elif backend == 'vulkan':
optimized_cpp_module = torch._C._jit_pass_vulkan_optimize_for_mobile(script_module._c, preserved_methods_str)
elif backend == 'metal':
optimized_cpp_module = torch._C._jit_pass_metal_optimize_for_mobile(script_module._c, preserved_methods_str)
else:
raise TypeError("Unknown backend, must be one of 'CPU', 'Vulkan' or 'Metal'")
return torch.jit._recursive.wrap_cpp_module(optimized_cpp_module)
Reported by Pylint.
Line: 69
Column: 32
elif backend == 'vulkan':
optimized_cpp_module = torch._C._jit_pass_vulkan_optimize_for_mobile(script_module._c, preserved_methods_str)
elif backend == 'metal':
optimized_cpp_module = torch._C._jit_pass_metal_optimize_for_mobile(script_module._c, preserved_methods_str)
else:
raise TypeError("Unknown backend, must be one of 'CPU', 'Vulkan' or 'Metal'")
return torch.jit._recursive.wrap_cpp_module(optimized_cpp_module)
Reported by Pylint.
Line: 73
Column: 12
else:
raise TypeError("Unknown backend, must be one of 'CPU', 'Vulkan' or 'Metal'")
return torch.jit._recursive.wrap_cpp_module(optimized_cpp_module)
def generate_mobile_module_lints(script_module: torch.jit.ScriptModule):
"""
Args:
Reported by Pylint.
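Most of the hits above fall on the torch._C._jit_pass_* calls; the report omits the message text, but protected access to torch internals and the duplicated branch bodies are the likely triggers. A minimal cleanup sketch under that assumption, using a hypothetical helper name _optimize_for_backend (not from the file):

import torch

def _optimize_for_backend(script_module, backend,
                          optimization_blocklist, preserved_methods_str):
    # pylint: disable=protected-access
    # The torch._C pass bindings are private by design; disabling the check
    # locally is the usual remedy when the access is intentional.
    backend = backend.lower()
    if backend == 'cpu':
        return torch._C._jit_pass_optimize_for_mobile(
            script_module._c, optimization_blocklist, preserved_methods_str)
    passes = {
        'vulkan': torch._C._jit_pass_vulkan_optimize_for_mobile,
        'metal': torch._C._jit_pass_metal_optimize_for_mobile,
    }
    if backend not in passes:
        raise TypeError("Unknown backend, must be one of 'CPU', 'Vulkan' or 'Metal'")
    return passes[backend](script_module._c, preserved_methods_str)

The dispatch table keeps each line short and avoids repeating the call pattern for every backend.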
caffe2/python/dataset.py
33 issues
Line: 267
Column: 5
"""Return the list of field names for this dataset."""
return self.fields
def field_types(self):
"""
Return the list of field dtypes for this dataset.
If a list of strings, not a schema.Struct, was passed to the
constructor, this will return a list of dtype(np.void).
Reported by Pylint.
Line: 35
Column: 5
self.enforce_batch_size = enforce_batch_size
self.cursor = None
def setup_ex(self, init_net, exit_net):
if self.cursor is None:
self.cursor = init_net.CreateTreeCursor(
[],
init_net.NextScopedBlob(self.name),
fields=self.dataset.fields)
Reported by Pylint.
Line: 35
Column: 34
self.enforce_batch_size = enforce_batch_size
self.cursor = None
def setup_ex(self, init_net, exit_net):
if self.cursor is None:
self.cursor = init_net.CreateTreeCursor(
[],
init_net.NextScopedBlob(self.name),
fields=self.dataset.fields)
Reported by Pylint.
Line: 71
Column: 5
self.loop_over = loop_over
self.enforce_batch_size = enforce_batch_size
def setup_ex(self, init_net, exit_net):
if self.cursor is None:
self.cursor = init_net.CreateTreeCursor(
[],
init_net.NextScopedBlob(self.name),
fields=self.dataset.fields)
Reported by Pylint.
Line: 71
Column: 34
self.loop_over = loop_over
self.enforce_batch_size = enforce_batch_size
def setup_ex(self, init_net, exit_net):
if self.cursor is None:
self.cursor = init_net.CreateTreeCursor(
[],
init_net.NextScopedBlob(self.name),
fields=self.dataset.fields)
Reported by Pylint.
Line: 86
Column: 9
offsets = net.ComputeOffset(
[self.cursor] + self.dataset.content().field_blobs(),
'offsets')
self.offsets = offsets
def sort_and_shuffle(self, net, sort_by_field=None,
shuffle_size=1, batch_size=1):
# no sorting by default
content = self.dataset.content()
Reported by Pylint.
Line: 130
Column: 34
self._content = content
self.mutex = None
def setup_ex(self, init_net, exit_net):
if self.mutex is None:
self.mutex = init_net.CreateMutex([])
def write(self, writer_net, fields):
"""
Reported by Pylint.
Line: 130
Column: 5
self._content = content
self.mutex = None
def setup_ex(self, init_net, exit_net):
if self.mutex is None:
self.mutex = init_net.CreateMutex([])
def write(self, writer_net, fields):
"""
Reported by Pylint.
Line: 156
Column: 9
def commit(self, finish_net):
"""Commit is a no-op for an in-memory dataset."""
pass
def Const(net, value, dtype=None, name=None):
"""
Create a 'constant' by first creating an external input in the given
Reported by Pylint.
Line: 173
Column: 40
return blob
def execution_step_with_progress(name, init_net, substeps, rows_read):
# progress reporter
report_net = core.Net('report_net')
report_net.Print([rows_read], [])
return core.execution_step(
name,
Reported by Pylint.
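The paired hits at lines 35 and 71 sit on setup_ex and on its exit_net parameter, which the bodies shown never use; unused-argument is the plausible message. A hedged sketch of acknowledging an interface-mandated parameter (the class name _ReaderSketch is illustrative, not from the file):

class _ReaderSketch:
    def __init__(self, name, dataset):
        self.name = name
        self.dataset = dataset
        self.cursor = None

    def setup_ex(self, init_net, exit_net):  # pylint: disable=unused-argument
        # exit_net is part of the reader's setup interface even when a given
        # reader has nothing to tear down.
        if self.cursor is None:
            self.cursor = init_net.CreateTreeCursor(
                [],
                init_net.NextScopedBlob(self.name),
                fields=self.dataset.fields)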
torch/jit/_monkeytype_config.py
33 issues
Line: 12
Column: 5
_IS_MONKEYTYPE_INSTALLED = True
try:
import monkeytype # type: ignore[import]
from monkeytype import trace as monkeytype_trace
from monkeytype.db.base import CallTraceThunk, CallTraceStore, CallTraceStoreLogger # type: ignore[import]
from monkeytype.config import _startswith, LIB_PATHS # type: ignore[import]
from monkeytype.tracing import CallTrace, CodeFilter # type: ignore[import]
except ImportError:
_IS_MONKEYTYPE_INSTALLED = False
Reported by Pylint.
Line: 47
Column: 9
class JitTypeTraceStoreLogger(CallTraceStoreLogger):
"""A JitTypeCallTraceLogger that stores logged traces in a CallTraceStore."""
def __init__(self, store: CallTraceStore):
super().__init__(store)
def log(self, trace: CallTrace) -> None:
self.traces.append(trace)
Reported by Pylint.
Line: 69
Column: 13
def filter(
self,
qualified_name: str,
qualname_prefix: Optional[str] = None,
limit: int = 2000
) -> List[CallTraceThunk]:
return self.trace_records[qualified_name]
def analyze(self, qualified_name: str) -> Dict:
Reported by Pylint.
Line: 70
Column: 13
self,
qualified_name: str,
qualname_prefix: Optional[str] = None,
limit: int = 2000
) -> List[CallTraceThunk]:
return self.trace_records[qualified_name]
def analyze(self, qualified_name: str) -> Dict:
# Analyze the types for the given module
Reported by Pylint.
Line: 109
Column: 3
_all_type = _all_type.lstrip(" ") # Remove any trailing spaces
if len(types) == 2 and 'NoneType' in _all_type:
# TODO: To remove this check once Union suppport in TorchScript lands.
all_args[arg] = {get_optional_of_element_type(_all_type)}
elif len(types) > 1:
all_args[arg] = {'Any'}
else:
all_args[arg] = {_all_type[:-1]}
Reported by Pylint.
Line: 1
Column: 1
import inspect
import typing
import pathlib
import torch
from typing import Optional, Iterable, List, Dict
from collections import defaultdict
from types import CodeType
_IS_MONKEYTYPE_INSTALLED = True
Reported by Pylint.
Line: 5
Column: 1
import typing
import pathlib
import torch
from typing import Optional, Iterable, List, Dict
from collections import defaultdict
from types import CodeType
_IS_MONKEYTYPE_INSTALLED = True
try:
Reported by Pylint.
Line: 6
Column: 1
import pathlib
import torch
from typing import Optional, Iterable, List, Dict
from collections import defaultdict
from types import CodeType
_IS_MONKEYTYPE_INSTALLED = True
try:
import monkeytype # type: ignore[import]
Reported by Pylint.
Line: 7
Column: 1
import torch
from typing import Optional, Iterable, List, Dict
from collections import defaultdict
from types import CodeType
_IS_MONKEYTYPE_INSTALLED = True
try:
import monkeytype # type: ignore[import]
from monkeytype import trace as monkeytype_trace
Reported by Pylint.
Line: 13
Column: 1
try:
import monkeytype # type: ignore[import]
from monkeytype import trace as monkeytype_trace
from monkeytype.db.base import CallTraceThunk, CallTraceStore, CallTraceStoreLogger # type: ignore[import]
from monkeytype.config import _startswith, LIB_PATHS # type: ignore[import]
from monkeytype.tracing import CallTrace, CodeFilter # type: ignore[import]
except ImportError:
_IS_MONKEYTYPE_INSTALLED = False
Reported by Pylint.
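The hits at lines 69 and 70 point at qualname_prefix and limit, which filter accepts to satisfy the CallTraceStore interface but never reads. One conventional fix, assuming unused-argument is the message, sketched over a minimal in-memory store:

from typing import Dict, List, Optional

class _InMemoryStoreSketch:
    def __init__(self) -> None:
        self.trace_records: Dict[str, List] = {}

    def filter(
        self,
        qualified_name: str,
        qualname_prefix: Optional[str] = None,
        limit: int = 2000,
    ) -> List:
        # Both parameters belong to the CallTraceStore API; discard them
        # explicitly so the unused-argument warning is silenced.
        del qualname_prefix, limit
        return self.trace_records[qualified_name]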
torch/distributions/relaxed_bernoulli.py
32 issues
Line: 45
Column: 27
self.logits, = broadcast_all(logits)
self._param = self.probs if probs is not None else self.logits
if is_scalar:
batch_shape = torch.Size()
else:
batch_shape = self._param.size()
super(LogitRelaxedBernoulli, self).__init__(batch_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
Reported by Pylint.
Line: 52
Column: 23
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(LogitRelaxedBernoulli, _instance)
batch_shape = torch.Size(batch_shape)
new.temperature = self.temperature
if 'probs' in self.__dict__:
new.probs = self.probs.expand(batch_shape)
new._param = new.probs
if 'logits' in self.__dict__:
Reported by Pylint.
Line: 68
Column: 5
return self._param.new(*args, **kwargs)
@lazy_property
def logits(self):
return probs_to_logits(self.probs, is_binary=True)
@lazy_property
def probs(self):
return logits_to_probs(self.logits, is_binary=True)
Reported by Pylint.
Line: 72
Column: 5
return probs_to_logits(self.probs, is_binary=True)
@lazy_property
def probs(self):
return logits_to_probs(self.logits, is_binary=True)
@property
def param_shape(self):
return self._param.size()
Reported by Pylint.
Line: 79
Column: 36
def param_shape(self):
return self._param.size()
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
probs = clamp_probs(self.probs.expand(shape))
uniforms = clamp_probs(torch.rand(shape, dtype=probs.dtype, device=probs.device))
return (uniforms.log() - (-uniforms).log1p() + probs.log() - (-probs).log1p()) / self.temperature
Reported by Pylint.
Line: 82
Column: 32
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
probs = clamp_probs(self.probs.expand(shape))
uniforms = clamp_probs(torch.rand(shape, dtype=probs.dtype, device=probs.device))
return (uniforms.log() - (-uniforms).log1p() + probs.log() - (-probs).log1p()) / self.temperature
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
Reported by Pylint.
Line: 10
Column: 1
from torch.distributions.utils import broadcast_all, probs_to_logits, logits_to_probs, lazy_property, clamp_probs
class LogitRelaxedBernoulli(Distribution):
r"""
Creates a LogitRelaxedBernoulli distribution parameterized by :attr:`probs`
or :attr:`logits` (but not both), which is the logit of a RelaxedBernoulli
distribution.
Reported by Pylint.
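For context, the rsample flagged at line 79 draws logistic noise and rescales by the temperature. A short runnable check of that identity against the public distribution (values arbitrary):

import torch
from torch.distributions import LogitRelaxedBernoulli

d = LogitRelaxedBernoulli(torch.tensor(2.0), probs=torch.tensor([0.3]))
sample = d.rsample()  # differentiable when probs/temperature require grad

# The same transform by hand, mirroring the rsample body quoted above;
# the clamp stands in for clamp_probs.
u = torch.rand(1).clamp(1e-6, 1 - 1e-6)
p = torch.tensor([0.3])
manual = (u.log() - (-u).log1p() + p.log() - (-p).log1p()) / 2.0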
test/jit/test_script_profile.py
32 issues
Line: 4
Column: 1
import os
import sys
import torch
from torch import nn
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
Reported by Pylint.
Line: 5
Column: 1
import sys
import torch
from torch import nn
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
Reported by Pylint.
Line: 10
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
Reported by Pylint.
Line: 24
Column: 23
self.lstm2 = nn.LSTMCell(51, 51)
self.linear = nn.Linear(51, 1)
def forward(self, input):
outputs = []
h_t = torch.zeros(input.size(0), 51)
c_t = torch.zeros(input.size(0), 51)
h_t2 = torch.zeros(input.size(0), 51)
c_t2 = torch.zeros(input.size(0), 51)
Reported by Pylint.
Line: 43
Column: 13
def test_basic(self):
seq = torch.jit.script(Sequence())
p = torch.jit._ScriptProfile()
p.enable()
seq(torch.rand((10, 100)))
p.disable()
self.assertNotEqual(p.dump_string(), "")
Reported by Pylint.
Line: 54
Column: 17
@torch.jit.script
def fn():
p = torch.jit._ScriptProfile()
p.enable()
_ = seq(torch.rand((10, 100)))
p.disable()
return p
Reported by Pylint.
Line: 64
Column: 21
def test_multi(self):
seq = torch.jit.script(Sequence())
profiles = [torch.jit._ScriptProfile() for _ in range(5)]
for p in profiles:
p.enable()
last = None
while len(profiles) > 0:
Reported by Pylint.
Line: 84
Column: 17
@torch.jit.script
def fn():
p = torch.jit._ScriptProfile()
p.enable()
_ = seq(torch.rand((10, 100)))
p.disable()
stats0 = p.dump_string()
Reported by Pylint.
Line: 106
Column: 13
self.assertNotEqual(s1, s2)
def test_empty(self):
p = torch.jit._ScriptProfile()
p.enable()
p.disable()
self.assertEqual(p.dump_string(), "")
Reported by Pylint.
Line: 1
Column: 1
import os
import sys
import torch
from torch import nn
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
Reported by Pylint.
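Nearly every hit in this test lands on torch.jit._ScriptProfile, a private API the test exercises deliberately, so protected-access is the likely message. The enable/record/disable cycle under test, reduced to a runnable sketch:

import torch

@torch.jit.script
def double(x: torch.Tensor) -> torch.Tensor:
    return x * 2

p = torch.jit._ScriptProfile()  # private API, used knowingly as in the test
p.enable()
double(torch.rand(3))
p.disable()
assert p.dump_string() != ""  # mirrors the assertNotEqual in test_basic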
torch/distributions/relaxed_categorical.py
32 issues
Line: 46
Column: 23
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(ExpRelaxedCategorical, _instance)
batch_shape = torch.Size(batch_shape)
new.temperature = self.temperature
new._categorical = self._categorical.expand(batch_shape)
super(ExpRelaxedCategorical, new).__init__(batch_shape, self.event_shape, validate_args=False)
new._validate_args = self._validate_args
return new
Reported by Pylint.
Line: 68
Column: 36
def probs(self):
return self._categorical.probs
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
uniforms = clamp_probs(torch.rand(shape, dtype=self.logits.dtype, device=self.logits.device))
gumbels = -((-(uniforms.log())).log())
scores = (self.logits + gumbels) / self.temperature
return scores - scores.logsumexp(dim=-1, keepdim=True)
Reported by Pylint.
Line: 70
Column: 32
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
uniforms = clamp_probs(torch.rand(shape, dtype=self.logits.dtype, device=self.logits.device))
gumbels = -((-(uniforms.log())).log())
scores = (self.logits + gumbels) / self.temperature
return scores - scores.logsumexp(dim=-1, keepdim=True)
def log_prob(self, value):
Reported by Pylint.
Line: 80
Column: 22
if self._validate_args:
self._validate_sample(value)
logits, value = broadcast_all(self.logits, value)
log_scale = (torch.full_like(self.temperature, float(K)).lgamma() -
self.temperature.log().mul(-(K - 1)))
score = logits - value.mul(self.temperature)
score = (score - score.logsumexp(dim=-1, keepdim=True)).sum(-1)
return score + log_scale
Reported by Pylint.
Line: 10
Column: 1
from torch.distributions.transforms import ExpTransform
class ExpRelaxedCategorical(Distribution):
r"""
Creates a ExpRelaxedCategorical parameterized by
:attr:`temperature`, and either :attr:`probs` or :attr:`logits` (but not both).
Returns the log of a point in the simplex. Based on the interface to
:class:`OneHotCategorical`.
Reported by Pylint.
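The rsample at line 68 is the Gumbel-softmax trick computed in log space; the public RelaxedOneHotCategorical composes this distribution with an ExpTransform, which is what the import on the flagged line 10 supports. A runnable sanity check:

import torch
from torch.distributions import RelaxedOneHotCategorical

d = RelaxedOneHotCategorical(torch.tensor(0.5),
                             logits=torch.tensor([1.0, 2.0, 3.0]))
x = d.rsample()
# Samples lie on the simplex: positive entries summing to one.
assert torch.isclose(x.sum(), torch.tensor(1.0), atol=1e-5)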
torch/distributed/pipeline/sync/pipeline.py
32 issues
Line: 17
Column: 1
from torch import Tensor, nn
from torch.autograd.profiler import record_function
from .checkpoint import Checkpointing
from .copy import Copy, Wait
from .dependency import fork, join
from .microbatch import Batch
from .skip.layout import SkipLayout
from .skip.tracker import SkipTrackerThroughPotals, use_skip_tracker
Reported by Pylint.
Line: 18
Column: 1
from torch.autograd.profiler import record_function
from .checkpoint import Checkpointing
from .copy import Copy, Wait
from .dependency import fork, join
from .microbatch import Batch
from .skip.layout import SkipLayout
from .skip.tracker import SkipTrackerThroughPotals, use_skip_tracker
from .stream import AbstractStream, current_stream, use_device
Reported by Pylint.
Line: 19
Column: 1
from .checkpoint import Checkpointing
from .copy import Copy, Wait
from .dependency import fork, join
from .microbatch import Batch
from .skip.layout import SkipLayout
from .skip.tracker import SkipTrackerThroughPotals, use_skip_tracker
from .stream import AbstractStream, current_stream, use_device
from .worker import Task, create_workers
Reported by Pylint.
Line: 20
Column: 1
from .checkpoint import Checkpointing
from .copy import Copy, Wait
from .dependency import fork, join
from .microbatch import Batch
from .skip.layout import SkipLayout
from .skip.tracker import SkipTrackerThroughPotals, use_skip_tracker
from .stream import AbstractStream, current_stream, use_device
from .worker import Task, create_workers
Reported by Pylint.
Line: 21
Column: 1
from .copy import Copy, Wait
from .dependency import fork, join
from .microbatch import Batch
from .skip.layout import SkipLayout
from .skip.tracker import SkipTrackerThroughPotals, use_skip_tracker
from .stream import AbstractStream, current_stream, use_device
from .worker import Task, create_workers
__all__: List[str] = []
Reported by Pylint.
Line: 22
Column: 1
from .dependency import fork, join
from .microbatch import Batch
from .skip.layout import SkipLayout
from .skip.tracker import SkipTrackerThroughPotals, use_skip_tracker
from .stream import AbstractStream, current_stream, use_device
from .worker import Task, create_workers
__all__: List[str] = []
Reported by Pylint.
Line: 23
Column: 1
from .microbatch import Batch
from .skip.layout import SkipLayout
from .skip.tracker import SkipTrackerThroughPotals, use_skip_tracker
from .stream import AbstractStream, current_stream, use_device
from .worker import Task, create_workers
__all__: List[str] = []
Reported by Pylint.
Line: 24
Column: 1
from .skip.layout import SkipLayout
from .skip.tracker import SkipTrackerThroughPotals, use_skip_tracker
from .stream import AbstractStream, current_stream, use_device
from .worker import Task, create_workers
__all__: List[str] = []
Tensors = Sequence[Tensor]
Reported by Pylint.
Line: 37
Column: 15
# Queue is generic only in stubs.
# https://mypy.readthedocs.io/en/latest/common_issues.html#using-classes-that-are-generic-in-stubs-but-not-at-runtime
if TYPE_CHECKING:
InQueue = Queue[Optional["Task"]]
OutQueue = Queue[Tuple[bool, Union[Tuple["Task", Batch], ExcInfo, None]]]
else:
InQueue = Queue
OutQueue = Queue
Reported by Pylint.
Line: 38
Column: 16
# https://mypy.readthedocs.io/en/latest/common_issues.html#using-classes-that-are-generic-in-stubs-but-not-at-runtime
if TYPE_CHECKING:
InQueue = Queue[Optional["Task"]]
OutQueue = Queue[Tuple[bool, Union[Tuple["Task", Batch], ExcInfo, None]]]
else:
InQueue = Queue
OutQueue = Queue
Reported by Pylint.
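The hits at lines 37 and 38 fall on the Queue subscripts. As the file's own comment notes, Queue is generic only in stubs, so the subscript exists purely for type checkers; Pylint commonly reports this pattern as unsubscriptable-object. A self-contained sketch of the guard, with Task as a placeholder for the worker type:

from queue import Queue
from typing import TYPE_CHECKING, Optional

class Task:  # placeholder for the worker Task type in the snippet
    pass

if TYPE_CHECKING:
    # Evaluated only by static type checkers; at runtime the bare Queue is
    # used, since queue.Queue supports subscripting only from Python 3.9 on.
    InQueue = Queue[Optional[Task]]
else:
    InQueue = Queue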
test/distributed/elastic/rendezvous/utils_test.py
32 issues
Line: 14
Column: 1
from typing import List
from unittest import TestCase
from torch.distributed.elastic.rendezvous.utils import (
_PeriodicTimer,
_delay,
_matches_machine_hostname,
_parse_rendezvous_config,
_try_parse_port,
Reported by Pylint.
Line: 238
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b104_hardcoded_bind_all_interfaces.html
def test_matches_machine_hostname_returns_false_if_hostname_does_not_match(
self,
) -> None:
hosts = ["dummy", "0.0.0.0", "::2"]
for host in hosts:
with self.subTest(host=host):
self.assertFalse(_matches_machine_hostname(host))
Reported by Bandit.
Line: 334
Column: 22
timer_stop_event = threading.Event()
def log_call(self):
nonlocal timer_begin_time, call_count
actual_call_intervals.append(time.monotonic() - timer_begin_time)
call_count += 1
Reported by Pylint.
Line: 1
Column: 1
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import threading
import time
import socket
Reported by Pylint.
Line: 24
Column: 1
)
class UtilsTest(TestCase):
def test_parse_rendezvous_config_returns_dict(self) -> None:
expected_config = {
"a": "dummy1",
"b": "dummy2",
"c": "dummy3=dummy4",
Reported by Pylint.
Line: 25
Column: 5
class UtilsTest(TestCase):
def test_parse_rendezvous_config_returns_dict(self) -> None:
expected_config = {
"a": "dummy1",
"b": "dummy2",
"c": "dummy3=dummy4",
"d": "dummy5/dummy6",
Reported by Pylint.
Line: 39
Column: 5
self.assertEqual(config, expected_config)
def test_parse_rendezvous_returns_empty_dict_if_str_is_empty(self) -> None:
config_strs = ["", " "]
for config_str in config_strs:
with self.subTest(config_str=config_str):
config = _parse_rendezvous_config(config_str)
Reported by Pylint.
Line: 48
Column: 5
self.assertEqual(config, {})
def test_parse_rendezvous_raises_error_if_str_is_invalid(self) -> None:
config_strs = [
"a=dummy1,",
"a=dummy1,,c=dummy2",
"a=dummy1, ,c=dummy2",
"a=dummy1,= ,c=dummy2",
Reported by Pylint.
Line: 68
Column: 5
):
_parse_rendezvous_config(config_str)
def test_parse_rendezvous_raises_error_if_value_is_empty(self) -> None:
config_strs = [
"b=dummy1,a,c=dummy2",
"b=dummy1,c=dummy2,a",
"b=dummy1,a=,c=dummy2",
" a ",
Reported by Pylint.
Line: 84
Column: 5
):
_parse_rendezvous_config(config_str)
def test_try_parse_port_returns_port(self) -> None:
port = _try_parse_port("123")
self.assertEqual(port, 123)
def test_try_parse_port_returns_none_if_str_is_invalid(self) -> None:
Reported by Pylint.
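The Bandit finding at line 238 (B104, hardcoded bind-all interfaces, per the linked suggestion) fires on the string "0.0.0.0" used here as hostname test input, not as a bind address. If it is judged a false positive, one conventional acknowledgement is a nosec annotation; a sketch over the test body shown:

def test_matches_machine_hostname_returns_false_if_hostname_does_not_match(
    self,
) -> None:
    # "0.0.0.0" is test data for hostname matching, not a socket bind.
    hosts = ["dummy", "0.0.0.0", "::2"]  # nosec B104
    for host in hosts:
        with self.subTest(host=host):
            self.assertFalse(_matches_machine_hostname(host))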
torch/fx/experimental/fx2trt/example/fx2trt_example.py
32 issues
Line: 3
Column: 1
from typing import Tuple, Dict, Callable, Any
import torch
import torch.fx
import torchvision.models as models
import torch.fx.passes.splitter_base as splitter_base
import torch.fx.passes.operator_support as op_support
import torch.fx.passes.net_min_base as net_min_base
from torch.fx.experimental.fx2trt.fx2trt import TRTInterpreter, InputTensorSpec, TRTModule
Reported by Pylint.
Line: 4
Column: 1
from typing import Tuple, Dict, Callable, Any
import torch
import torch.fx
import torchvision.models as models
import torch.fx.passes.splitter_base as splitter_base
import torch.fx.passes.operator_support as op_support
import torch.fx.passes.net_min_base as net_min_base
from torch.fx.experimental.fx2trt.fx2trt import TRTInterpreter, InputTensorSpec, TRTModule
Reported by Pylint.
Line: 5
Column: 1
import torch
import torch.fx
import torchvision.models as models
import torch.fx.passes.splitter_base as splitter_base
import torch.fx.passes.operator_support as op_support
import torch.fx.passes.net_min_base as net_min_base
from torch.fx.experimental.fx2trt.fx2trt import TRTInterpreter, InputTensorSpec, TRTModule
Reported by Pylint.
Line: 6
Column: 1
import torch
import torch.fx
import torchvision.models as models
import torch.fx.passes.splitter_base as splitter_base
import torch.fx.passes.operator_support as op_support
import torch.fx.passes.net_min_base as net_min_base
from torch.fx.experimental.fx2trt.fx2trt import TRTInterpreter, InputTensorSpec, TRTModule
Reported by Pylint.
Line: 7
Column: 1
import torch.fx
import torchvision.models as models
import torch.fx.passes.splitter_base as splitter_base
import torch.fx.passes.operator_support as op_support
import torch.fx.passes.net_min_base as net_min_base
from torch.fx.experimental.fx2trt.fx2trt import TRTInterpreter, InputTensorSpec, TRTModule
# The purpose of this example is to demonstrate the overall flow of lowering a PyTorch
Reported by Pylint.
Line: 8
Column: 1
import torchvision.models as models
import torch.fx.passes.splitter_base as splitter_base
import torch.fx.passes.operator_support as op_support
import torch.fx.passes.net_min_base as net_min_base
from torch.fx.experimental.fx2trt.fx2trt import TRTInterpreter, InputTensorSpec, TRTModule
# The purpose of this example is to demonstrate the overall flow of lowering a PyTorch
# model to TensorRT via FX with existing FX based tooling. The general lowering flow
Reported by Pylint.
Line: 9
Column: 1
import torch.fx.passes.splitter_base as splitter_base
import torch.fx.passes.operator_support as op_support
import torch.fx.passes.net_min_base as net_min_base
from torch.fx.experimental.fx2trt.fx2trt import TRTInterpreter, InputTensorSpec, TRTModule
# The purpose of this example is to demonstrate the overall flow of lowering a PyTorch
# model to TensorRT via FX with existing FX based tooling. The general lowering flow
# would be like:
Reported by Pylint.
Line: 58
Column: 25
return True
class TensorRTMinimizer(net_min_base._MinimizerBase):
"""
Need to define a Minimizer class for TensorRT because it's used in Splitter.
"""
def __init__(
self,
Reported by Pylint.
Line: 100
Column: 24
# This in the future will be a global TensorRTSplitter and we don't need to create
# it per example.
class TensorRTSplitter(splitter_base._SplitterBase):
"""
Splitter for TensorRT.
"""
def __init__(
self,
Reported by Pylint.
Line: 170
Column: 5
# split into accelerator submodules while nodes with unsupported types will be
# split into cpu submodules.
splitter.node_support_preview()
"""
output:
Supported node types in the model:
torch.nn.modules.conv.Conv2d: ((torch.float32,), {})
torch.nn.modules.batchnorm.BatchNorm2d: ((torch.float32,), {})
Reported by Pylint.
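Several of the column-1 hits sit on the import torch.fx.passes.X as Y lines. Without the message text this is a guess, but Pylint's consider-using-from-import is the usual trigger for that pattern; the equivalent from-imports would be:

from typing import Tuple, Dict, Callable, Any

import torch
import torch.fx
from torchvision import models
from torch.fx.passes import splitter_base, operator_support, net_min_base
from torch.fx.experimental.fx2trt.fx2trt import (
    TRTInterpreter, InputTensorSpec, TRTModule)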
benchmarks/operator_benchmark/pt/cat_test.py
32 issues
Line: 2
Column: 1
import operator_benchmark as op_bench
import torch
import random
from typing import List
"""Microbenchmarks for Cat operator"""
cross_product_configs = {
Reported by Pylint.
Line: 14
Column: 21
}
# Configs for PT Cat operator
cat_configs_short = op_bench.config_list(
attr_names=['sizes', 'N', 'dim'],
attrs=[
[(1, 1, 1), 2, 0], # noqa: E241
[(512, 512, 2), 2, 1], # noqa: E241
[(128, 1024, 2), 2, 1], # noqa: E241
Reported by Pylint.
Line: 26
Column: 30
)
# Configs specific to static runtime feature - a fast path runtime for pared down models
cat_configs_static_runtime = op_bench.config_list(
attr_names=['sizes', 'N', 'dim'],
attrs=[
[[(1, 160), (1, 14)], -1, 1],
[[(1, 20, 40), (1, 4, 40), (1, 5, 40)], -1, 1],
[[(1, 580), (1, 174)], -1, 1],
Reported by Pylint.
Line: 40
Column: 20
tags=['static_runtime'],
)
cat_configs_long = op_bench.config_list(
attr_names=['sizes', 'N', 'dim'],
attrs=[
[(2**10, 2**10, 2), 2, 0], # noqa: E241
[(2**10+1, 2**10-1, 2), 2, 1], # noqa: E226,E241
[(2**10, 2**10, 2), 2, 2], # noqa: E241
Reported by Pylint.
Line: 66
Column: 24
)
# There is a different codepath on CUDA for >4 dimensions
cat_configs_multidim = op_bench.config_list(
attr_names=['sizes', 'N', 'dim'],
attrs=[
[(2**6, 2**5, 2**2, 2**4, 2**5), 2, 2], # noqa: E241
[(2**4, 2**5, 2**2, 2**4, 2**5), 8, 2], # noqa: E241
[(2**3+1, 2**5-1, 2**2+1, 2**4-1, 2**5+1), 17, 4], # noqa: E226,E241
Reported by Pylint.
Line: 77
Column: 26
tags=['multidim'],
)
cat_configs_manyinputs = op_bench.config_list(
attr_names=['sizes', 'N', 'dim'],
attrs=[
[[lambda: random.randint(1, 10000)], 100, 0],
[[lambda: random.randint(1, 1000)], 1000, 0],
[[lambda: random.randint(1, 500)], 2000, 0],
Reported by Pylint.
Line: 89
Column: 20
tags=['manyinputs'],
)
class CatBenchmark(op_bench.TorchBenchmarkBase):
def init(self, sizes, N, dim, device):
random.seed(42)
inputs = []
gen_sizes = []
if type(sizes) == list and N == -1:
Reported by Pylint.
Line: 114
Column: 1
return torch.cat(inputs, dim=dim, out=result)
op_bench.generate_pt_test(cat_configs_short +
cat_configs_long +
cat_configs_multidim +
cat_configs_manyinputs +
cat_configs_static_runtime,
CatBenchmark)
Reported by Pylint.
Line: 7
Column: 1
from typing import List
"""Microbenchmarks for Cat operator"""
cross_product_configs = {
'device': ['cpu', 'cuda'],
}
Reported by Pylint.
Line: 97
Column: 17
if type(sizes) == list and N == -1:
gen_sizes = sizes
else:
for i in range(N):
gen_sizes.append([old_size() if callable(old_size) else old_size for old_size in sizes])
for s in gen_sizes:
inputs.append(torch.rand(s, device=device))
result = torch.empty(0, device=device)
Reported by Pylint.
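The last hit points into init, where type(sizes) == list is the comparison Pylint's unidiomatic-typecheck targets (again a guess, since the message text is absent). A sketch of the size-resolution logic rewritten with isinstance, as a hypothetical helper resolve_sizes:

def resolve_sizes(sizes, N):
    # isinstance is the idiomatic check Pylint suggests over type(...) == list.
    if isinstance(sizes, list) and N == -1:
        return sizes
    # Otherwise build N entries, invoking any callable (lambda) dims,
    # mirroring the benchmark's generation loop.
    return [[s() if callable(s) else s for s in sizes] for _ in range(N)]

assert resolve_sizes([(1, 160), (1, 14)], -1) == [(1, 160), (1, 14)]
assert resolve_sizes((2, 3), 2) == [[2, 3], [2, 3]]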