The following issues were found:
torch/fx/graph.py
125 issues
Line: 1
Column: 1
from .node import Node, Argument, Target, map_arg, _type_repr, _get_qualified_name
import torch.utils._pytree as pytree
from . import _pytree as fx_pytree
from typing import TYPE_CHECKING, Callable, Any, List, Dict, NamedTuple, Optional, Tuple, Set, FrozenSet, Type
from dataclasses import dataclass
from contextlib import contextmanager
import copy
import torch
Reported by Pylint.
Line: 3
Column: 1
from .node import Node, Argument, Target, map_arg, _type_repr, _get_qualified_name
import torch.utils._pytree as pytree
from . import _pytree as fx_pytree
from typing import TYPE_CHECKING, Callable, Any, List, Dict, NamedTuple, Optional, Tuple, Set, FrozenSet, Type
from dataclasses import dataclass
from contextlib import contextmanager
import copy
import torch
Reported by Pylint.
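The message text is not reproduced in this report; findings at lines 1 and 3, column 1 of an import block are most often wrong-import-order or ungrouped-imports, i.e. standard-library imports mixed in after third-party and local ones. A minimal sketch of the conventional PEP 8 grouping for this file, assuming that is the complaint:

# Standard library first, then third-party, then local imports,
# each group separated by a blank line.
import copy
from contextlib import contextmanager
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Callable, Dict, FrozenSet, List, NamedTuple, Optional, Set, Tuple, Type

import torch
import torch.utils._pytree as pytree

from . import _pytree as fx_pytree
from .node import Node, Argument, Target, map_arg, _type_repr, _get_qualified_name

The same regrouping applies to the import-order findings reported below for the other files.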
Line: 18
Column: 5
if TYPE_CHECKING:
from .graph_module import GraphModule # noqa: F401
from ._symbolic_trace import Tracer # noqa: F401
# Mapping of builtins to their `typing` equivalent.
_origin_type_map = {
Reported by Pylint.
Line: 19
Column: 5
if TYPE_CHECKING:
from .graph_module import GraphModule # noqa: F401
from ._symbolic_trace import Tracer # noqa: F401
# Mapping of builtins to their `typing` equivalent.
_origin_type_map = {
list: List,
Reported by Pylint.
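The # noqa: F401 markers silence flake8's unused-import check, not Pylint's; imports under if TYPE_CHECKING: are intentionally unused at runtime, so Pylint needs its own pragma. A sketch, assuming unused-import is the finding here:

if TYPE_CHECKING:
    from .graph_module import GraphModule  # noqa: F401  # pylint: disable=unused-import
    from ._symbolic_trace import Tracer  # noqa: F401  # pylint: disable=unused-import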
Line: 54
Column: 64
_register_custom_builtin('nan', 'from math import nan', math.nan)
_register_custom_builtin('NoneType', 'NoneType = type(None)', type(None))
_register_custom_builtin('torch', 'import torch', torch)
_register_custom_builtin('device', 'from torch import device', torch.device)
_register_custom_builtin('fx_pytree', 'import torch.fx._pytree as fx_pytree', fx_pytree)
_register_custom_builtin('pytree', 'import torch.utils._pytree as pytree', pytree)
def _is_magic(x: str) -> bool:
Reported by Pylint.
Line: 826
Column: 47
Returns: the global name that should be used to reference 'obj' in generated source.
"""
if _is_from_torch(obj) and obj != torch.device: # to support registering torch.device
# HACK: workaround for how torch custom ops are registered. We
# can't import them like normal modules so they must retain their
# fully qualified name.
return _get_qualified_name(obj)
Reported by Pylint.
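obj != torch.device compares an object against a callable (the torch.device class), which Pylint typically flags as comparison-with-callable; an identity check states the intent more precisely. A sketch, assuming that is the warning:

if _is_from_torch(obj) and obj is not torch.device:  # identity test against the class itself
    return _get_qualified_name(obj)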
Line: 212
Column: 24
def __enter__(self):
pass
def __exit__(self, type, value, tb):
self.graph._insert = self.orig_insert
class _node_list:
def __init__(self, graph: 'Graph', direction: str = '_next'):
assert direction in ['_next', '_prev']
Reported by Pylint.
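Column 24 lands on the third parameter of __exit__, named type, which shadows the builtin (redefined-builtin). The standard-library convention is exc_type, exc_value, traceback; a sketch of the same methods with conventional names, assuming that is the finding:

def __enter__(self):
    pass

def __exit__(self, exc_type, exc_value, traceback):
    # conventional names; `exc_type` no longer shadows the builtin `type`
    self.graph._insert = self.orig_insert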
Line: 414
Column: 20
return n
def flatten_inps(self, *args):
flat_args, args_spec = pytree.tree_flatten(args)
return flat_args
def unflatten_outs(self, out):
if self._pytree_info is None:
return out
Reported by Pylint.
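args_spec is bound but never used, consistent with an unused-variable finding at this position. Discarding the tree spec explicitly is the usual fix; a sketch, assuming that is the message:

def flatten_inps(self, *args):
    flat_args, _ = pytree.tree_flatten(args)  # `_` marks the spec as intentionally unused
    return flat_args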
Line: 438
Column: 9
raise RuntimeError(f'Tried to erase Node {to_erase} but it still had {len(to_erase.users)} '
f'users in the graph: {to_erase.users}!')
to_erase._remove_from_list()
to_erase._erased = True # iterators may retain handles to erased nodes
self._len -= 1
# Null out this Node's argument nodes so that the Nodes referred to
# can update their ``users`` accordingly
Reported by Pylint.
Line: 439
Column: 9
f'users in the graph: {to_erase.users}!')
to_erase._remove_from_list()
to_erase._erased = True # iterators may retain handles to erased nodes
self._len -= 1
# Null out this Node's argument nodes so that the Nodes referred to
# can update their ``users`` accordingly
new_args = map_arg(to_erase.args, lambda n: None)
Reported by Pylint.
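Both findings point at underscore-prefixed members of another object (to_erase._remove_from_list(), to_erase._erased), which Pylint reports as protected-access even between cooperating classes in the same package. Where the access is deliberate, a scoped pragma is the conventional acknowledgement; a sketch, assuming that is the rule:

to_erase._remove_from_list()  # pylint: disable=protected-access
to_erase._erased = True  # pylint: disable=protected-access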
caffe2/python/ideep/convfusion_op_test.py
124 issues
Line: 7
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
import math
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
Reported by Pylint.
Line: 8
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
import math
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.transformations import optimizeForMKLDNN
Reported by Pylint.
Line: 18
Column: 22
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class ConvFusionTest(hu.HypothesisTestCase):
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(3, 5),
size=st.integers(8, 20),
Reported by Pylint.
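Column 22 aligns with workspace.C.use_mkldnn. Members of compiled extension modules such as workspace.C are invisible to static analysis, which Pylint reports as no-member (or c-extension-no-member). A line-level pragma is one fix; a sketch, assuming that is the finding:

@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")  # pylint: disable=no-member

Alternatively, the module can be added to Pylint's extension-pkg-allow-list so the check is configured away project-wide.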
Line: 32
Column: 59
**mu.gcs)
def test_convolution_relu_fusion(self, stride, pad, kernel, size,
input_channels, output_channels,
batch_size, use_bias, group, gc, dc):
conv = core.CreateOperator(
"Conv",
["X0", "w0", "b0"] if use_bias else ["X0", "w0"],
["Y0"],
stride=stride,
Reported by Pylint.
Line: 89
Column: 13
print(Y1.flatten())
print(Y0.flatten())
print(np.max(np.abs(Y1 - Y0)))
self.assertTrue(False)
# Auto fusion for Conv + ReLU
workspace.ResetWorkspace()
old_net = caffe2_pb2.NetDef()
conv_old = caffe2_pb2.OperatorDef()
Reported by Pylint.
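self.assertTrue(False) is what Pylint calls redundant-unittest-assert; self.fail() expresses the intent directly and can carry the diagnostic that is currently printed separately. A sketch of the same check, assuming that is the finding:

if not np.allclose(Y0, Y1, atol=0.01, rtol=0.01):
    self.fail("Fused Conv+ReLU output diverges: max abs diff = %s"
              % np.max(np.abs(Y1 - Y0)))

The same rewrite applies to the other assertTrue(False) findings in this file and in fc_op_test.py below.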
Line: 115
Column: 13
print(Y2.flatten())
print(Y0.flatten())
print(np.max(np.abs(Y2 - Y0)))
self.assertTrue(False)
workspace.SwitchWorkspace(old_ws_name)
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
Reported by Pylint.
Line: 132
Column: 68
**mu.gcs)
def test_convolution_sum_fusion(self, stride, pad, kernel, size,
input_channels, output_channels,
batch_size, use_bias, group, sum_add, gc, dc):
pool_S0 = core.CreateOperator(
"MaxPool",
["SX0"],
["S0"],
stride=2,
Reported by Pylint.
Line: 152
Column: 9
group=group,
device_option=dc[0]
)
sum = core.CreateOperator(
sum_add,
["S0", "Y0"],
["S0"],
device_option=dc[0]
)
Reported by Pylint.
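The local name sum shadows the builtin (redefined-builtin). Renaming the operator handle is the straightforward fix; a sketch, assuming that is the message:

sum_op = core.CreateOperator(  # renamed from `sum` to avoid shadowing the builtin
    sum_add,
    ["S0", "Y0"],
    ["S0"],
    device_option=dc[0]
)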
Line: 181
Column: 1
fusion_type = 2,
device_option=dc[1]
)
pool_input_size = int(math.ceil(float(size + 2 * pad - kernel + 1) / stride)) * 2;
SX = np.random.rand(
batch_size, output_channels * group, pool_input_size, pool_input_size).astype(np.float32) - 0.5
X = np.random.rand(
batch_size, input_channels * group, size, size).astype(np.float32) - 0.5
w = np.random.rand(
Reported by Pylint.
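Two style issues are visible in this excerpt: spaces around = in the keyword argument fusion_type = 2, (bad-whitespace) and a trailing semicolon on the pool_input_size statement (unnecessary-semicolon). Cleaned-up versions of the two lines, assuming those are the findings:

fusion_type=2,
pool_input_size = int(math.ceil(float(size + 2 * pad - kernel + 1) / stride)) * 2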
Line: 215
Column: 13
print(S1.flatten())
print(S0.flatten())
print(np.max(np.abs(S1 - S0)))
self.assertTrue(False)
# Auto fusion for Conv + Sum
workspace.ResetWorkspace()
old_net = caffe2_pb2.NetDef()
pool_S0_old = caffe2_pb2.OperatorDef()
Reported by Pylint.
torch/optim/lr_scheduler.py
124 issues
Line: 10
Column: 1
from collections import Counter
from bisect import bisect_right
from .optimizer import Optimizer
EPOCH_DEPRECATION_WARNING = (
"The epoch parameter in `scheduler.step()` was not necessary and is being "
"deprecated where possible. Please use `scheduler.step()` to step the "
Reported by Pylint.
Line: 157
Column: 30
warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)
self.last_epoch = epoch
if hasattr(self, "_get_closed_form_lr"):
values = self._get_closed_form_lr()
else:
values = self.get_lr()
for i, data in enumerate(zip(self.optimizer.param_groups, values)):
param_group, lr = data
Reported by Pylint.
Line: 246
Column: 16
self.lr_lambdas[idx].__dict__.update(fn)
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.")
return [base_lr * lmbda(self.last_epoch)
for lmbda, base_lr in zip(self.lr_lambdas, self.base_lrs)]
Reported by Pylint.
Line: 323
Column: 16
self.lr_lambdas[idx].__dict__.update(fn)
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", UserWarning)
if self.last_epoch > 0:
return [group['lr'] * lmbda(self.last_epoch)
Reported by Pylint.
Line: 368
Column: 16
super(StepLR, self).__init__(optimizer, last_epoch, verbose)
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", UserWarning)
if (self.last_epoch == 0) or (self.last_epoch % self.step_size != 0):
return [group['lr'] for group in self.optimizer.param_groups]
Reported by Pylint.
Line: 415
Column: 16
super(MultiStepLR, self).__init__(optimizer, last_epoch, verbose)
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", UserWarning)
if self.last_epoch not in self.milestones:
return [group['lr'] for group in self.optimizer.param_groups]
Reported by Pylint.
Line: 480
Column: 16
super(WarmUpLR, self).__init__(optimizer, last_epoch, verbose)
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", UserWarning)
if self.last_epoch == 0:
return [group['lr'] * self.warmup_factor for group in self.optimizer.param_groups]
Reported by Pylint.
Line: 524
Column: 16
super(ExponentialLR, self).__init__(optimizer, last_epoch, verbose)
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", UserWarning)
if self.last_epoch == 0:
return self.base_lrs
Reported by Pylint.
Line: 584
Column: 16
super(CosineAnnealingLR, self).__init__(optimizer, last_epoch, verbose)
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", UserWarning)
if self.last_epoch == 0:
return self.base_lrs
Reported by Pylint.
Line: 968
Column: 16
updating the optimizer's momentum.
"""
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", UserWarning)
cycle = math.floor(1 + self.last_epoch / self.total_size)
x = 1. + self.last_epoch / self.total_size - cycle
Reported by Pylint.
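The same unnamed finding recurs at every get_lr override in this file (lines 246, 323, 368, 415, 480, 524, 584, 968, all at column 16). The report omits the message text, so the exact rule cannot be recovered from these excerpts; when one rule deliberately fires on a repeated pattern like this, it is usually acknowledged once at module level rather than per line. A sketch with a placeholder rule id, to be replaced by whichever check Pylint is actually reporting:

# at the top of torch/optim/lr_scheduler.py
# pylint: disable=some-rule-id  # placeholder: substitute the reported rule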
torch/serialization.py
122 issues
Line: 12
Column: 1
import tempfile
import warnings
from contextlib import closing, contextmanager
from ._utils import _import_dotted_name
from ._six import string_classes as _string_classes
from torch._sources import get_source_lines_and_file
from torch.types import Storage
from typing import Any, BinaryIO, cast, Dict, Optional, Type, Tuple, Union, IO
import copyreg
Reported by Pylint.
Line: 13
Column: 1
import warnings
from contextlib import closing, contextmanager
from ._utils import _import_dotted_name
from ._six import string_classes as _string_classes
from torch._sources import get_source_lines_and_file
from torch.types import Storage
from typing import Any, BinaryIO, cast, Dict, Optional, Type, Tuple, Union, IO
import copyreg
import pickle
Reported by Pylint.
Line: 619
Column: 30
cache = _get_layout.cache # type: ignore[attr-defined]
if not cache:
for v in torch.__dict__.values():
if isinstance(v, torch.layout):
cache[str(v)] = v
return cache[name]
# There are yet not good way to type annotate function attributes https://github.com/python/mypy/issues/2087
_get_layout.cache = {} # type: ignore[attr-defined]
Reported by Pylint.
Line: 625
Column: 16
# There are yet not good way to type annotate function attributes https://github.com/python/mypy/issues/2087
_get_layout.cache = {} # type: ignore[attr-defined]
copyreg.pickle(torch.layout, lambda obj: (_get_layout, (str(obj),)))
def _legacy_load(f, map_location, pickle_module, **pickle_load_args):
deserialized_objects: Dict[int, Any] = {}
Reported by Pylint.
Line: 825
Column: 35
elif isinstance(map_location, _string_classes):
def restore_location(storage, location):
return default_restore_location(storage, map_location)
elif isinstance(map_location, torch.device):
def restore_location(storage, location):
return default_restore_location(storage, str(map_location))
else:
def restore_location(storage, location):
result = map_location(storage, location)
Reported by Pylint.
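Column 35 lines up with the second parameter of restore_location, which these branches ignore (unused-argument). The callback signature is dictated by the caller, so renaming with a leading underscore is the idiomatic fix; a sketch, assuming that is the finding:

def restore_location(storage, _location):
    # the explicit map_location overrides whatever location was recorded
    return default_restore_location(storage, map_location)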
Line: 100
Column: 12
)
requirement_is_met = module_version >= req_version_tuple
except Exception as e:
message = (
"'%s' module version string is malformed '%s' and cannot be compared"
" with tuple %s"
) % (
module.__name__, module.__version__, str(req_version_tuple)
Reported by Pylint.
Line: 132
Column: 14
def validate_cuda_device(location):
device = torch.cuda._utils._get_device_index(location, True)
if not torch.cuda.is_available():
raise RuntimeError('Attempting to deserialize object on a CUDA '
'device but torch.cuda.is_available() is False. '
'If you are running on a CPU-only machine, '
Reported by Pylint.
Line: 390
Column: 3
serialized_storages = {}
def persistent_id(obj: Any) -> Optional[Tuple]:
# FIXME: the docs say that persistent_id should only return a string
# but torch store returns tuples. This works only in the binary protocol
# see
# https://docs.python.org/2/library/pickle.html#pickling-and-unpickling-external-objects
# https://github.com/python/cpython/blob/master/Lib/pickle.py#L527-L537
if isinstance(obj, type) and issubclass(obj, nn.Module):
Reported by Pylint.
Line: 403
Column: 20
try:
source_lines, _, source_file = get_source_lines_and_file(obj)
source = ''.join(source_lines)
except Exception: # saving the source is optional, so we can ignore any errors
warnings.warn("Couldn't retrieve source code for container of "
"type " + obj.__name__ + ". It won't be checked "
"for correctness upon loading.")
return ('module', obj, source_file, source)
Reported by Pylint.
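The bare except Exception here (and the unused e bound at line 100) are what Pylint reports as broad-except and unused-variable. In this case the breadth is intentional, as the inline comment notes that saving the source is optional, so a scoped pragma documents the decision; a sketch, assuming those are the findings:

try:
    source_lines, _, source_file = get_source_lines_and_file(obj)
    source = ''.join(source_lines)
except Exception:  # pylint: disable=broad-except  # saving source is optional
    warnings.warn("Couldn't retrieve source code for container of "
                  "type " + obj.__name__ + ". It won't be checked "
                  "for correctness upon loading.")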
benchmarks/fastrnns/custom_lstms.py
122 issues
Line: 1
Column: 1
import torch
import torch.nn as nn
from torch.nn import Parameter
import torch.jit as jit
import warnings
from collections import namedtuple
from typing import List, Tuple
from torch import Tensor
import numbers
Reported by Pylint.
Line: 2
Column: 1
import torch
import torch.nn as nn
from torch.nn import Parameter
import torch.jit as jit
import warnings
from collections import namedtuple
from typing import List, Tuple
from torch import Tensor
import numbers
Reported by Pylint.
Line: 3
Column: 1
import torch
import torch.nn as nn
from torch.nn import Parameter
import torch.jit as jit
import warnings
from collections import namedtuple
from typing import List, Tuple
from torch import Tensor
import numbers
Reported by Pylint.
Line: 4
Column: 1
import torch
import torch.nn as nn
from torch.nn import Parameter
import torch.jit as jit
import warnings
from collections import namedtuple
from typing import List, Tuple
from torch import Tensor
import numbers
Reported by Pylint.
Line: 8
Column: 1
import warnings
from collections import namedtuple
from typing import List, Tuple
from torch import Tensor
import numbers
'''
Some helper classes for writing custom TorchScript LSTMs.
Reported by Pylint.
Line: 11
Column: 1
from torch import Tensor
import numbers
'''
Some helper classes for writing custom TorchScript LSTMs.
Goals:
- Classes are easy to read, use, and extend
- Performance of custom LSTMs approach fused-kernel-levels of speed.
Reported by Pylint.
Line: 104
Column: 23
self.bias_hh = Parameter(torch.randn(4 * hidden_size))
@jit.script_method
def forward(self, input: Tensor, state: Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
hx, cx = state
gates = (torch.mm(input, self.weight_ih.t()) + self.bias_ih +
torch.mm(hx, self.weight_hh.t()) + self.bias_hh)
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
Reported by Pylint.
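Column 23 points at the parameter input, which shadows the builtin (redefined-builtin); the same name recurs in the LayerNorm methods flagged below. Renaming is generally safe for TorchScript methods, since callers pass the argument positionally; a sketch, assuming that is the warning:

@jit.script_method
def forward(self, inp: Tensor, state: Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
    hx, cx = state  # `inp` no longer shadows the builtin `input`
    gates = (torch.mm(inp, self.weight_ih.t()) + self.bias_ih +
             torch.mm(hx, self.weight_hh.t()) + self.bias_hh)
    ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)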
Line: 128
Column: 3
normalized_shape = (normalized_shape,)
normalized_shape = torch.Size(normalized_shape)
# XXX: This is true for our LSTM / NLP use case and helps simplify code
assert len(normalized_shape) == 1
self.weight = Parameter(torch.ones(normalized_shape))
self.bias = Parameter(torch.zeros(normalized_shape))
self.normalized_shape = normalized_shape
Reported by Pylint.
Line: 136
Column: 39
self.normalized_shape = normalized_shape
@jit.script_method
def compute_layernorm_stats(self, input):
mu = input.mean(-1, keepdim=True)
sigma = input.std(-1, keepdim=True, unbiased=False)
return mu, sigma
@jit.script_method
Reported by Pylint.
Line: 142
Column: 23
return mu, sigma
@jit.script_method
def forward(self, input):
mu, sigma = self.compute_layernorm_stats(input)
return (input - mu) / sigma * self.weight + self.bias
class LayerNormLSTMCell(jit.ScriptModule):
Reported by Pylint.
test/test_cpp_extensions_jit.py
121 issues
Line: 14
Column: 1
import textwrap
from multiprocessing import Process
import torch.testing._internal.common_utils as common
import torch
import torch.backends.cudnn
import torch.utils.cpp_extension
from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
from torch.testing._internal.common_utils import gradcheck, TEST_WITH_ASAN, has_breakpad
Reported by Pylint.
Line: 15
Column: 1
from multiprocessing import Process
import torch.testing._internal.common_utils as common
import torch
import torch.backends.cudnn
import torch.utils.cpp_extension
from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
from torch.testing._internal.common_utils import gradcheck, TEST_WITH_ASAN, has_breakpad
Reported by Pylint.
Line: 16
Column: 1
import torch.testing._internal.common_utils as common
import torch
import torch.backends.cudnn
import torch.utils.cpp_extension
from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
from torch.testing._internal.common_utils import gradcheck, TEST_WITH_ASAN, has_breakpad
Reported by Pylint.
Line: 17
Column: 1
import torch.testing._internal.common_utils as common
import torch
import torch.backends.cudnn
import torch.utils.cpp_extension
from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
from torch.testing._internal.common_utils import gradcheck, TEST_WITH_ASAN, has_breakpad
TEST_CUDA = torch.cuda.is_available() and CUDA_HOME is not None
Reported by Pylint.
Line: 18
Column: 1
import torch
import torch.backends.cudnn
import torch.utils.cpp_extension
from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
from torch.testing._internal.common_utils import gradcheck, TEST_WITH_ASAN, has_breakpad
TEST_CUDA = torch.cuda.is_available() and CUDA_HOME is not None
TEST_CUDNN = False
Reported by Pylint.
Line: 19
Column: 1
import torch.backends.cudnn
import torch.utils.cpp_extension
from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
from torch.testing._internal.common_utils import gradcheck, TEST_WITH_ASAN, has_breakpad
TEST_CUDA = torch.cuda.is_available() and CUDA_HOME is not None
TEST_CUDNN = False
TEST_ROCM = torch.cuda.is_available() and torch.version.hip is not None and ROCM_HOME is not None
Reported by Pylint.
Line: 456
Column: 9
self.assertEqual(result[0], 123)
def test_reload_jit_extension(self):
def compile(code):
return torch.utils.cpp_extension.load_inline(
name="reloaded_jit_extension",
cpp_sources=code,
functions="f",
verbose=True,
Reported by Pylint.
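The nested helper is named compile, shadowing the builtin (redefined-builtin). A rename keeps the test readable without a pragma; a sketch, assuming that is the finding:

def test_reload_jit_extension(self):
    def compile_extension(code):  # renamed from `compile`
        return torch.utils.cpp_extension.load_inline(
            name="reloaded_jit_extension",
            cpp_sources=code,
            functions="f",
            verbose=True,
        )

The input = torch.randn(...) findings later in this file are the same pattern: input shadows a builtin and can be renamed (e.g. to x or inp).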
Line: 482
Column: 9
verbose=True,
)
input = torch.randn(2, 5, dtype=dtype)
cpp_linear = extension.Net(5, 2)
cpp_linear.to(dtype)
python_linear = torch.nn.Linear(5, 2).to(dtype)
# First make sure they have the same parameters
Reported by Pylint.
Line: 520
Column: 31
self.x = torch.nn.Parameter(torch.tensor(1.0))
self.net = extension.Net(3, 5)
def forward(self, input):
return self.net.forward(input) + self.x
net = extension.Net(5, 2)
net.double()
net.to(torch.get_default_dtype())
Reported by Pylint.
Line: 532
Column: 9
# C++ module as an element of the Sequential.
sequential = torch.nn.Sequential(M(), torch.nn.Tanh(), net, torch.nn.Sigmoid())
input = torch.randn(2, 3)
# Try calling the module!
output = sequential.forward(input)
# The call operator is bound to forward too.
self.assertEqual(output, sequential(input))
self.assertEqual(list(output.shape), [2, 2])
Reported by Pylint.
test/distributed/elastic/agent/server/test/local_elastic_agent_test.py
121 issues
Line: 23
Column: 1
from unittest import mock
from unittest.mock import Mock, patch
import torch
import torch.distributed as dist
import torch.distributed.elastic.rendezvous.registry as rdzv_registry
import torch.distributed.rpc as rpc
from torch.distributed.elastic.agent.server.api import (
RunResult,
Reported by Pylint.
Line: 24
Column: 1
from unittest.mock import Mock, patch
import torch
import torch.distributed as dist
import torch.distributed.elastic.rendezvous.registry as rdzv_registry
import torch.distributed.rpc as rpc
from torch.distributed.elastic.agent.server.api import (
RunResult,
WorkerSpec,
Reported by Pylint.
Line: 25
Column: 1
import torch
import torch.distributed as dist
import torch.distributed.elastic.rendezvous.registry as rdzv_registry
import torch.distributed.rpc as rpc
from torch.distributed.elastic.agent.server.api import (
RunResult,
WorkerSpec,
WorkerState,
Reported by Pylint.
Line: 26
Column: 1
import torch
import torch.distributed as dist
import torch.distributed.elastic.rendezvous.registry as rdzv_registry
import torch.distributed.rpc as rpc
from torch.distributed.elastic.agent.server.api import (
RunResult,
WorkerSpec,
WorkerState,
)
Reported by Pylint.
Line: 27
Column: 1
import torch.distributed as dist
import torch.distributed.elastic.rendezvous.registry as rdzv_registry
import torch.distributed.rpc as rpc
from torch.distributed.elastic.agent.server.api import (
RunResult,
WorkerSpec,
WorkerState,
)
from torch.distributed.elastic.agent.server.local_elastic_agent import LocalElasticAgent
Reported by Pylint.
Line: 32
Column: 1
WorkerSpec,
WorkerState,
)
from torch.distributed.elastic.agent.server.local_elastic_agent import LocalElasticAgent
from torch.distributed.elastic.multiprocessing import Std
from torch.distributed.elastic.multiprocessing.errors import ChildFailedError, record
from torch.distributed.elastic.rendezvous import RendezvousParameters
from torch.distributed.elastic.rendezvous.etcd_server import EtcdServer
from torch.distributed.rpc.backend_registry import BackendType
Reported by Pylint.
Line: 33
Column: 1
WorkerState,
)
from torch.distributed.elastic.agent.server.local_elastic_agent import LocalElasticAgent
from torch.distributed.elastic.multiprocessing import Std
from torch.distributed.elastic.multiprocessing.errors import ChildFailedError, record
from torch.distributed.elastic.rendezvous import RendezvousParameters
from torch.distributed.elastic.rendezvous.etcd_server import EtcdServer
from torch.distributed.rpc.backend_registry import BackendType
from torch.testing._internal.common_utils import (
Reported by Pylint.
Line: 34
Column: 1
)
from torch.distributed.elastic.agent.server.local_elastic_agent import LocalElasticAgent
from torch.distributed.elastic.multiprocessing import Std
from torch.distributed.elastic.multiprocessing.errors import ChildFailedError, record
from torch.distributed.elastic.rendezvous import RendezvousParameters
from torch.distributed.elastic.rendezvous.etcd_server import EtcdServer
from torch.distributed.rpc.backend_registry import BackendType
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
Reported by Pylint.
Line: 35
Column: 1
from torch.distributed.elastic.agent.server.local_elastic_agent import LocalElasticAgent
from torch.distributed.elastic.multiprocessing import Std
from torch.distributed.elastic.multiprocessing.errors import ChildFailedError, record
from torch.distributed.elastic.rendezvous import RendezvousParameters
from torch.distributed.elastic.rendezvous.etcd_server import EtcdServer
from torch.distributed.rpc.backend_registry import BackendType
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_TSAN,
Reported by Pylint.
Line: 36
Column: 1
from torch.distributed.elastic.multiprocessing import Std
from torch.distributed.elastic.multiprocessing.errors import ChildFailedError, record
from torch.distributed.elastic.rendezvous import RendezvousParameters
from torch.distributed.elastic.rendezvous.etcd_server import EtcdServer
from torch.distributed.rpc.backend_registry import BackendType
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_TSAN,
sandcastle_skip_if,
Reported by Pylint.
caffe2/python/ideep/fc_op_test.py
121 issues
Line: 8
Column: 1
import unittest
from functools import reduce
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 9
Column: 1
import unittest
from functools import reduce
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
Reported by Pylint.
Line: 17
Column: 22
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class FcTest(hu.HypothesisTestCase):
@given(n=st.integers(1, 5), m=st.integers(1, 5),
k=st.integers(1, 5), **mu.gcs)
@settings(deadline=1000)
def test_fc_2_dims(self, n, m, k, gc, dc):
Reported by Pylint.
Line: 45
Column: 54
w=st.integers(1, 5),
axis=st.integers(1, 3),
**mu.gcs)
def test_fc_with_axis(self, n, m, c, h, w, axis, gc, dc):
X = np.random.rand(n, c, h, w).astype(np.float32) - 0.5
k = reduce((lambda x, y: x * y), [n, c, h, w][axis - 4:])
nn = reduce((lambda x, y: x * y), [n, c, h, w][:axis])
W = np.random.rand(m, k).astype(np.float32) - 0.5
b = np.random.rand(m).astype(np.float32) - 0.5
Reported by Pylint.
Line: 115
Column: 13
print(Y1)
print(Y0)
print(np.max(np.abs(Y1 - Y0)))
self.assertTrue(False)
dW0 = dW0.flatten()
dW1 = dW1.flatten()
if not np.allclose(dW0, dW1, atol=0.01, rtol=0.01):
print(dW1)
Reported by Pylint.
Line: 123
Column: 13
print(dW1)
print(dW0)
print(np.max(np.abs(dW1 - dW0)))
self.assertTrue(False)
db0 = db0.flatten()
db1 = db1.flatten()
if not np.allclose(db0, db1, atol=0.01, rtol=0.01):
print(db1)
Reported by Pylint.
Line: 131
Column: 13
print(db1)
print(db0)
print(np.max(np.abs(db1 - db0)))
self.assertTrue(False)
@given(n=st.integers(1, 5),
o=st.integers(1, 5),
i=st.integers(1, 5),
h=st.integers(1, 5),
Reported by Pylint.
Line: 141
Column: 58
axis_w=st.integers(1, 3),
**mu.gcs)
@settings(deadline=1000)
def test_fc_with_axis_w(self, n, o, i, h, w, axis_w, gc, dc):
W = np.random.rand(o, i, h, w).astype(np.float32) - 0.5
k = reduce((lambda x, y: x * y), [o, i, h, w][axis_w - 4:])
m = reduce((lambda x, y: x * y), [o, i, h, w][:axis_w])
X = np.random.rand(n, k).astype(np.float32) - 0.5
b = np.random.rand(m).astype(np.float32) - 0.5
Reported by Pylint.
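Columns 54-58 in these test signatures line up with the gc parameter, suggesting unused-argument. gc and dc are injected as keywords by hypothesis via **mu.gcs, so an unused one cannot simply be renamed away; a signature-level pragma is the usual compromise. A sketch with reduced placeholder strategies (test_fc here stands in for the real test methods), assuming that is the finding:

@given(n=st.integers(1, 5), k=st.integers(1, 5), **mu.gcs)
def test_fc(self, n, k, gc, dc):  # pylint: disable=unused-argument  # gc/dc injected by mu.gcs
    ...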
Line: 211
Column: 13
print(Y1)
print(Y0)
print(np.max(np.abs(Y1 - Y0)))
self.assertTrue(False)
dW0 = dW0.flatten()
dW1 = dW1.flatten()
if not np.allclose(dW0, dW1, atol=0.01, rtol=0.01):
print(dW1)
Reported by Pylint.
Line: 219
Column: 13
print(dW1)
print(dW0)
print(np.max(np.abs(dW1 - dW0)))
self.assertTrue(False)
db0 = db0.flatten()
db1 = db1.flatten()
if not np.allclose(db0, db1, atol=0.01, rtol=0.01):
print(db1)
Reported by Pylint.
test/benchmark_utils/test_benchmark_utils.py
121 issues
Line: 10
Column: 1
from typing import Any, List, Tuple
import unittest
import torch
import torch.utils.benchmark as benchmark_utils
from torch.testing._internal.common_utils import TestCase, run_tests, IS_SANDCASTLE, IS_WINDOWS, slowTest
import expecttest
import numpy as np
Reported by Pylint.
Line: 11
Column: 1
import unittest
import torch
import torch.utils.benchmark as benchmark_utils
from torch.testing._internal.common_utils import TestCase, run_tests, IS_SANDCASTLE, IS_WINDOWS, slowTest
import expecttest
import numpy as np
Reported by Pylint.
Line: 12
Column: 1
import torch
import torch.utils.benchmark as benchmark_utils
from torch.testing._internal.common_utils import TestCase, run_tests, IS_SANDCASTLE, IS_WINDOWS, slowTest
import expecttest
import numpy as np
CALLGRIND_ARTIFACTS: str = os.path.join(
Reported by Pylint.
Line: 13
Column: 1
import torch
import torch.utils.benchmark as benchmark_utils
from torch.testing._internal.common_utils import TestCase, run_tests, IS_SANDCASTLE, IS_WINDOWS, slowTest
import expecttest
import numpy as np
CALLGRIND_ARTIFACTS: str = os.path.join(
os.path.split(os.path.abspath(__file__))[0],
Reported by Pylint.
Line: 214
Column: 34
)
def __init__(self, stmt, setup, timer, globals):
self._random_state = np.random.RandomState(seed=self._seed)
self._mean_cost = {k: v for k, v in self._function_costs}[stmt]
def sample(self, mean, noise_level):
return max(self._random_state.normal(mean, mean * noise_level), 5e-9)
Reported by Pylint.
Line: 539
Column: 9
counts = collections.Counter([s.counts(denoise=True) // 10_000 * 10_000 for s in stats])
self.assertGreater(max(counts.values()), 1, f"Every instruction count total was unique: {counts}")
from torch.utils.benchmark.utils.valgrind_wrapper.timer_interface import wrapper_singleton
self.assertIsNone(
wrapper_singleton()._bindings_module,
"JIT'd bindings are only for back testing."
)
Reported by Pylint.
Line: 213
Column: 48
("with torch.no_grad():\n y = x + 1", 10e-6),
)
def __init__(self, stmt, setup, timer, globals):
self._random_state = np.random.RandomState(seed=self._seed)
self._mean_cost = {k: v for k, v in self._function_costs}[stmt]
def sample(self, mean, noise_level):
return max(self._random_state.normal(mean, mean * noise_level), 5e-9)
Reported by Pylint.
Line: 213
Column: 34
("with torch.no_grad():\n y = x + 1", 10e-6),
)
def __init__(self, stmt, setup, timer, globals):
self._random_state = np.random.RandomState(seed=self._seed)
self._mean_cost = {k: v for k, v in self._function_costs}[stmt]
def sample(self, mean, noise_level):
return max(self._random_state.normal(mean, mean * noise_level), 5e-9)
Reported by Pylint.
Line: 213
Column: 41
("with torch.no_grad():\n y = x + 1", 10e-6),
)
def __init__(self, stmt, setup, timer, globals):
self._random_state = np.random.RandomState(seed=self._seed)
self._mean_cost = {k: v for k, v in self._function_costs}[stmt]
def sample(self, mean, noise_level):
return max(self._random_state.normal(mean, mean * noise_level), 5e-9)
Reported by Pylint.
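The repeated findings at line 213 all point into the parameter list of __init__(self, stmt, setup, timer, globals): globals shadows a builtin and several parameters go unused. Because this fake timer mirrors torch.utils.benchmark.Timer, whose constructor genuinely takes a globals keyword, the names cannot change without breaking the mirroring; a signature-level pragma is the usual fix. A sketch, assuming those are the findings:

def __init__(self, stmt, setup, timer, globals):  # pylint: disable=redefined-builtin,unused-argument
    self._random_state = np.random.RandomState(seed=self._seed)
    self._mean_cost = dict(self._function_costs)[stmt]

(dict(self._function_costs) builds the same mapping as the original {k: v for k, v in self._function_costs} without the unnecessary comprehension.)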
tools/codegen/api/python.py
120 issues
Line: 295
Column: 3
# When binding to C++, it's first binded to a local 'out' variable:
# 'auto out = _r.tensorlist_n<2>(2);',
# then binded to scattered C++ output arguments as 'out[0]', 'out[1]', and etc.
# TODO: maybe don't need keep scattered out fields for python signature?
outputs: Tuple[PythonArgument, ...]
@staticmethod
def from_outputs(outputs: Tuple[PythonArgument, ...]) -> Optional['PythonOutArgument']:
if not outputs:
Reported by Pylint.
Line: 317
Column: 3
raise RuntimeError(f'Unsupported output type: {outputs}')
return PythonOutArgument(
name='out',
# TODO: shouldn't this be OptionalType[ListType[...]], since it defaults to None?
type=ListType(BaseType(BaseTy.Tensor), size),
default='None',
default_init=None,
outputs=outputs,
)
Reported by Pylint.
Line: 331
Column: 3
name: str
# Positional arguments.
# TODO: create a dedicated SelfArgument type for 'self'?
input_args: Tuple[PythonArgument, ...]
# Keyword arguments excluding the 'out' argument and scattered kwargs belonging
# to TensorOptions (dtype, layout, device, pin_memory, requires_grad, etc).
input_kwargs: Tuple[PythonArgument, ...]
Reported by Pylint.
Line: 348
Column: 3
# It's possible that the C++ signature doesn't take TensorOptions object (e.g.
# for out variant), in which case they will be used as scattered fields without
# being packed into 'options'.
# TODO: maybe create a PythonTensorOptionsArgument?
tensor_options_args: Tuple[PythonArgument, ...]
# method or function signature?
method: bool
Reported by Pylint.
Line: 471
Column: 43
returns_str = self.returns.returns_str_pyi()
return f'def {self.name}({", ".join(schema_formals)}) -> {returns_str}: ...'
def signature_str_pyi_vararg(self, *, skip_outputs: bool = False) -> Optional[str]:
# the codegen doesn't include vararg variants for deprecated signatures
return None
# This struct is used to hold the PythonSignature and its corresponding
# NativeFunction BEFORE grouping base and out-variant functions.
Reported by Pylint.
Line: 607
Column: 3
return 'Tensor?'
elem = argument_type_str(t.elem, simple_type=simple_type)
if elem == 'Layout':
# TODO: fix this special case in PythonArgParser?
return 'Layout'
else:
return f'{elem}?'
elif isinstance(t, ListType):
Reported by Pylint.
Line: 646
Column: 3
return PythonArgument(
name=a.name,
type=a.type,
# TODO: directly translate a.default to python default
default=str(pythonify_default(cpp.default_expr(a.default, a.type)))
if a.default is not None else None,
default_init=None,
)
Reported by Pylint.
Line: 739
Column: 3
method=method,
)
# TODO blowtorch
# note: removing this will be BC-breaking. A quick test shows that
# randperm will otherwise default its dtype to torch.float64
def _dtype_default_type_hack(name: str) -> str:
if name.startswith('randperm') or name == 'tril_indices' or name == 'triu_indices':
return 'torch.int64'
Reported by Pylint.
Line: 809
Column: 3
if str(t.elem) == 'int':
ret = 'Union[_int, _size]' if t.size is not None else '_size'
elif t.is_tensor_like():
# TODO: this doesn't seem right...
# Tensor?[] currently translates to Optional[Union[Tuple[Tensor, ...], List[Tensor]]]
# It should probably translate to Union[Tuple[Optional[Tensor], ...], List[Optional[Tensor]]]
if isinstance(t.elem, OptionalType):
add_optional = True
ret = 'Union[Tensor, Tuple[Tensor, ...], List[Tensor]]' if t.size is not None else \
Reported by Pylint.
Line: 826
Column: 5
ret = 'Optional[' + ret + ']'
return ret
raise RuntimeError(f'unrecognized type {repr(t)}')
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# C++ Function Dispatch
Reported by Pylint.
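Most findings in this file sit on TODO comment lines, which matches Pylint's fixme warning (W0511); line 471 additionally has a skip_outputs parameter that goes unused, apparently kept for signature symmetry with signature_str_pyi. Where TODOs are tracked intentionally, the warning is usually disabled rather than the comments removed; a sketch, assuming fixme is the rule being reported:

# at the top of tools/codegen/api/python.py (or project-wide in pylintrc via disable=fixme)
# pylint: disable=fixme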