The following issues were found:
torch/testing/_internal/distributed/rpc/dist_optimizer_test.py
70 issues
Line: 23
Column: 17
# default generator. The race from multiple RPC threads could mess up
# the draw order from the default RNG instance, leading to
# non-deterministic behavior. Hence, create a dedicated RNG here.
g_cpu = torch.Generator()
g_cpu.manual_seed(0)
self.w = torch.rand((3, 3), requires_grad=requires_grad, generator=g_cpu)
def forward(self, t1):
return torch.mm(self.w, t1)
Reported by Pylint.
Line: 25
Column: 18
# non-deterministic behavior. Hence, create a dedicated RNG here.
g_cpu = torch.Generator()
g_cpu.manual_seed(0)
self.w = torch.rand((3, 3), requires_grad=requires_grad, generator=g_cpu)
def forward(self, t1):
return torch.mm(self.w, t1)
def get_w(self):
Reported by Pylint.
Line: 28
Column: 16
self.w = torch.rand((3, 3), requires_grad=requires_grad, generator=g_cpu)
def forward(self, t1):
return torch.mm(self.w, t1)
def get_w(self):
return self.w
Reported by Pylint.
Line: 99
Column: 33
@dist_init()
def test_dist_optim_exception(self):
# distributed version
owner1 = "worker%d" % ((self.rank + 1) % self.world_size)
owner2 = "worker%d" % ((self.rank + 2) % self.world_size)
remote_module1 = rpc.remote(owner1, MyModule)
remote_module2 = rpc.remote(owner2, MyModule)
remote_param1 = remote_method(MyModule.get_w, remote_module1)
Reported by Pylint.
Line: 100
Column: 33
def test_dist_optim_exception(self):
# distributed version
owner1 = "worker%d" % ((self.rank + 1) % self.world_size)
owner2 = "worker%d" % ((self.rank + 2) % self.world_size)
remote_module1 = rpc.remote(owner1, MyModule)
remote_module2 = rpc.remote(owner2, MyModule)
remote_param1 = remote_method(MyModule.get_w, remote_module1)
remote_param2 = remote_method(MyModule.get_w, remote_module2)
Reported by Pylint.
Line: 112
Column: 21
)
with dist_autograd.context() as context_id:
g_cpu = torch.Generator()
g_cpu.manual_seed(0)
t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
output1 = rpc_async_method(MyModule.forward, remote_module1, t2)
output2 = rpc_async_method(MyModule.forward, remote_module2, output1.wait())
Reported by Pylint.
Line: 114
Column: 18
with dist_autograd.context() as context_id:
g_cpu = torch.Generator()
g_cpu.manual_seed(0)
t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
output1 = rpc_async_method(MyModule.forward, remote_module1, t2)
output2 = rpc_async_method(MyModule.forward, remote_module2, output1.wait())
loss = torch.add(output2.wait(), t1).sum()
Reported by Pylint.
Line: 115
Column: 18
g_cpu = torch.Generator()
g_cpu.manual_seed(0)
t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
output1 = rpc_async_method(MyModule.forward, remote_module1, t2)
output2 = rpc_async_method(MyModule.forward, remote_module2, output1.wait())
loss = torch.add(output2.wait(), t1).sum()
dist_autograd.backward(context_id, [loss])
Reported by Pylint.
Line: 118
Column: 20
t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
output1 = rpc_async_method(MyModule.forward, remote_module1, t2)
output2 = rpc_async_method(MyModule.forward, remote_module2, output1.wait())
loss = torch.add(output2.wait(), t1).sum()
dist_autograd.backward(context_id, [loss])
with self.assertRaisesRegex(Exception, "Error running optimizer"):
dist_optim.step(context_id)
Reported by Pylint.
Line: 121
Column: 18
loss = torch.add(output2.wait(), t1).sum()
dist_autograd.backward(context_id, [loss])
with self.assertRaisesRegex(Exception, "Error running optimizer"):
dist_optim.step(context_id)
@dist_init()
def test_dist_optim_exception_on_constructor(self):
# distributed version
Reported by Pylint.
torch/distributed/nn/api/remote_module.py
68 issues
Line: 22
Column: 1
import torch
import torch.distributed.rpc as rpc
from torch import Tensor, device, dtype, nn
from torch.distributed.nn.jit import instantiator
from torch.distributed import _remote_device
from torch.distributed.rpc.internal import _internal_rpc_pickler
from torch.nn import Module
from torch.nn.parameter import Parameter
Reported by Pylint.
Line: 22
Column: 1
import torch
import torch.distributed.rpc as rpc
from torch import Tensor, device, dtype, nn
from torch.distributed.nn.jit import instantiator
from torch.distributed import _remote_device
from torch.distributed.rpc.internal import _internal_rpc_pickler
from torch.nn import Module
from torch.nn.parameter import Parameter
Reported by Pylint.
Line: 436
Column: 45
# If ``enable_moving_cpu_tensors_to_cuda`` is true, but the device map is not set,
# then any CPU tensors can still be moved to a cuda device to run forward,
# but the output must be moved back to CPU before being sent over the wire.
enable_moving_cpu_tensors_to_cuda = torch.device(self.device).type == "cuda"
return enable_moving_cpu_tensors_to_cuda
def _init_template(self, module_interface_cls, enable_moving_cpu_tensors_to_cuda):
"""
Instantiates template on local side.
Reported by Pylint.
Line: 80
Column: 46
)
def _create_module(module_cls, args, kwargs, device):
module = module_cls(*args, **kwargs)
if not isinstance(module, nn.Module):
raise ValueError(
"Expect `module_cls(*args, **kwargs)` returns an instance of <class nn.Module>, "
f"but it returns an instance of {type(module)}."
Reported by Pylint.
Line: 92
Column: 31
def _create_module_with_interface(
module_cls, args, kwargs, device, module_interface_cls
):
module = _create_module(module_cls, args, kwargs, device)
if module_interface_cls is not None:
module = torch.jit.script(module)
return rpc.RRef(module, module_interface_cls)
Reported by Pylint.
Line: 111
Column: 1
raise ValueError("Method ``{}`` not supported for RemoteModule".format(name))
class _RemoteModule(nn.Module):
def __init__(
self,
remote_device: str,
module_cls: Type[nn.Module],
args: Tuple = None,
Reported by Pylint.
Line: 243
Column: 3
# Create the module on the remote side.
fut.wait() # Ensure remote_module_cls is available on remote side.
# TODO: We need to change this to rpc.remote, and make it async (see the else branch below).
# For that we need to be able to apply _module_interface_cls to the RRef returned by rpc.remote
# See https://github.com/pytorch/pytorch/issues/58098 for more context.
self.module_rref = rpc.rpc_sync(
self.on,
_create_module_with_interface,
Reported by Pylint.
Line: 315
Column: 23
def apply(self: T, fn: Callable[[Module], None]) -> T: # type: ignore[return]
_raise_not_supported(self.apply.__name__)
def cuda(self: T, device: Optional[Union[int, device]] = None) -> T: # type: ignore[return]
_raise_not_supported(self.cuda.__name__)
def xpu(self: T, device: Optional[Union[int, device]] = None) -> T: # type: ignore[return]
_raise_not_supported(self.xpu.__name__)
Reported by Pylint.
Line: 318
Column: 22
def cuda(self: T, device: Optional[Union[int, device]] = None) -> T: # type: ignore[return]
_raise_not_supported(self.cuda.__name__)
def xpu(self: T, device: Optional[Union[int, device]] = None) -> T: # type: ignore[return]
_raise_not_supported(self.xpu.__name__)
def cpu(self: T) -> T: # type: ignore[return]
_raise_not_supported(self.cpu.__name__)
Reported by Pylint.
Line: 339
Column: 1
def bfloat16(self: T) -> T: # type: ignore[return]
_raise_not_supported(self.bfloat16.__name__)
def to(self, *args, **kwargs) -> T: # type: ignore[return]
_raise_not_supported(self.to.__name__)
def register_backward_hook( # type: ignore[return]
self, hook: Callable[[Module, _grad_t, _grad_t], Union[None, Tensor]]
) -> RemovableHandle:
Reported by Pylint.
torch/quantization/ns/graph_passes.py
68 issues
Line: 6
Column: 1
from torch.fx.graph import Graph, Node
from torch.quantization.fx.utils import get_new_attr_name_with_prefix
from .utils import (
get_node_first_input_and_output_type,
getattr_from_fqn,
NodeInputOrOutputType,
return_first_non_observer_node,
get_number_of_non_param_args,
Reported by Pylint.
Line: 17
Column: 1
get_node_input_qparams,
)
from .ns_types import (
NSSingleResultValuesType,
NSSubgraph,
NSNodeTargetType,
)
from torch.quantization.ns.mappings import (
Reported by Pylint.
Line: 190
Column: 26
'get_attr', zero_point_node_name, (), {}, zero_point_node_name)
# create the quantize_per_tensor call
return graph_c.create_node(
'call_function', torch.quantize_per_tensor,
(prev_node_c, scale_node, zero_point_node, torch.quint8), {},
dtype_cast_name)
def _insert_dtype_cast_after_node(
node_a: Node,
Reported by Pylint.
Line: 191
Column: 52
# create the quantize_per_tensor call
return graph_c.create_node(
'call_function', torch.quantize_per_tensor,
(prev_node_c, scale_node, zero_point_node, torch.quint8), {},
dtype_cast_name)
def _insert_dtype_cast_after_node(
node_a: Node,
node_c: Node,
Reported by Pylint.
Line: 243
Column: 25
(node_input_type_a == NodeInputOrOutputType.FP32 and
node_input_type_c == NodeInputOrOutputType.FP32_OR_INT8)
):
dtype_cast_op = torch.dequantize
elif (
node_input_type_a == node_input_type_c and
node_input_type_a != NodeInputOrOutputType.UNKNOWN
):
dtype_cast_mod_cls = torch.nn.Identity
Reported by Pylint.
Line: 258
Column: 29
node_a_input_qparams = get_node_input_qparams(
node_a, gm_a, node_type_to_io_type_map)
if node_a_input_qparams is not None:
dtype_cast_op = torch.quantize_per_tensor # type: ignore[assignment]
dtype_cast_scale, dtype_cast_zero_point = node_a_input_qparams
else:
raise AssertionError(
f"dtype cast from {node_input_type_c} {node_c.format_node()} to " +
f"{node_input_type_a} {node_a.format_node()} needs to be implemented")
Reported by Pylint.
Line: 42
Column: 15
if is_activation_post_process(module):
assert isinstance(node.args[0], Node)
node_to_use_for_fqn = node.args[0]
fqn = gm._node_name_to_scope[node_to_use_for_fqn.name][0] # type: ignore[index]
return fqn # type: ignore[return-value]
def _insert_logger_after_node(
node: Node,
gm: GraphModule,
Reported by Pylint.
Line: 99
Column: 5
new_graph = Graph()
env: Dict[str, Any] = {}
modules = dict(gm.named_modules())
def load_arg(a):
return map_arg(a, lambda node: env[node.name])
for node in gm.graph.nodes:
Reported by Pylint.
Line: 237
Column: 3
node_input_type_c == NodeInputOrOutputType.INT8) or
(node_input_type_a == NodeInputOrOutputType.FP32 and
node_input_type_c == NodeInputOrOutputType.FP16) or
# TODO(future PR): determine the actual dtype of node_c,
# the current code only works because dequantize works with
# multiple input dtypes.
(node_input_type_a == NodeInputOrOutputType.FP32 and
node_input_type_c == NodeInputOrOutputType.FP32_OR_INT8)
):
Reported by Pylint.
Line: 290
Column: 3
new_dtype_cast_name = \
get_new_attr_name_with_prefix(node_name_prefix)(gm_b)
if dtype_cast_op:
# TODO(future PR): add handling for quantize_per_tensor
new_dtype_cast_node = graph_c.create_node(
'call_function', dtype_cast_op, (prev_node_c_inner,), {},
new_dtype_cast_name)
results.append(new_dtype_cast_node)
else:
Reported by Pylint.
benchmarks/sparse/dlmc/utils.py
68 issues
Line: 1
Column: 1
import torch
from pathlib import Path
from scipy import sparse
import math
def to_coo_scipy(x):
indices_1 = x._indices().numpy()
values_1 = x._values().numpy()
Reported by Pylint.
Line: 3
Column: 1
import torch
from pathlib import Path
from scipy import sparse
import math
def to_coo_scipy(x):
indices_1 = x._indices().numpy()
values_1 = x._values().numpy()
Reported by Pylint.
Line: 8
Column: 17
def to_coo_scipy(x):
indices_1 = x._indices().numpy()
values_1 = x._values().numpy()
return sparse.coo_matrix((values_1, (indices_1[0], indices_1[1])),
shape=x.shape)
Reported by Pylint.
Line: 9
Column: 16
def to_coo_scipy(x):
indices_1 = x._indices().numpy()
values_1 = x._values().numpy()
return sparse.coo_matrix((values_1, (indices_1[0], indices_1[1])),
shape=x.shape)
def sparse_grad_output(a, b):
Reported by Pylint.
Line: 26
Column: 33
def read_matrix_params(path):
with open(path, 'r') as file:
line = file.readline()
nrows, ncols, nnz = map(lambda el: int(el), line.split(', '))
return (nrows, ncols), nnz
def csr_to_coo(indices, indptr, shape):
n_rows, n_cols = shape
Reported by Pylint.
Line: 31
Column: 13
def csr_to_coo(indices, indptr, shape):
n_rows, n_cols = shape
cols = indices
rows = [0] * len(cols)
for i in range(n_rows):
for j in range(indptr[i], indptr[i + 1]):
rows[j] = i
Reported by Pylint.
Line: 42
Column: 33
def load_sparse_matrix(path, device):
with open(path, 'r') as file:
nrows, ncols, nnz = map(lambda el: int(el), file.readline().split(', '))
index_pointers = map(lambda el: int(el), file.readline().split())
indices = map(lambda el: int(el), file.readline().split())
index_pointers = list(index_pointers)
indices = list(indices)
Reported by Pylint.
Line: 43
Column: 30
def load_sparse_matrix(path, device):
with open(path, 'r') as file:
nrows, ncols, nnz = map(lambda el: int(el), file.readline().split(', '))
index_pointers = map(lambda el: int(el), file.readline().split())
indices = map(lambda el: int(el), file.readline().split())
index_pointers = list(index_pointers)
indices = list(indices)
data = torch.randn(nnz, dtype=torch.double)
Reported by Pylint.
Line: 44
Column: 23
with open(path, 'r') as file:
nrows, ncols, nnz = map(lambda el: int(el), file.readline().split(', '))
index_pointers = map(lambda el: int(el), file.readline().split())
indices = map(lambda el: int(el), file.readline().split())
index_pointers = list(index_pointers)
indices = list(indices)
data = torch.randn(nnz, dtype=torch.double)
shape = (nrows, ncols)
Reported by Pylint.
Line: 55
Column: 33
def gen_vector(path, device):
with open(path, 'r') as file:
nrows, ncols, nnz = map(lambda el: int(el), file.readline().split(', '))
index_pointers = map(lambda el: int(el), file.readline().split())
indices = map(lambda el: int(el), file.readline().split())
return torch.randn(nrows, dtype=torch.double, device=device)
Reported by Pylint.
caffe2/python/nomnigraph.py
68 issues
Line: 26
Column: 34
serialized_device_map = {}
for k in device_map:
serialized_device_map[k] = device_map[k].SerializeToString()
self._NNModule = C.NNModuleFromProtobufDistributed(
serialized_proto, serialized_device_map
)
# Default
elif serialized_proto:
self._NNModule, self._OpList = C.NNModuleFromProtobuf(serialized_proto)
Reported by Pylint.
Line: 31
Column: 48
)
# Default
elif serialized_proto:
self._NNModule, self._OpList = C.NNModuleFromProtobuf(serialized_proto)
else:
raise Exception(
"NNModule can be constructed with core.Net or caffe2_pb2.NetDef types"
)
else:
Reported by Pylint.
Line: 37
Column: 30
"NNModule can be constructed with core.Net or caffe2_pb2.NetDef types"
)
else:
self._NNModule = C.NNModule()
@property
def dataFlow(self):
return self._NNModule.dataFlow()
Reported by Pylint.
Line: 78
Column: 9
return self._NNModule.dataFlow().replaceNode(old_node, new_node)
def replaceProducer(self, tensor, new_producer):
C.replaceProducer(tensor, new_producer)
def replaceAllUsesWith(self, old_tensor, new_tensor):
C.replaceAllUsesWith(old_tensor, new_tensor)
def replaceAsConsumer(self, old_consumer, new_consumer):
Reported by Pylint.
Line: 81
Column: 9
C.replaceProducer(tensor, new_producer)
def replaceAllUsesWith(self, old_tensor, new_tensor):
C.replaceAllUsesWith(old_tensor, new_tensor)
def replaceAsConsumer(self, old_consumer, new_consumer):
C.replaceAsConsumer(old_consumer, new_consumer)
def replaceSubgraph(self, subgraph, new_node, inputs, outputs):
Reported by Pylint.
Line: 84
Column: 9
C.replaceAllUsesWith(old_tensor, new_tensor)
def replaceAsConsumer(self, old_consumer, new_consumer):
C.replaceAsConsumer(old_consumer, new_consumer)
def replaceSubgraph(self, subgraph, new_node, inputs, outputs):
self._NNModule.replaceSubgraph(subgraph, new_node, inputs, outputs)
def deleteSubgraph(self, subgraph):
Reported by Pylint.
Line: 105
Column: 17
def match(self, pattern):
for n in self.dataFlow.getMutableNodes():
m = C.matchSubgraph(n, pattern)
if m:
yield m
def render(s):
Reported by Pylint.
Line: 133
Column: 21
print(s)
NeuralNetOperator = C.NeuralNetOperator
Operator = C.NeuralNetOperator
NeuralNetData = C.NeuralNetData
Data = C.NeuralNetData
NNSubgraph = C.NNSubgraph
NNMatchGraph = C.NNMatchGraph
Reported by Pylint.
Line: 134
Column: 12
NeuralNetOperator = C.NeuralNetOperator
Operator = C.NeuralNetOperator
NeuralNetData = C.NeuralNetData
Data = C.NeuralNetData
NNSubgraph = C.NNSubgraph
NNMatchGraph = C.NNMatchGraph
Graph = C.Graph
Reported by Pylint.
Line: 135
Column: 17
NeuralNetOperator = C.NeuralNetOperator
Operator = C.NeuralNetOperator
NeuralNetData = C.NeuralNetData
Data = C.NeuralNetData
NNSubgraph = C.NNSubgraph
NNMatchGraph = C.NNMatchGraph
Graph = C.Graph
Annotation = C.Annotation
Reported by Pylint.
torch/_tensor_str.py
68 issues
Line: 90
Column: 35
self.max_width = max(self.max_width, len(value_str))
else:
nonzero_finite_vals = torch.masked_select(tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0))
if nonzero_finite_vals.numel() == 0:
# no valid number, do nothing
return
Reported by Pylint.
Line: 90
Column: 68
self.max_width = max(self.max_width, len(value_str))
else:
nonzero_finite_vals = torch.masked_select(tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0))
if nonzero_finite_vals.numel() == 0:
# no valid number, do nothing
return
Reported by Pylint.
Line: 102
Column: 29
nonzero_finite_max = nonzero_finite_abs.max().double()
for value in nonzero_finite_vals:
if value != torch.ceil(value):
self.int_mode = False
break
if self.int_mode:
# in int_mode for floats, all numbers are integers, and we append a decimal to nonfinites
Reported by Pylint.
Line: 190
Column: 54
if summarize and self.size(0) > 2 * PRINT_OPTS.edgeitems:
data = ([_val_formatter(val) for val in self[:PRINT_OPTS.edgeitems].tolist()] +
[' ...'] +
[_val_formatter(val) for val in self[-PRINT_OPTS.edgeitems:].tolist()])
else:
data = [_val_formatter(val) for val in self.tolist()]
data_lines = [data[i:i + elements_per_line] for i in range(0, len(data), elements_per_line)]
lines = [', '.join(line) for line in data_lines]
Reported by Pylint.
Line: 241
Column: 53
if self.is_neg():
self = self.resolve_neg()
if self.dtype is torch.float16 or self.dtype is torch.bfloat16:
self = self.float()
if self.dtype.is_complex:
# handle the conjugate bit
self = self.resolve_conj()
Reported by Pylint.
Line: 241
Column: 22
if self.is_neg():
self = self.resolve_neg()
if self.dtype is torch.float16 or self.dtype is torch.bfloat16:
self = self.float()
if self.dtype.is_complex:
# handle the conjugate bit
self = self.resolve_conj()
Reported by Pylint.
Line: 276
Column: 65
return self
if dim == 1:
if self.size(0) > 2 * PRINT_OPTS.edgeitems:
return torch.cat((self[:PRINT_OPTS.edgeitems], self[-PRINT_OPTS.edgeitems:]))
else:
return self
if self.size(0) > 2 * PRINT_OPTS.edgeitems:
start = [self[i] for i in range(0, PRINT_OPTS.edgeitems)]
end = ([self[i]
Reported by Pylint.
Line: 276
Column: 20
return self
if dim == 1:
if self.size(0) > 2 * PRINT_OPTS.edgeitems:
return torch.cat((self[:PRINT_OPTS.edgeitems], self[-PRINT_OPTS.edgeitems:]))
else:
return self
if self.size(0) > 2 * PRINT_OPTS.edgeitems:
start = [self[i] for i in range(0, PRINT_OPTS.edgeitems)]
end = ([self[i]
Reported by Pylint.
Line: 283
Column: 16
start = [self[i] for i in range(0, PRINT_OPTS.edgeitems)]
end = ([self[i]
for i in range(len(self) - PRINT_OPTS.edgeitems, len(self))])
return torch.stack([get_summarized_data(x) for x in (start + end)])
else:
return torch.stack([get_summarized_data(x) for x in self])
def _str_intern(inp):
prefix = 'tensor('
Reported by Pylint.
Line: 285
Column: 16
for i in range(len(self) - PRINT_OPTS.edgeitems, len(self))])
return torch.stack([get_summarized_data(x) for x in (start + end)])
else:
return torch.stack([get_summarized_data(x) for x in self])
def _str_intern(inp):
prefix = 'tensor('
indent = len(prefix)
suffixes = []
Reported by Pylint.
torch/nn/modules/transformer.py
67 issues
Line: 6
Column: 1
import torch
from torch import Tensor
from .. import functional as F
from .module import Module
from .activation import MultiheadAttention
from .container import ModuleList
from ..init import xavier_uniform_
from .dropout import Dropout
Reported by Pylint.
Line: 7
Column: 1
import torch
from torch import Tensor
from .. import functional as F
from .module import Module
from .activation import MultiheadAttention
from .container import ModuleList
from ..init import xavier_uniform_
from .dropout import Dropout
from .linear import Linear
Reported by Pylint.
Line: 8
Column: 1
from torch import Tensor
from .. import functional as F
from .module import Module
from .activation import MultiheadAttention
from .container import ModuleList
from ..init import xavier_uniform_
from .dropout import Dropout
from .linear import Linear
from .normalization import LayerNorm
Reported by Pylint.
Line: 9
Column: 1
from .. import functional as F
from .module import Module
from .activation import MultiheadAttention
from .container import ModuleList
from ..init import xavier_uniform_
from .dropout import Dropout
from .linear import Linear
from .normalization import LayerNorm
Reported by Pylint.
Line: 10
Column: 1
from .module import Module
from .activation import MultiheadAttention
from .container import ModuleList
from ..init import xavier_uniform_
from .dropout import Dropout
from .linear import Linear
from .normalization import LayerNorm
Reported by Pylint.
Line: 11
Column: 1
from .activation import MultiheadAttention
from .container import ModuleList
from ..init import xavier_uniform_
from .dropout import Dropout
from .linear import Linear
from .normalization import LayerNorm
class Transformer(Module):
Reported by Pylint.
Line: 12
Column: 1
from .container import ModuleList
from ..init import xavier_uniform_
from .dropout import Dropout
from .linear import Linear
from .normalization import LayerNorm
class Transformer(Module):
r"""A transformer model. User is able to modify the attributes as needed. The architecture
Reported by Pylint.
Line: 13
Column: 1
from ..init import xavier_uniform_
from .dropout import Dropout
from .linear import Linear
from .normalization import LayerNorm
class Transformer(Module):
r"""A transformer model. User is able to modify the attributes as needed. The architecture
is based on the paper "Attention Is All You Need". Ashish Vaswani, Noam Shazeer,
Reported by Pylint.
Line: 152
Column: 16
r"""Generate a square mask for the sequence. The masked positions are filled with float('-inf').
Unmasked positions are filled with float(0.0).
"""
return torch.triu(torch.full((sz, sz), float('-inf')), diagonal=1)
def _reset_parameters(self):
r"""Initiate parameters in the transformer model."""
for p in self.parameters():
Reported by Pylint.
Line: 152
Column: 27
r"""Generate a square mask for the sequence. The masked positions are filled with float('-inf').
Unmasked positions are filled with float(0.0).
"""
return torch.triu(torch.full((sz, sz), float('-inf')), diagonal=1)
def _reset_parameters(self):
r"""Initiate parameters in the transformer model."""
for p in self.parameters():
Reported by Pylint.
test/package/test_dependency_api.py
67 issues
Line: 7
Column: 1
from textwrap import dedent
from unittest import skipIf
from torch.package import EmptyMatchError, Importer, PackageExporter, PackageImporter
from torch.package.package_exporter import PackagingError
from torch.testing._internal.common_utils import IS_WINDOWS, run_tests
try:
from .common import PackageTestCase
Reported by Pylint.
Line: 8
Column: 1
from unittest import skipIf
from torch.package import EmptyMatchError, Importer, PackageExporter, PackageImporter
from torch.package.package_exporter import PackagingError
from torch.testing._internal.common_utils import IS_WINDOWS, run_tests
try:
from .common import PackageTestCase
except ImportError:
Reported by Pylint.
Line: 9
Column: 1
from torch.package import EmptyMatchError, Importer, PackageExporter, PackageImporter
from torch.package.package_exporter import PackagingError
from torch.testing._internal.common_utils import IS_WINDOWS, run_tests
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
Reported by Pylint.
Line: 32
Column: 9
he.save_source_string("foo", "import package_a.subpackage; import module_a")
buffer.seek(0)
hi = PackageImporter(buffer)
import module_a
import package_a.subpackage
module_a_im = hi.import_module("module_a")
hi.import_module("package_a.subpackage")
package_a_im = hi.import_module("package_a")
Reported by Pylint.
Line: 33
Column: 9
buffer.seek(0)
hi = PackageImporter(buffer)
import module_a
import package_a.subpackage
module_a_im = hi.import_module("module_a")
hi.import_module("package_a.subpackage")
package_a_im = hi.import_module("package_a")
Reported by Pylint.
Line: 59
Column: 9
)
buffer.seek(0)
hi = PackageImporter(buffer)
import module_a
import package_a.subpackage
module_a_im = hi.import_module("module_a")
hi.import_module("package_a.subpackage")
package_a_im = hi.import_module("package_a")
Reported by Pylint.
Line: 60
Column: 9
buffer.seek(0)
hi = PackageImporter(buffer)
import module_a
import package_a.subpackage
module_a_im = hi.import_module("module_a")
hi.import_module("package_a.subpackage")
package_a_im = hi.import_module("package_a")
Reported by Pylint.
Line: 75
Column: 9
Test that an error is thrown when a extern glob is specified with allow_empty=True
and no matching module is required during packaging.
"""
import package_a.subpackage # noqa: F401
buffer = BytesIO()
with self.assertRaisesRegex(EmptyMatchError, r"did not match any modules"):
with PackageExporter(buffer) as exporter:
exporter.extern(include=["package_b.*"], allow_empty=False)
Reported by Pylint.
Line: 121
Column: 9
he.save_source_string("foo", "import package_a.subpackage")
buffer.seek(0)
hi = PackageImporter(buffer)
import package_a.subpackage
_ = package_a.subpackage
import module_a
_ = module_a
Reported by Pylint.
Line: 124
Column: 9
import package_a.subpackage
_ = package_a.subpackage
import module_a
_ = module_a
m = hi.import_module("package_a.subpackage")
r = m.result
Reported by Pylint.
torch/_appdirs.py
67 issues
Line: 498
Column: 5
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
import winreg as _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
Reported by Pylint.
Line: 515
Column: 5
def _get_win_folder_with_pywin32(csidl_name):
from win32com.shell import shellcon, shell
dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
# Try to make this a unicode path because SHGetFolderPath does
# not return unicode strings when there is unicode data in the
# path.
try:
Reported by Pylint.
Line: 569
Column: 5
def _get_win_folder_with_jna(csidl_name):
import array
from com.sun import jna
from com.sun.jna.platform import win32
buf_size = win32.WinDef.MAX_PATH * 2
buf = array.zeros('c', buf_size)
shell = win32.Shell32.INSTANCE
Reported by Pylint.
Line: 570
Column: 5
def _get_win_folder_with_jna(csidl_name):
import array
from com.sun import jna
from com.sun.jna.platform import win32
buf_size = win32.WinDef.MAX_PATH * 2
buf = array.zeros('c', buf_size)
shell = win32.Shell32.INSTANCE
shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
Reported by Pylint.
Line: 573
Column: 11
from com.sun.jna.platform import win32
buf_size = win32.WinDef.MAX_PATH * 2
buf = array.zeros('c', buf_size)
shell = win32.Shell32.INSTANCE
shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
# Downgrade to short path name if have highbit chars. See
Reported by Pylint.
Line: 586
Column: 15
has_high_char = True
break
if has_high_char:
buf = array.zeros('c', buf_size)
kernel = win32.Kernel32.INSTANCE
if kernel.GetShortPathName(dir, buf, buf_size):
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
return dir
Reported by Pylint.
Line: 39
Column: 1
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""Utilities for determining application-specific dirs.
See <https://github.com/ActiveState/appdirs> for details and usage.
"""
# Dev Notes:
# - MSDN on where to store app data files:
Reported by Pylint.
Line: 75
Column: 19
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
Reported by Pylint.
Line: 75
Column: 33
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
Reported by Pylint.
Line: 130
Column: 33
return path
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
r"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
Reported by Pylint.
caffe2/python/operator_test/instance_norm_test.py
67 issues
Line: 6
Column: 1
import numpy as np
from hypothesis import given, assume, settings
import hypothesis.strategies as st
from caffe2.python import core, model_helper, brew, utils
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
Reported by Pylint.
Line: 7
Column: 1
import numpy as np
from hypothesis import given, assume, settings
import hypothesis.strategies as st
from caffe2.python import core, model_helper, brew, utils
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
Reported by Pylint.
Line: 116
Column: 45
epsilon=st.floats(1e-6, 1e-4),
store_mean=st.booleans(),
store_inv_stdev=st.booleans())
def test_instance_norm_layout(self, gc, dc, N, C, H, W, store_mean,
store_inv_stdev, epsilon, seed):
# force store_inv_stdev if store_mean to match existing forward pass
# implementation
store_inv_stdev |= store_mean
Reported by Pylint.
Line: 154
Column: 23
store_inv_stdev=st.booleans(),
inplace=st.booleans())
def test_instance_norm_reference_check(
self, gc, dc, N, C, H, W, order, store_mean, store_inv_stdev,
epsilon, seed, inplace):
np.random.seed(seed)
# force store_inv_stdev if store_mean to match existing forward pass
# implementation
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
from hypothesis import given, assume, settings
import hypothesis.strategies as st
from caffe2.python import core, model_helper, brew, utils
Reported by Pylint.
Line: 13
Column: 1
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import unittest
class TestInstanceNorm(serial.SerializedTestCase):
def _get_inputs(self, N, C, H, W, order):
Reported by Pylint.
Line: 16
Column: 1
import unittest
class TestInstanceNorm(serial.SerializedTestCase):
def _get_inputs(self, N, C, H, W, order):
input_data = np.random.rand(N, C, H, W).astype(np.float32)
if order == 'NHWC':
# Allocate in the same order as NCHW and transpose to make sure
Reported by Pylint.
Line: 18
Column: 5
class TestInstanceNorm(serial.SerializedTestCase):
def _get_inputs(self, N, C, H, W, order):
input_data = np.random.rand(N, C, H, W).astype(np.float32)
if order == 'NHWC':
# Allocate in the same order as NCHW and transpose to make sure
# the inputs are identical on freshly-seeded calls.
input_data = utils.NCHW2NHWC(input_data)
Reported by Pylint.
Line: 18
Column: 5
class TestInstanceNorm(serial.SerializedTestCase):
def _get_inputs(self, N, C, H, W, order):
input_data = np.random.rand(N, C, H, W).astype(np.float32)
if order == 'NHWC':
# Allocate in the same order as NCHW and transpose to make sure
# the inputs are identical on freshly-seeded calls.
input_data = utils.NCHW2NHWC(input_data)
Reported by Pylint.
Line: 18
Column: 5
class TestInstanceNorm(serial.SerializedTestCase):
def _get_inputs(self, N, C, H, W, order):
input_data = np.random.rand(N, C, H, W).astype(np.float32)
if order == 'NHWC':
# Allocate in the same order as NCHW and transpose to make sure
# the inputs are identical on freshly-seeded calls.
input_data = utils.NCHW2NHWC(input_data)
Reported by Pylint.