The following issues were found:
torch/nn/modules/rnn.py
158 issues
Line: 8
Column: 1
import torch
from torch import Tensor
from .module import Module
from ..parameter import Parameter
from ..utils.rnn import PackedSequence
from .. import init
from ... import _VF
Reported by Pylint.
Line: 9
Column: 1
import torch
from torch import Tensor
from .module import Module
from ..parameter import Parameter
from ..utils.rnn import PackedSequence
from .. import init
from ... import _VF
_rnn_impls = {
Reported by Pylint.
Line: 10
Column: 1
from torch import Tensor
from .module import Module
from ..parameter import Parameter
from ..utils.rnn import PackedSequence
from .. import init
from ... import _VF
_rnn_impls = {
'RNN_TANH': _VF.rnn_tanh,
Reported by Pylint.
Line: 11
Column: 1
from .module import Module
from ..parameter import Parameter
from ..utils.rnn import PackedSequence
from .. import init
from ... import _VF
_rnn_impls = {
'RNN_TANH': _VF.rnn_tanh,
'RNN_RELU': _VF.rnn_relu,
Reported by Pylint.
Line: 12
Column: 1
from ..parameter import Parameter
from ..utils.rnn import PackedSequence
from .. import init
from ... import _VF
_rnn_impls = {
'RNN_TANH': _VF.rnn_tanh,
'RNN_RELU': _VF.rnn_relu,
}
Reported by Pylint.
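
Note: this report prints only the location and surrounding context, not the message text. Column-1 findings on a run of import statements are most often one of Pylint's import checks (unused-import, wrong-import-order, or relative-beyond-top-level). If the relative imports are what fired, a minimal sketch of the absolute-import equivalent, assuming that is the complaint:

    # Hypothetical rewrite, assuming the findings concern the relative
    # imports (e.g. relative-beyond-top-level). Absolute imports name
    # the source packages explicitly:
    import torch
    from torch import Tensor, _VF
    from torch.nn import init
    from torch.nn.modules.module import Module
    from torch.nn.parameter import Parameter
    from torch.nn.utils.rnn import PackedSequence

Inside torch's own tree the relative form is intentional, so disabling the rule for the package is an equally valid resolution.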
Line: 89
Column: 34
real_hidden_size = proj_size if proj_size > 0 else hidden_size
layer_input_size = input_size if layer == 0 else real_hidden_size * num_directions
w_ih = Parameter(torch.empty((gate_size, layer_input_size), **factory_kwargs))
w_hh = Parameter(torch.empty((gate_size, real_hidden_size), **factory_kwargs))
b_ih = Parameter(torch.empty(gate_size, **factory_kwargs))
# Second bias vector included for CuDNN compatibility. Only one
# bias vector is needed in standard definition.
b_hh = Parameter(torch.empty(gate_size, **factory_kwargs))
Reported by Pylint.
Line: 90
Column: 34
layer_input_size = input_size if layer == 0 else real_hidden_size * num_directions
w_ih = Parameter(torch.empty((gate_size, layer_input_size), **factory_kwargs))
w_hh = Parameter(torch.empty((gate_size, real_hidden_size), **factory_kwargs))
b_ih = Parameter(torch.empty(gate_size, **factory_kwargs))
# Second bias vector included for CuDNN compatibility. Only one
# bias vector is needed in standard definition.
b_hh = Parameter(torch.empty(gate_size, **factory_kwargs))
layer_params: Tuple[Tensor, ...] = ()
Reported by Pylint.
Line: 91
Column: 34
w_ih = Parameter(torch.empty((gate_size, layer_input_size), **factory_kwargs))
w_hh = Parameter(torch.empty((gate_size, real_hidden_size), **factory_kwargs))
b_ih = Parameter(torch.empty(gate_size, **factory_kwargs))
# Second bias vector included for CuDNN compatibility. Only one
# bias vector is needed in standard definition.
b_hh = Parameter(torch.empty(gate_size, **factory_kwargs))
layer_params: Tuple[Tensor, ...] = ()
if self.proj_size == 0:
Reported by Pylint.
Line: 94
Column: 34
b_ih = Parameter(torch.empty(gate_size, **factory_kwargs))
# Second bias vector included for CuDNN compatibility. Only one
# bias vector is needed in standard definition.
b_hh = Parameter(torch.empty(gate_size, **factory_kwargs))
layer_params: Tuple[Tensor, ...] = ()
if self.proj_size == 0:
    if bias:
        layer_params = (w_ih, w_hh, b_ih, b_hh)
    else:
Reported by Pylint.
Line: 102
Column: 38
    else:
        layer_params = (w_ih, w_hh)
else:
    w_hr = Parameter(torch.empty((proj_size, hidden_size), **factory_kwargs))
    if bias:
        layer_params = (w_ih, w_hh, b_ih, b_hh, w_hr)
    else:
        layer_params = (w_ih, w_hh, w_hr)
Reported by Pylint.
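
Column 34 on each of these lines falls on a torch.empty(...) call. Pylint is known to raise no-member false positives on torch factory functions because they are defined in the C extension; assuming that is the finding here, a minimal sketch of the call-site workaround:

    import torch
    from torch.nn.parameter import Parameter

    gate_size, layer_input_size = 4, 3  # placeholder sizes for illustration

    # Inline disable, assuming the message is no-member (E1101):
    w_ih = Parameter(
        torch.empty(gate_size, layer_input_size)  # pylint: disable=no-member
    )

The project-wide alternative is generated-members=torch.* in the [TYPECHECK] section of the Pylint configuration, which tells the checker to trust dynamically generated torch attributes.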
caffe2/python/gradient_check_test.py
156 issues
Line: 1
Column: 3
# TODO(jiayq): as more and more tests are moving to hypothesis test, we
# can gradually remove this test script. DO NOT ADD MORE TESTS TO THIS
# FILE.
import numpy as np
Reported by Pylint.
Line: 88
Column: 22
res = device_checker.CheckSimple(op, [X], [0])
self.assertTrue(res)
for checker in gradient_checkers:
res, grad, grad_estimated = checker.CheckSimple(op, [X], 0, [0])
self.assertTrue(res)
class TestFlatten(test_util.TestCase):
Reported by Pylint.
Line: 88
Column: 28
res = device_checker.CheckSimple(op, [X], [0])
self.assertTrue(res)
for checker in gradient_checkers:
res, grad, grad_estimated = checker.CheckSimple(op, [X], 0, [0])
self.assertTrue(res)
class TestFlatten(test_util.TestCase):
Reported by Pylint.
Line: 100
Column: 18
res = device_checker.CheckSimple(op, [X], [0])
self.assertTrue(res)
for checker in gradient_checkers:
res, grad, grad_estimated = checker.CheckSimple(op, [X], 0, [0])
self.assertTrue(res)
class TestConcat(test_util.TestCase):
Reported by Pylint.
Line: 100
Column: 24
res = device_checker.CheckSimple(op, [X], [0])
self.assertTrue(res)
for checker in gradient_checkers:
res, grad, grad_estimated = checker.CheckSimple(op, [X], 0, [0])
self.assertTrue(res)
class TestConcat(test_util.TestCase):
Reported by Pylint.
Line: 133
Column: 32
res = device_checker.CheckSimple(op, Xs, [0])
self.assertTrue(res)
for checker in gradient_checkers:
res, grad, grad_estimated = checker.CheckSimple(op, Xs, i,
[0])
self.assertTrue(res)
def testConcatNCHW(self):
for input_size, d1, d2, d3, d4 in self.test_configs:
Reported by Pylint.
Line: 133
Column: 26
res = device_checker.CheckSimple(op, Xs, [0])
self.assertTrue(res)
for checker in gradient_checkers:
res, grad, grad_estimated = checker.CheckSimple(op, Xs, i,
[0])
self.assertTrue(res)
def testConcatNCHW(self):
for input_size, d1, d2, d3, d4 in self.test_configs:
Reported by Pylint.
Line: 157
Column: 32
res = device_checker.CheckSimple(op, Xs, [0])
self.assertTrue(res)
for checker in gradient_checkers:
res, grad, grad_estimated = checker.CheckSimple(op, Xs, i,
[0])
self.assertTrue(res)
class TestRelu(test_util.TestCase):
Reported by Pylint.
Line: 157
Column: 26
res = device_checker.CheckSimple(op, Xs, [0])
self.assertTrue(res)
for checker in gradient_checkers:
res, grad, grad_estimated = checker.CheckSimple(op, Xs, i,
[0])
self.assertTrue(res)
class TestRelu(test_util.TestCase):
Reported by Pylint.
Line: 186
Column: 28
res = device_checker.CheckSimple(op, [X], [0])
self.assertTrue(res)
for checker in gradient_checkers:
res, grad, grad_estimated = checker.CheckSimple(op, [X], 0, [0])
self.assertTrue(res)
class TestTanh(test_util.TestCase):
Reported by Pylint.
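
The first entry here flags the # TODO(jiayq) comment, most likely Pylint's fixme check, which is informational. The remaining entries land on grad and grad_estimated in the tuple unpacking, which suggests unused-variable (W0612). A self-contained sketch of the conventional fix, with a stand-in checker since the real one is not shown:

    class _FakeChecker:
        """Stand-in for the gradient checker, for illustration only."""

        def CheckSimple(self, op, inputs, idx, outputs):
            return True, "grad", "grad_estimated"

    checker = _FakeChecker()
    # Underscore prefixes mark the gradient outputs as intentionally
    # discarded, which silences unused-variable without changing behavior:
    res, _grad, _grad_estimated = checker.CheckSimple(None, [None], 0, [0])
    assert res

res, *_ = checker.CheckSimple(...) is an equivalent idiom when only the first value matters.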
test/quantization/eager/test_quantize_eager_qat.py
156 issues
Line: 2
Column: 1
import math
import torch
import torch.nn as nn
import torch.backends.mkldnn
from torch.nn import Conv2d, BatchNorm2d, ReLU, init
from torch.nn.intrinsic.qat import ConvBn2d, ConvBnReLU2d
from torch.nn.modules.utils import _pair
import torch.nn.quantized as nnq
from torch.quantization import (
Reported by Pylint.
Line: 3
Column: 1
import math
import torch
import torch.nn as nn
import torch.backends.mkldnn
from torch.nn import Conv2d, BatchNorm2d, ReLU, init
from torch.nn.intrinsic.qat import ConvBn2d, ConvBnReLU2d
from torch.nn.modules.utils import _pair
import torch.nn.quantized as nnq
from torch.quantization import (
Reported by Pylint.
Line: 4
Column: 1
import math
import torch
import torch.nn as nn
import torch.backends.mkldnn
from torch.nn import Conv2d, BatchNorm2d, ReLU, init
from torch.nn.intrinsic.qat import ConvBn2d, ConvBnReLU2d
from torch.nn.modules.utils import _pair
import torch.nn.quantized as nnq
from torch.quantization import (
Reported by Pylint.
Line: 5
Column: 1
import torch
import torch.nn as nn
import torch.backends.mkldnn
from torch.nn import Conv2d, BatchNorm2d, ReLU, init
from torch.nn.intrinsic.qat import ConvBn2d, ConvBnReLU2d
from torch.nn.modules.utils import _pair
import torch.nn.quantized as nnq
from torch.quantization import (
prepare,
Reported by Pylint.
Line: 6
Column: 1
import torch.nn as nn
import torch.backends.mkldnn
from torch.nn import Conv2d, BatchNorm2d, ReLU, init
from torch.nn.intrinsic.qat import ConvBn2d, ConvBnReLU2d
from torch.nn.modules.utils import _pair
import torch.nn.quantized as nnq
from torch.quantization import (
prepare,
convert,
Reported by Pylint.
Line: 7
Column: 1
import torch.backends.mkldnn
from torch.nn import Conv2d, BatchNorm2d, ReLU, init
from torch.nn.intrinsic.qat import ConvBn2d, ConvBnReLU2d
from torch.nn.modules.utils import _pair
import torch.nn.quantized as nnq
from torch.quantization import (
prepare,
convert,
prepare_qat,
Reported by Pylint.
Line: 8
Column: 1
from torch.nn import Conv2d, BatchNorm2d, ReLU, init
from torch.nn.intrinsic.qat import ConvBn2d, ConvBnReLU2d
from torch.nn.modules.utils import _pair
import torch.nn.quantized as nnq
from torch.quantization import (
prepare,
convert,
prepare_qat,
quantize_qat,
Reported by Pylint.
Line: 9
Column: 1
from torch.nn.intrinsic.qat import ConvBn2d, ConvBnReLU2d
from torch.nn.modules.utils import _pair
import torch.nn.quantized as nnq
from torch.quantization import (
prepare,
convert,
prepare_qat,
quantize_qat,
QuantStub,
Reported by Pylint.
Line: 20
Column: 1
default_qat_qconfig,
FixedQParamsFakeQuantize,
)
from torch.testing._internal.common_utils import TestCase
from torch.testing._internal.common_quantization import (
QuantizationTestCase,
QuantStubModel,
ManualLinearQATModel,
Reported by Pylint.
Line: 22
Column: 1
)
from torch.testing._internal.common_utils import TestCase
from torch.testing._internal.common_quantization import (
QuantizationTestCase,
QuantStubModel,
ManualLinearQATModel,
ManualConvLinearQATModel,
TwoLayerLinearModel,
Reported by Pylint.
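
As with the other files, the message text is not shown. A block of column-1 hits on imports usually means unused-import or an import-grouping check. If it is grouping (wrong-import-order / ungrouped-imports), the sketch below shows the conventional layout; if it is unused-import, the fix is simply deleting the names the tests never reference:

    # Hypothetical regrouping: standard library first, then third-party,
    # separated by a blank line (assuming wrong-import-order).
    import math

    import torch
    import torch.backends.mkldnn
    import torch.nn as nn
    import torch.nn.quantized as nnq
    from torch.nn import Conv2d, BatchNorm2d, ReLU, init
    from torch.nn.intrinsic.qat import ConvBn2d, ConvBnReLU2d
    from torch.nn.modules.utils import _pair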
test/test_static_runtime.py
156 issues
Line: 5
Column: 1
from typing import Dict, Optional
import numpy as np
import torch
from torch import nn
from torch.testing._internal.common_utils import TestCase, run_tests
class StaticModule:
Reported by Pylint.
Line: 6
Column: 1
import numpy as np
import torch
from torch import nn
from torch.testing._internal.common_utils import TestCase, run_tests
class StaticModule:
def __init__(self, scripted):
Reported by Pylint.
Line: 7
Column: 1
import numpy as np
import torch
from torch import nn
from torch.testing._internal.common_utils import TestCase, run_tests
class StaticModule:
def __init__(self, scripted):
# this is an nn.Module
Reported by Pylint.
Line: 34
Column: 5
def linear_shim(
input: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor] = None
) -> torch.Tensor:
output = input.matmul(weight.t())
if bias is not None:
output += bias
ret = output
Reported by Pylint.
Line: 47
Column: 42
class MultiHeadAttentionLayer(nn.Module):
def __init__(self, hid_dim, n_heads, dropout, device):
super().__init__()
assert hid_dim % n_heads == 0
self.hid_dim = hid_dim
self.n_heads = n_heads
self.head_dim = hid_dim // n_heads
Reported by Pylint.
Line: 60
Column: 42
# self.dropout = nn.Dropout(dropout)
self.scale = torch.sqrt(torch.FloatTensor([self.head_dim])).to(device)
def forward(self, query, key, value, mask):
batch_size = query.shape[0]
Q = self.fc_q(query)
K = self.fc_k(key)
V = self.fc_v(value)
Q = Q.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)
Reported by Pylint.
Line: 115
Column: 9
def loop_graph(a, b, iters: int):
    c = a + b * 2
    for i in range(iters):
        c = c + b
        c *= 2
        c -= a
    return c
Reported by Pylint.
Line: 170
Column: 9
HID_DIM = 256
QUERY_LEN = 8
BATCH_SIZE = 128
LAYERS = 3
HEADS = 8
DROPOUT = 0.1
device = torch.device("cpu")
attention = MultiHeadAttentionLayer(HID_DIM, HEADS, DROPOUT, device).to(device)
with torch.no_grad():
Reported by Pylint.
Line: 198
Column: 9
HID_DIM = 256
QUERY_LEN = 8
BATCH_SIZE = 128
LAYERS = 3
HEADS = 8
DROPOUT = 0.1
device = torch.device("cpu")
attention = MultiHeadAttentionLayer(HID_DIM, HEADS, DROPOUT, device).to(device)
with torch.no_grad():
Reported by Pylint.
Line: 212
Column: 9
attention_a = StaticModule(attention)
attention_a.benchmark([src, src, src, src_mask], {}, 2, 2)
metrics = attention_a.benchmark_individual_ops(
[src, src, src, src_mask], {}, 2, 2
)
def test_mlp(self):
# Arguments taken from benchmark script, ./bench/dlrm_s_benchmark.sh
Reported by Pylint.
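
Judging by the positions (the messages are not shown): linear_shim's parameter named input shadows the builtin; the loop index i on line 115 is never read; lines 170 and 198 flag uppercase locals (HID_DIM and friends), which trip invalid-name inside a function; metrics on line 212 is assigned but unused; and column 42 on lines 47 and 60 lands on parameters (dropout, possibly mask) that the methods may never use. A hedged sketch of the first two fixes:

    from typing import Optional
    import torch

    # Renaming `input` -> `inp` is safe only if no caller passes it by
    # keyword; otherwise an inline `# pylint: disable=redefined-builtin`
    # is the lower-risk option.
    def linear_shim(
        inp: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        output = inp.matmul(weight.t())
        if bias is not None:
            output += bias
        return output

    def loop_graph(a, b, iters: int):
        c = a + b * 2
        for _ in range(iters):  # `_` marks the index as intentionally unused
            c = c + b
            c *= 2
            c -= a
        return c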
test/jit/test_with.py
155 issues
Line: 6
Column: 1
from typing import Any, List
import torch
from torch.testing._internal.jit_utils import JitTestCase, make_global
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
Reported by Pylint.
Line: 7
Column: 1
from typing import Any, List
import torch
from torch.testing._internal.jit_utils import JitTestCase, make_global
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
Reported by Pylint.
Line: 451
Column: 13
def __init__(self):
self.count = 1
def __enter__(self, incr: int):
self.count += incr
def __exit__(self, type: Any, value: Any, tb: Any):
pass
Reported by Pylint.
Line: 469
Column: 13
def __enter__(self):
self.count += 1
def __exit__(self, type: Any, value: Any):
pass
@torch.jit.script
class ExitIncorrectTypes(object):
"""
Reported by Pylint.
Line: 512
Column: 13
return x
def test_enter_without_object():
with "not_object" as obj:
pass
test_tensor = torch.randn(5, dtype=torch.double)
with self.assertRaisesRegexWithHighlight(
Reported by Pylint.
Line: 48
Column: 32
self.count.add_(0.3)
return self.count
def __exit__(self, type: Any, value: Any, tb: Any) -> bool:
self.count.sub_(0.3)
return True
make_global(Context)
Reported by Pylint.
Line: 73
Column: 23
"""
c = Context(1)
with c as mult:
pass
x *= c.count
return x
Reported by Pylint.
Line: 91
Column: 9
x = y + y
return x
def test_conditional_early_return(x: torch.Tensor, c: Context) -> torch.Tensor:
"""
Test that conditionally returning early from inside a with-statement works
as expected.
"""
with c as mult:
Reported by Pylint.
Line: 205
Column: 32
self.count.add_(0.3)
return self.count
def __exit__(self, type: Any, value: Any, tb: Any):
self.count.sub_(0.3)
make_global(Context)
def test_basic(x: torch.Tensor) -> torch.Tensor:
Reported by Pylint.
Line: 247
Column: 9
x = y + y
return x
def test_conditional_early_return(x: torch.Tensor, c: Context) -> torch.Tensor:
"""
Test that conditionally returning early from inside a with-statement works
as expected.
"""
with c:
Reported by Pylint.
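
Several of these entries sit on context-manager special methods: __enter__(self, incr: int) and a two-argument __exit__ would trip unexpected-special-method-signature, and parameters named type shadow the builtin. Note that this test file defines malformed context managers deliberately (it checks TorchScript's error reporting), so inline disables are probably the right resolution there. For ordinary code, the conventional protocol looks like this:

    from types import TracebackType
    from typing import Optional, Type

    class Context:
        """Sketch of the standard context-manager signatures."""

        def __init__(self) -> None:
            self.count = 0

        def __enter__(self) -> "Context":  # __enter__ takes no extra arguments
            self.count += 1
            return self

        def __exit__(
            self,
            exc_type: Optional[Type[BaseException]],  # avoids shadowing `type`
            exc_value: Optional[BaseException],
            traceback: Optional[TracebackType],
        ) -> bool:
            self.count -= 1
            return False  # do not suppress exceptions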
torch/nn/modules/module.py
154 issues
Line: 7
Column: 1
import functools
import torch
from ..parameter import Parameter
import torch.utils.hooks as hooks
from torch import Tensor, device, dtype
from typing import Union, Tuple, Any, Callable, Iterator, Set, Optional, overload, TypeVar, Mapping, Dict, List
from ...utils.hooks import RemovableHandle
Reported by Pylint.
Line: 10
Column: 1
from ..parameter import Parameter
import torch.utils.hooks as hooks
from torch import Tensor, device, dtype
from typing import Union, Tuple, Any, Callable, Iterator, Set, Optional, overload, TypeVar, Mapping, Dict, List
from ...utils.hooks import RemovableHandle
_grad_t = Union[Tuple[Tensor, ...], Tensor]
# See https://mypy.readthedocs.io/en/latest/generics.html#generic-methods-and-generic-self for the use
Reported by Pylint.
Line: 10
Column: 1
from ..parameter import Parameter
import torch.utils.hooks as hooks
from torch import Tensor, device, dtype
from typing import Union, Tuple, Any, Callable, Iterator, Set, Optional, overload, TypeVar, Mapping, Dict, List
from ...utils.hooks import RemovableHandle
_grad_t = Union[Tuple[Tensor, ...], Tensor]
# See https://mypy.readthedocs.io/en/latest/generics.html#generic-methods-and-generic-self for the use
Reported by Pylint.
Line: 12
Column: 1
from torch import Tensor, device, dtype
from typing import Union, Tuple, Any, Callable, Iterator, Set, Optional, overload, TypeVar, Mapping, Dict, List
from ...utils.hooks import RemovableHandle
_grad_t = Union[Tuple[Tensor, ...], Tensor]
# See https://mypy.readthedocs.io/en/latest/generics.html#generic-methods-and-generic-self for the use
# of `T` to annotate `self`. Many methods of `Module` return `self` and we want those return values to be
# the type of the subclass, not the looser type of `Module`.
Reported by Pylint.
Line: 539
Column: 16
module._apply(fn)
def compute_should_use_set_data(tensor, tensor_applied):
if torch._has_compatible_shallow_copy_type(tensor, tensor_applied):
# If the new tensor has compatible tensor type as the existing tensor,
# the current behavior is to change the tensor in-place using `.data =`,
# and the future behavior is to overwrite the existing tensor. However,
# changing the current behavior is a BC-breaking change, and we want it
# to happen in future releases. So for now we introduce the
Reported by Pylint.
Line: 746
Column: 38
Returns:
Module: self
"""
return self._apply(lambda t: torch.empty_like(t, device=device))
@overload
def to(self: T, device: Optional[Union[int, device]] = ..., dtype: Optional[Union[dtype, str]] = ...,
non_blocking: bool = ...) -> T:
...
Reported by Pylint.
Line: 41
Column: 1
return s
r"""This tracks hooks common to all modules that are executed before/after
calling forward and backward. This is global state used for debugging/profiling
purposes"""
_global_backward_hooks: Dict[int, Callable] = OrderedDict()
_global_is_full_backward_hook: Optional[bool] = None
_global_forward_pre_hooks: Dict[int, Callable] = OrderedDict()
Reported by Pylint.
Line: 128
Column: 5
``handle.remove()``
"""
global _global_is_full_backward_hook
if _global_is_full_backward_hook is True:
raise RuntimeError("Cannot use both regular backward hooks and full backward hooks as a "
"global Module hook. Please use only one of them.")
_global_is_full_backward_hook = False
Reported by Pylint.
Line: 176
Column: 5
``handle.remove()``
"""
global _global_is_full_backward_hook
if _global_is_full_backward_hook is False:
raise RuntimeError("Cannot use both regular backward hooks and full backward hooks as a "
"global Module hook. Please use only one of them.")
_global_is_full_backward_hook = True
Reported by Pylint.
Line: 191
Column: 1
# Trick mypy into not applying contravariance rules to inputs by defining
# forward as a value, rather than a function. See also
# https://github.com/python/mypy/issues/8795
def _forward_unimplemented(self, *input: Any) -> None:
r"""Defines the computation performed at every call.
Should be overridden by all subclasses.
.. note::
Reported by Pylint.
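
A mixed bag, judging by the locations: relative imports at column 1 (see the rnn.py note above), a bare r-string used as a comment at line 41 (likely pointless-string-statement), global statements at lines 128 and 176 (global-statement), an access to the protected torch._has_compatible_shallow_copy_type (protected-access), and *input shadowing the builtin at line 191. A sketch of two of these, with the caveat that the actual messages are not shown:

    from collections import OrderedDict

    # Converting the bare string literal to a comment resolves
    # pointless-string-statement while keeping the note:
    # This tracks hooks common to all modules that are executed before/after
    # calling forward and backward. Global state used for debugging/profiling.
    _global_backward_hooks = OrderedDict()
    _global_is_full_backward_hook = None

    def _mark_full_backward_hooks():
        """Illustrative stand-in for the real registration functions."""
        # Rebinding module-level state genuinely needs `global`, so a
        # targeted disable is the usual resolution (assuming W0603):
        global _global_is_full_backward_hook  # pylint: disable=global-statement
        _global_is_full_backward_hook = True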
caffe2/python/operator_test/utility_ops_test.py
153 issues
Line: 7
Column: 1
from caffe2.python import core, workspace
from hypothesis import assume, given, settings
from caffe2.proto import caffe2_pb2
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
Reported by Pylint.
Line: 11
Column: 1
from caffe2.proto import caffe2_pb2
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
import random
class TestUtilityOps(serial.SerializedTestCase):
Reported by Pylint.
Line: 45
Column: 30
"Slice", ["X", "starts", "ends"], ["Y"], device_option=gc
)
def slice_ref(x, starts, ends):
slc = [slice(None)] * x.ndim
slc[dim] = slice(slice_start, slice_end)
return [x[slc]]
inputs = [X, starts, ends]
Reported by Pylint.
Line: 45
Column: 38
"Slice", ["X", "starts", "ends"], ["Y"], device_option=gc
)
def slice_ref(x, starts, ends):
slc = [slice(None)] * x.ndim
slc[dim] = slice(slice_start, slice_end)
return [x[slc]]
inputs = [X, starts, ends]
Reported by Pylint.
Line: 84
Column: 73
engine=st.sampled_from(['CUDNN', None]),
**hu.gcs)
@settings(deadline=10000)
def test_transpose(self, dtype, ndims, seed, null_axes, engine, gc, dc):
if (gc.device_type == caffe2_pb2.CUDA and engine == "CUDNN"):
# cudnn 5.1 does not support int.
assume(workspace.GetCuDNNVersion() >= 6000 or dtype != np.int32)
dims = (np.random.rand(ndims) * 16 + 1).astype(np.int32)
Reported by Pylint.
Line: 116
Column: 49
@given(m=st.integers(5, 10), n=st.integers(5, 10),
o=st.integers(5, 10), nans=st.booleans(), **hu.gcs)
@settings(deadline=10000)
def test_nan_check(self, m, n, o, nans, gc, dc):
other = np.array([1, 2, 3]).astype(np.float32)
X = np.random.rand(m, n, o).astype(np.float32)
if nans:
x_nan = np.random.randint(0, m)
y_nan = np.random.randint(0, n)
Reported by Pylint.
Line: 128
Column: 30
# print('nans: {}'.format(nans))
# print(X)
def nan_reference(X, Y):
if not np.isnan(X).any():
return [X]
else:
return [np.array([])]
Reported by Pylint.
Line: 148
Column: 17
reference=nan_reference,
)
if nans:
self.assertTrue(False, "Did not fail when presented with NaN!")
except RuntimeError:
self.assertTrue(nans, "No NaNs but failed")
try:
self.assertGradientChecks(
Reported by Pylint.
Line: 161
Column: 17
outputs_with_grads=[0],
)
if nans:
self.assertTrue(False, "Did not fail when gradient had NaN!")
except RuntimeError:
pass
@serial.given(n=st.integers(4, 5), m=st.integers(6, 7),
d=st.integers(2, 3), **hu.gcs)
Reported by Pylint.
Line: 336
Column: 47
),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_lengths_gather(self, inputs, gc, dc):
items = inputs[0]
lengths = inputs[1]
indices = inputs[2]
def lengths_gather_op(items, lengths, indices):
Reported by Pylint.
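
The pattern here is reference callbacks whose signatures are fixed by the checker API but which ignore some inputs (slice_ref, nan_reference, the gc/dc fixtures), plus self.assertTrue(False, msg), which Pylint flags as redundant-unittest-assert when the condition is a constant. A hedged sketch of both fixes:

    import unittest

    class _Sketch(unittest.TestCase):
        """Illustrative only; names and logic are stand-ins for the real tests."""

        def test_sketch(self):
            # Underscore-prefixed parameters mark inputs the reference
            # implementation deliberately ignores (assuming unused-argument):
            def slice_ref(x, _starts, _ends):
                return [x]

            self.assertEqual(slice_ref([1], None, None), [[1]])

            nans = False  # stand-in flag so the sketch passes when run
            if nans:
                # self.fail(msg) replaces self.assertTrue(False, msg),
                # which is what redundant-unittest-assert objects to:
                self.fail("Did not fail when presented with NaN!")

    if __name__ == "__main__":
        unittest.main()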
torch/testing/_internal/distributed/ddp_under_dist_autograd_test.py
153 issues
Line: 74
Column: 22
""" A feature set has 2 types of features"""
dense_features: torch.Tensor
sparse_features: torch.LongTensor
values: torch.Tensor
def _call_method(method, rref, *args, **kwargs):
return method(rref.local_value(), *args, **kwargs)
Reported by Pylint.
Line: 100
Column: 21
self.em = nn.EmbeddingBag(
num_embeddings,
embedding_dim,
_weight=torch.tensor([init_em] * num_embeddings),
)
def forward(self, input: torch.Tensor):
gLogger.debug(f"Running RemoteEM.forward() on: {input}")
return self.em(input, offsets=torch.LongTensor(range(input.shape[0])))
Reported by Pylint.
Line: 105
Column: 39
def forward(self, input: torch.Tensor):
gLogger.debug(f"Running RemoteEM.forward() on: {input}")
return self.em(input, offsets=torch.LongTensor(range(input.shape[0])))
# Return a linear module with predefined parameters.
def getLinear(d_in, d_out):
l = nn.Linear(d_in, d_out, bias=False)
Reported by Pylint.
Line: 111
Column: 9
# Return a linear module with predefined parameters.
def getLinear(d_in, d_out):
l = nn.Linear(d_in, d_out, bias=False)
w = torch.ones((d_out, d_in))
w[0][0] = -1
w.requires_grad_()
l.weight.data = w
return l
Reported by Pylint.
Line: 170
Column: 13
# The same size of mini batch.
assert sparse.shape[0] == input.dense_features.shape[0]
dense = self.fc1(input.dense_features)
x = torch.cat((dense, sparse), 1)
gLogger.debug(f"Concatenated feature: {x}")
x = _remote_method(RemoteNet.forward, self.remote_net_rref, x)
return self.fc2(x)
Reported by Pylint.
Line: 273
Column: 24
def get_training_examples():
n = 16
training_examples = FeatureSet(
dense_features=torch.zeros((n, D_DENSE)),
sparse_features=torch.zeros(n, dtype=torch.long),
values=torch.zeros(n),
)
idx = 0
# Every example has another one that has exactly the same features but an
Reported by Pylint.
Line: 274
Column: 46
n = 16
training_examples = FeatureSet(
dense_features=torch.zeros((n, D_DENSE)),
sparse_features=torch.zeros(n, dtype=torch.long),
values=torch.zeros(n),
)
idx = 0
# Every example has another one that has exactly the same features but an
# opposite value. Therefore, their grads cancel each other in all-reduce.
Reported by Pylint.
Line: 274
Column: 25
n = 16
training_examples = FeatureSet(
dense_features=torch.zeros((n, D_DENSE)),
sparse_features=torch.zeros(n, dtype=torch.long),
values=torch.zeros(n),
)
idx = 0
# Every example has another one that has exactly the same features but an
# opposite value. Therefore, their grads cancel each other in all-reduce.
Reported by Pylint.
Line: 275
Column: 16
training_examples = FeatureSet(
dense_features=torch.zeros((n, D_DENSE)),
sparse_features=torch.zeros(n, dtype=torch.long),
values=torch.zeros(n),
)
idx = 0
# Every example has another one that has exactly the same features but an
# opposite value. Therefore, their grads cancel each other in all-reduce.
for value in (-1, 1):
Reported by Pylint.
Line: 284
Column: 64
for x in (-1.0 * value, 1.0 * value):
for y in (1.0 * value, -1.0 * value):
for z in (0, 1):
training_examples.dense_features[idx, :] = torch.tensor((x, y))
training_examples.sparse_features[idx] = z
training_examples.values[idx] = value
idx += 1
# Split the examples among NUM_TRAINERS trainers
Reported by Pylint.
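
Most of the column positions here land on torch.tensor, torch.zeros, and torch.LongTensor calls, which points at the same no-member false positive discussed for torch/nn/modules/rnn.py above. The entry at line 111 sits on l = nn.Linear(...); single-letter names like l are rejected by Pylint's invalid-name check because they are easily confused with 1. A hedged sketch of that rename:

    import torch
    from torch import nn

    # Hypothetical rewrite of getLinear, assuming invalid-name fired on `l`.
    # (Renaming the function itself to snake_case would also satisfy the
    # check, but changes every call site.)
    def getLinear(d_in, d_out):
        linear = nn.Linear(d_in, d_out, bias=False)
        weight = torch.ones((d_out, d_in))
        weight[0][0] = -1
        weight.requires_grad_()
        linear.weight.data = weight
        return linear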
torch/nn/modules/pooling.py
152 issues
Line: 4
Column: 1
from typing import List, Optional
from torch import Tensor
from .module import Module
from .utils import _single, _pair, _triple
from .. import functional as F
from ..common_types import (_size_any_t, _size_1_t, _size_2_t, _size_3_t,
_ratio_3_t, _ratio_2_t, _size_any_opt_t, _size_2_opt_t, _size_3_opt_t)
Reported by Pylint.
Line: 5
Column: 1
from torch import Tensor
from .module import Module
from .utils import _single, _pair, _triple
from .. import functional as F
from ..common_types import (_size_any_t, _size_1_t, _size_2_t, _size_3_t,
_ratio_3_t, _ratio_2_t, _size_any_opt_t, _size_2_opt_t, _size_3_opt_t)
Reported by Pylint.
Line: 6
Column: 1
from torch import Tensor
from .module import Module
from .utils import _single, _pair, _triple
from .. import functional as F
from ..common_types import (_size_any_t, _size_1_t, _size_2_t, _size_3_t,
_ratio_3_t, _ratio_2_t, _size_any_opt_t, _size_2_opt_t, _size_3_opt_t)
Reported by Pylint.
Line: 8
Column: 1
from .utils import _single, _pair, _triple
from .. import functional as F
from ..common_types import (_size_any_t, _size_1_t, _size_2_t, _size_3_t,
_ratio_3_t, _ratio_2_t, _size_any_opt_t, _size_2_opt_t, _size_3_opt_t)
class _MaxPoolNd(Module):
__constants__ = ['kernel_size', 'stride', 'padding', 'dilation',
Reported by Pylint.
Line: 87
Column: 23
padding: _size_1_t
dilation: _size_1_t
def forward(self, input: Tensor) -> Tensor:
return F.max_pool1d(input, self.kernel_size, self.stride,
self.padding, self.dilation, self.ceil_mode,
self.return_indices)
Reported by Pylint.
Line: 161
Column: 23
padding: _size_2_t
dilation: _size_2_t
def forward(self, input: Tensor) -> Tensor:
return F.max_pool2d(input, self.kernel_size, self.stride,
self.padding, self.dilation, self.ceil_mode,
self.return_indices)
Reported by Pylint.
Line: 239
Column: 23
padding: _size_3_t
dilation: _size_3_t
def forward(self, input: Tensor) -> Tensor:
return F.max_pool3d(input, self.kernel_size, self.stride,
self.padding, self.dilation, self.ceil_mode,
self.return_indices)
Reported by Pylint.
Line: 317
Column: 23
self.stride = _single(stride if (stride is not None) else kernel_size)
self.padding = _single(padding)
def forward(self, input: Tensor, indices: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
return F.max_unpool1d(input, indices, self.kernel_size, self.stride,
self.padding, output_size)
class MaxUnpool2d(_MaxUnpoolNd):
Reported by Pylint.
Line: 394
Column: 23
self.stride = _pair(stride if (stride is not None) else kernel_size)
self.padding = _pair(padding)
def forward(self, input: Tensor, indices: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
return F.max_unpool2d(input, indices, self.kernel_size, self.stride,
self.padding, output_size)
class MaxUnpool3d(_MaxUnpoolNd):
Reported by Pylint.
Line: 460
Column: 23
self.stride = _triple(stride if (stride is not None) else kernel_size)
self.padding = _triple(padding)
def forward(self, input: Tensor, indices: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
return F.max_unpool3d(input, indices, self.kernel_size, self.stride,
self.padding, output_size)
class _AvgPoolNd(Module):
Reported by Pylint.
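
Every flagged line in this file is a forward(self, input: Tensor, ...) signature, with the column sitting on input, so redefined-builtin (W0622) is the likely message. Renaming the parameter would break any caller that passes input= by keyword, so an inline disable is the low-risk fix, sketched here on a minimal module:

    import torch.nn.functional as F
    from torch import Tensor
    from torch.nn.modules.module import Module

    class MaxPool1dSketch(Module):
        """Minimal illustration; the real module also stores kernel/stride state."""

        def forward(self, input: Tensor) -> Tensor:  # pylint: disable=redefined-builtin
            # Keeping the public name `input` preserves keyword-argument
            # compatibility for existing callers.
            return F.max_pool1d(input, kernel_size=2)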
caffe2/python/operator_test/elementwise_op_broadcast_test.py
151 issues
Line: 8
Column: 1
import unittest
from hypothesis import given, assume, settings
import hypothesis.strategies as st
import numpy as np
import operator
from caffe2.proto import caffe2_pb2
Reported by Pylint.
Line: 9
Column: 1
import unittest
from hypothesis import given, assume, settings
import hypothesis.strategies as st
import numpy as np
import operator
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
Reported by Pylint.
Line: 378
Column: 9
op = core.CreateOperator(
"SumReduceLike", ["X", "Y"], "out", broadcast=1, axis=0)
def ref_op(X, Y):
res = np.sum(X, axis=3)
res = np.sum(res, axis=2)
return [res]
self.assertReferenceChecks(
Reported by Pylint.
Line: 396
Column: 9
op = core.CreateOperator(
"SumReduceLike", ["X", "Y"], "out", broadcast=1, axis=1)
def ref_op(X, Y):
res = np.sum(X, axis=0)
res = np.sum(res, axis=2)
return [res]
self.assertReferenceChecks(
Reported by Pylint.
Line: 414
Column: 9
op = core.CreateOperator(
"SumReduceLike", ["X", "Y"], "out", broadcast=1)
def ref_op(X, Y):
res = np.sum(X, axis=0)
res = np.sum(res, axis=2)
return [res.reshape(Y.shape)]
self.assertReferenceChecks(
Reported by Pylint.
Line: 19
Column: 3
import caffe2.python.serialized_test.serialized_test_util as serial
# TODO(jiayq): make them hypothesis tests for better coverage.
class TestElementwiseBroadcast(serial.SerializedTestCase):
def __generate_test_cases(self, allow_broadcast_fastpath: bool):
"""
generates a set of test cases
Reported by Pylint.
Line: 103
Column: 39
@given(**hu.gcs)
@settings(deadline=None)
def test_broadcast_powt(self, gc, dc):
np.random.seed(101)
#operator
def powt_op(X, Y):
return [np.power(X, Y)]
Reported by Pylint.
Line: 207
Column: 69
grad_reference=powt_grad_mixed)
@given(allow_broadcast_fastpath=st.booleans(), **hu.gcs)
def test_broadcast_scalar(self, allow_broadcast_fastpath: bool, gc, dc):
# broadcasting constant
X = np.random.rand(2, 3, 4, 5).astype(np.float32)
Y = np.random.rand(1).astype(np.float32)
op = core.CreateOperator(
"Add", ["X", "Y"], "out", broadcast=1, allow_broadcast_fastpath=allow_broadcast_fastpath
Reported by Pylint.
Line: 237
Column: 71
self.assertDeviceChecks(dc, op, [X, Y], [0])
@given(allow_broadcast_fastpath=st.booleans(), **hu.gcs)
def test_semantic_broadcast(self, allow_broadcast_fastpath: bool, gc, dc):
# NCHW as default
X = np.random.rand(2, 3, 4, 5).astype(np.float32)
Y = np.random.rand(3).astype(np.float32)
op = core.CreateOperator(
"Add", ["X", "Y"], "out", broadcast=1, axis_str="C",
Reported by Pylint.
Line: 266
Column: 46
self.assertDeviceChecks(dc, op, [X, Y], [0])
@given(**hu.gcs)
def test_sum_reduce_empty_blob(self, gc, dc):
net = core.Net('test')
with core.DeviceScope(gc):
net.GivenTensorFill([], ["X"], values=[], shape=[2, 0, 5])
net.GivenTensorFill([], ["Y"], values=[], shape=[2, 0])
Reported by Pylint.
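
The column-1 entries on lines 8-9 bracket import operator, a standard-library module listed after third-party imports, so wrong-import-order is plausible; line 19 is another TODO flagged by the fixme check; and the column-9 entries sit on the nested ref_op definitions. Without the message text, plausible candidates there include unused-argument (the axis=0 and axis=3 variants never read Y) or invalid-name on the uppercase parameters. A hedged sketch of the import regrouping and the unused-parameter idiom:

    # Standard library first, then third-party (assuming wrong-import-order;
    # imports shown for grouping only):
    import operator
    import unittest

    import numpy as np

    # `_Y` marks a parameter the reference implementation deliberately
    # ignores (assuming unused-argument on Y):
    def ref_op(X, _Y):
        res = np.sum(X, axis=3)
        res = np.sum(res, axis=2)
        return [res]

    print(ref_op(np.zeros((2, 3, 4, 5)), None)[0].shape)  # -> (2, 3)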