The following issues were found:
caffe2/python/control.py
44 issues
Line: 30
Column: 5
def _get_next_step_name(control_name, base_name):
global _current_idx, _used_step_names
concat_name = '%s/%s' % (base_name, control_name)
next_name = concat_name
while next_name in _used_step_names:
next_name = '%s_%d' % (concat_name, _current_idx)
_current_idx += 1
Reported by Pylint.
Line: 40
Column: 15
return next_name
def _MakeList(input):
""" input is a tuple.
Example:
(a, b, c) --> [a, b, c]
(a) --> [a]
([a, b, c]) --> [a, b, c]
Reported by Pylint.
Line: 204
Column: 31
else:
last_cond = merged_net.__getattr__(relation)([last_cond, curr_cond])
# merge attributes
for k, v in viewitems(condition_nets[i]._attr_dict):
merged_net._attr_dict[k] += v
merged_net.AddExternalOutput(last_cond)
return merged_net
Reported by Pylint.
Line: 205
Column: 13
last_cond = merged_net.__getattr__(relation)([last_cond, curr_cond])
# merge attributes
for k, v in viewitems(condition_nets[i]._attr_dict):
merged_net._attr_dict[k] += v
merged_net.AddExternalOutput(last_cond)
return merged_net
Reported by Pylint.
Line: 25
Column: 1
# Used to generate names of the steps created by the control functions.
# It is actually the internal index of these steps.
_current_idx = 1
_used_step_names = set()
def _get_next_step_name(control_name, base_name):
global _current_idx, _used_step_names
Reported by Pylint.
Line: 30
Column: 5
def _get_next_step_name(control_name, base_name):
global _current_idx, _used_step_names
concat_name = '%s/%s' % (base_name, control_name)
next_name = concat_name
while next_name in _used_step_names:
next_name = '%s_%d' % (concat_name, _current_idx)
_current_idx += 1
Reported by Pylint.
Line: 30
Column: 5
def _get_next_step_name(control_name, base_name):
global _current_idx, _used_step_names
concat_name = '%s/%s' % (base_name, control_name)
next_name = concat_name
while next_name in _used_step_names:
next_name = '%s_%d' % (concat_name, _current_idx)
_current_idx += 1
Reported by Pylint.
Line: 40
Column: 1
return next_name
def _MakeList(input):
""" input is a tuple.
Example:
(a, b, c) --> [a, b, c]
(a) --> [a]
([a, b, c]) --> [a, b, c]
Reported by Pylint.
Line: 47
Column: 5
(a) --> [a]
([a, b, c]) --> [a, b, c]
"""
if len(input) == 0:
raise ValueError(
'input cannot be empty.')
elif len(input) == 1:
output = input[0]
if not isinstance(output, list):
Reported by Pylint.
Line: 59
Column: 1
return output
def _IsNets(nets_or_steps):
if isinstance(nets_or_steps, list):
return all(isinstance(n, core.Net) for n in nets_or_steps)
else:
return isinstance(nets_or_steps, core.Net)
Reported by Pylint.
benchmarks/fastrnns/fuser.py
44 issues
Line: 1
Column: 1
import torch
def set_fuser(fuser_name, executor_name):
assert fuser_name in ['te', 'old', 'none', 'default']
if fuser_name == 'te':
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(True)
Reported by Pylint.
Line: 6
Column: 9
def set_fuser(fuser_name, executor_name):
assert fuser_name in ['te', 'old', 'none', 'default']
if fuser_name == 'te':
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(True)
elif fuser_name == 'old':
Reported by Pylint.
Line: 6
Column: 9
def set_fuser(fuser_name, executor_name):
assert fuser_name in ['te', 'old', 'none', 'default']
if fuser_name == 'te':
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(True)
elif fuser_name == 'old':
Reported by Pylint.
Line: 7
Column: 9
assert fuser_name in ['te', 'old', 'none', 'default']
if fuser_name == 'te':
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(True)
elif fuser_name == 'old':
torch._C._jit_set_profiling_executor(False)
Reported by Pylint.
Line: 7
Column: 9
assert fuser_name in ['te', 'old', 'none', 'default']
if fuser_name == 'te':
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(True)
elif fuser_name == 'old':
torch._C._jit_set_profiling_executor(False)
Reported by Pylint.
Line: 8
Column: 9
if fuser_name == 'te':
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(True)
elif fuser_name == 'old':
torch._C._jit_set_profiling_executor(False)
torch._C._jit_set_profiling_mode(False)
Reported by Pylint.
Line: 8
Column: 9
if fuser_name == 'te':
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(True)
elif fuser_name == 'old':
torch._C._jit_set_profiling_executor(False)
torch._C._jit_set_profiling_mode(False)
Reported by Pylint.
Line: 9
Column: 9
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(True)
elif fuser_name == 'old':
torch._C._jit_set_profiling_executor(False)
torch._C._jit_set_profiling_mode(False)
torch._C._jit_override_can_fuse_on_gpu(True)
Reported by Pylint.
Line: 9
Column: 9
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(True)
elif fuser_name == 'old':
torch._C._jit_set_profiling_executor(False)
torch._C._jit_set_profiling_mode(False)
torch._C._jit_override_can_fuse_on_gpu(True)
Reported by Pylint.
Line: 10
Column: 9
torch._C._jit_set_profiling_mode(True)
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(True)
elif fuser_name == 'old':
torch._C._jit_set_profiling_executor(False)
torch._C._jit_set_profiling_mode(False)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(False)
Reported by Pylint.
caffe2/python/operator_test/partition_ops_test.py
44 issues
Line: 56
Column: 34
def sharding(x):
# numpy has proper modulo op that yields non-negative results
shards = (x[0] % parts).reshape([-1])
out = []
for i in range(parts):
for ind, v in enumerate(x):
suffix_shape = v.shape[len(x[0].shape):]
accum = []
Reported by Pylint.
Line: 58
Column: 32
# numpy has proper modulo op that yields non-negative results
shards = (x[0] % parts).reshape([-1])
out = []
for i in range(parts):
for ind, v in enumerate(x):
suffix_shape = v.shape[len(x[0].shape):]
accum = []
data = v.reshape((-1, ) + suffix_shape)
Reported by Pylint.
Line: 64
Column: 28
accum = []
data = v.reshape((-1, ) + suffix_shape)
if pack and ind == 0:
data = data // parts
for j, s in enumerate(shards):
if s == i:
accum.append(data[j])
Reported by Pylint.
Line: 65
Column: 44
data = v.reshape((-1, ) + suffix_shape)
if pack and ind == 0:
data = data // parts
for j, s in enumerate(shards):
if s == i:
accum.append(data[j])
Reported by Pylint.
Line: 73
Column: 63
def join(a):
if not a:
return np.empty(shape=(0, ) + suffix_shape)
return np.stack(a)
out.append(join(accum))
return out
Reported by Pylint.
Line: 140
Column: 34
def sharding(x):
# numpy has proper modulo op that yields non-negative results
shards = (x[0] % parts).reshape([-1])
out = []
for i in range(parts):
idx = 0
sharded_lengths = np.zeros(elements)
for ind, length in enumerate(lengths):
Reported by Pylint.
Line: 142
Column: 32
# numpy has proper modulo op that yields non-negative results
shards = (x[0] % parts).reshape([-1])
out = []
for i in range(parts):
idx = 0
sharded_lengths = np.zeros(elements)
for ind, length in enumerate(lengths):
for _ in range(length):
if shards[idx] == i:
Reported by Pylint.
Line: 144
Column: 48
out = []
for i in range(parts):
idx = 0
sharded_lengths = np.zeros(elements)
for ind, length in enumerate(lengths):
for _ in range(length):
if shards[idx] == i:
sharded_lengths[ind] += 1
idx += 1
Reported by Pylint.
Line: 145
Column: 50
for i in range(parts):
idx = 0
sharded_lengths = np.zeros(elements)
for ind, length in enumerate(lengths):
for _ in range(length):
if shards[idx] == i:
sharded_lengths[ind] += 1
idx += 1
out.append(sharded_lengths)
Reported by Pylint.
Line: 157
Column: 28
accum = []
data = v.reshape((-1, ) + suffix_shape)
if pack and ind == 0:
data = data // parts
for j, s in enumerate(shards):
if s == i:
accum.append(data[j])
Reported by Pylint.
torch/quasirandom.py
44 issues
Line: 56
Column: 15
self.scramble = scramble
self.dimension = dimension
cpu = torch.device("cpu")
self.sobolstate = torch.zeros(dimension, self.MAXBIT, device=cpu, dtype=torch.long)
torch._sobol_engine_initialize_state_(self.sobolstate, self.dimension)
if not self.scramble:
Reported by Pylint.
Line: 58
Column: 81
cpu = torch.device("cpu")
self.sobolstate = torch.zeros(dimension, self.MAXBIT, device=cpu, dtype=torch.long)
torch._sobol_engine_initialize_state_(self.sobolstate, self.dimension)
if not self.scramble:
self.shift = torch.zeros(self.dimension, device=cpu, dtype=torch.long)
else:
Reported by Pylint.
Line: 58
Column: 27
cpu = torch.device("cpu")
self.sobolstate = torch.zeros(dimension, self.MAXBIT, device=cpu, dtype=torch.long)
torch._sobol_engine_initialize_state_(self.sobolstate, self.dimension)
if not self.scramble:
self.shift = torch.zeros(self.dimension, device=cpu, dtype=torch.long)
else:
Reported by Pylint.
Line: 59
Column: 9
cpu = torch.device("cpu")
self.sobolstate = torch.zeros(dimension, self.MAXBIT, device=cpu, dtype=torch.long)
torch._sobol_engine_initialize_state_(self.sobolstate, self.dimension)
if not self.scramble:
self.shift = torch.zeros(self.dimension, device=cpu, dtype=torch.long)
else:
self._scramble()
Reported by Pylint.
Line: 62
Column: 72
torch._sobol_engine_initialize_state_(self.sobolstate, self.dimension)
if not self.scramble:
self.shift = torch.zeros(self.dimension, device=cpu, dtype=torch.long)
else:
self._scramble()
self.quasi = self.shift.clone(memory_format=torch.contiguous_format)
self._first_point = (self.quasi / 2 ** self.MAXBIT).reshape(1, -1)
Reported by Pylint.
Line: 62
Column: 26
torch._sobol_engine_initialize_state_(self.sobolstate, self.dimension)
if not self.scramble:
self.shift = torch.zeros(self.dimension, device=cpu, dtype=torch.long)
else:
self._scramble()
self.quasi = self.shift.clone(memory_format=torch.contiguous_format)
self._first_point = (self.quasi / 2 ** self.MAXBIT).reshape(1, -1)
Reported by Pylint.
Line: 66
Column: 53
else:
self._scramble()
self.quasi = self.shift.clone(memory_format=torch.contiguous_format)
self._first_point = (self.quasi / 2 ** self.MAXBIT).reshape(1, -1)
self.num_generated = 0
def draw(self, n: int = 1, out: Optional[torch.Tensor] = None,
dtype: torch.dtype = torch.float32) -> torch.Tensor:
Reported by Pylint.
Line: 71
Column: 35
self.num_generated = 0
def draw(self, n: int = 1, out: Optional[torch.Tensor] = None,
dtype: torch.dtype = torch.float32) -> torch.Tensor:
r"""
Function to draw a sequence of :attr:`n` points from a Sobol sequence.
Note that the samples are dependent on the previous samples. The size
of the result is :math:`(n, dimension)`.
Reported by Pylint.
Line: 71
Column: 21
self.num_generated = 0
def draw(self, n: int = 1, out: Optional[torch.Tensor] = None,
dtype: torch.dtype = torch.float32) -> torch.Tensor:
r"""
Function to draw a sequence of :attr:`n` points from a Sobol sequence.
Note that the samples are dependent on the previous samples. The size
of the result is :math:`(n, dimension)`.
Reported by Pylint.
Line: 89
Column: 38
if n == 1:
result = self._first_point.to(dtype)
else:
result, self.quasi = torch._sobol_engine_draw(
self.quasi, n - 1, self.sobolstate, self.dimension, self.num_generated, dtype=dtype,
)
result = torch.cat((self._first_point, result), dim=-2)
else:
result, self.quasi = torch._sobol_engine_draw(
Reported by Pylint.
torch/nn/modules/__init__.py
43 issues
Line: 1
Column: 1
from .module import Module
from .linear import Identity, Linear, Bilinear, LazyLinear
from .conv import Conv1d, Conv2d, Conv3d, \
ConvTranspose1d, ConvTranspose2d, ConvTranspose3d, \
LazyConv1d, LazyConv2d, LazyConv3d, LazyConvTranspose1d, LazyConvTranspose2d, LazyConvTranspose3d
from .activation import Threshold, ReLU, Hardtanh, ReLU6, Sigmoid, Tanh, \
Softmax, Softmax2d, LogSoftmax, ELU, SELU, CELU, GELU, Hardshrink, LeakyReLU, LogSigmoid, \
Softplus, Softshrink, MultiheadAttention, PReLU, Softsign, Softmin, Tanhshrink, RReLU, GLU, \
Hardsigmoid, Hardswish, SiLU, Mish
Reported by Pylint.
Line: 2
Column: 1
from .module import Module
from .linear import Identity, Linear, Bilinear, LazyLinear
from .conv import Conv1d, Conv2d, Conv3d, \
ConvTranspose1d, ConvTranspose2d, ConvTranspose3d, \
LazyConv1d, LazyConv2d, LazyConv3d, LazyConvTranspose1d, LazyConvTranspose2d, LazyConvTranspose3d
from .activation import Threshold, ReLU, Hardtanh, ReLU6, Sigmoid, Tanh, \
Softmax, Softmax2d, LogSoftmax, ELU, SELU, CELU, GELU, Hardshrink, LeakyReLU, LogSigmoid, \
Softplus, Softshrink, MultiheadAttention, PReLU, Softsign, Softmin, Tanhshrink, RReLU, GLU, \
Hardsigmoid, Hardswish, SiLU, Mish
Reported by Pylint.
Line: 3
Column: 1
from .module import Module
from .linear import Identity, Linear, Bilinear, LazyLinear
from .conv import Conv1d, Conv2d, Conv3d, \
ConvTranspose1d, ConvTranspose2d, ConvTranspose3d, \
LazyConv1d, LazyConv2d, LazyConv3d, LazyConvTranspose1d, LazyConvTranspose2d, LazyConvTranspose3d
from .activation import Threshold, ReLU, Hardtanh, ReLU6, Sigmoid, Tanh, \
Softmax, Softmax2d, LogSoftmax, ELU, SELU, CELU, GELU, Hardshrink, LeakyReLU, LogSigmoid, \
Softplus, Softshrink, MultiheadAttention, PReLU, Softsign, Softmin, Tanhshrink, RReLU, GLU, \
Hardsigmoid, Hardswish, SiLU, Mish
Reported by Pylint.
Line: 6
Column: 1
from .conv import Conv1d, Conv2d, Conv3d, \
ConvTranspose1d, ConvTranspose2d, ConvTranspose3d, \
LazyConv1d, LazyConv2d, LazyConv3d, LazyConvTranspose1d, LazyConvTranspose2d, LazyConvTranspose3d
from .activation import Threshold, ReLU, Hardtanh, ReLU6, Sigmoid, Tanh, \
Softmax, Softmax2d, LogSoftmax, ELU, SELU, CELU, GELU, Hardshrink, LeakyReLU, LogSigmoid, \
Softplus, Softshrink, MultiheadAttention, PReLU, Softsign, Softmin, Tanhshrink, RReLU, GLU, \
Hardsigmoid, Hardswish, SiLU, Mish
from .loss import L1Loss, NLLLoss, KLDivLoss, MSELoss, BCELoss, BCEWithLogitsLoss, NLLLoss2d, \
CosineEmbeddingLoss, CTCLoss, HingeEmbeddingLoss, MarginRankingLoss, \
Reported by Pylint.
Line: 10
Column: 1
Softmax, Softmax2d, LogSoftmax, ELU, SELU, CELU, GELU, Hardshrink, LeakyReLU, LogSigmoid, \
Softplus, Softshrink, MultiheadAttention, PReLU, Softsign, Softmin, Tanhshrink, RReLU, GLU, \
Hardsigmoid, Hardswish, SiLU, Mish
from .loss import L1Loss, NLLLoss, KLDivLoss, MSELoss, BCELoss, BCEWithLogitsLoss, NLLLoss2d, \
CosineEmbeddingLoss, CTCLoss, HingeEmbeddingLoss, MarginRankingLoss, \
MultiLabelMarginLoss, MultiLabelSoftMarginLoss, MultiMarginLoss, SmoothL1Loss, HuberLoss, \
SoftMarginLoss, CrossEntropyLoss, TripletMarginLoss, TripletMarginWithDistanceLoss, PoissonNLLLoss, GaussianNLLLoss
from .container import Container, Sequential, ModuleList, ModuleDict, ParameterList, ParameterDict
from .pooling import AvgPool1d, AvgPool2d, AvgPool3d, MaxPool1d, MaxPool2d, MaxPool3d, \
Reported by Pylint.
Line: 14
Column: 1
CosineEmbeddingLoss, CTCLoss, HingeEmbeddingLoss, MarginRankingLoss, \
MultiLabelMarginLoss, MultiLabelSoftMarginLoss, MultiMarginLoss, SmoothL1Loss, HuberLoss, \
SoftMarginLoss, CrossEntropyLoss, TripletMarginLoss, TripletMarginWithDistanceLoss, PoissonNLLLoss, GaussianNLLLoss
from .container import Container, Sequential, ModuleList, ModuleDict, ParameterList, ParameterDict
from .pooling import AvgPool1d, AvgPool2d, AvgPool3d, MaxPool1d, MaxPool2d, MaxPool3d, \
MaxUnpool1d, MaxUnpool2d, MaxUnpool3d, FractionalMaxPool2d, FractionalMaxPool3d, LPPool1d, LPPool2d, \
AdaptiveMaxPool1d, AdaptiveMaxPool2d, AdaptiveMaxPool3d, AdaptiveAvgPool1d, AdaptiveAvgPool2d, AdaptiveAvgPool3d
from .batchnorm import BatchNorm1d, BatchNorm2d, BatchNorm3d, SyncBatchNorm, \
LazyBatchNorm1d, LazyBatchNorm2d, LazyBatchNorm3d
Reported by Pylint.
Line: 15
Column: 1
MultiLabelMarginLoss, MultiLabelSoftMarginLoss, MultiMarginLoss, SmoothL1Loss, HuberLoss, \
SoftMarginLoss, CrossEntropyLoss, TripletMarginLoss, TripletMarginWithDistanceLoss, PoissonNLLLoss, GaussianNLLLoss
from .container import Container, Sequential, ModuleList, ModuleDict, ParameterList, ParameterDict
from .pooling import AvgPool1d, AvgPool2d, AvgPool3d, MaxPool1d, MaxPool2d, MaxPool3d, \
MaxUnpool1d, MaxUnpool2d, MaxUnpool3d, FractionalMaxPool2d, FractionalMaxPool3d, LPPool1d, LPPool2d, \
AdaptiveMaxPool1d, AdaptiveMaxPool2d, AdaptiveMaxPool3d, AdaptiveAvgPool1d, AdaptiveAvgPool2d, AdaptiveAvgPool3d
from .batchnorm import BatchNorm1d, BatchNorm2d, BatchNorm3d, SyncBatchNorm, \
LazyBatchNorm1d, LazyBatchNorm2d, LazyBatchNorm3d
from .instancenorm import InstanceNorm1d, InstanceNorm2d, InstanceNorm3d, \
Reported by Pylint.
Line: 18
Column: 1
from .pooling import AvgPool1d, AvgPool2d, AvgPool3d, MaxPool1d, MaxPool2d, MaxPool3d, \
MaxUnpool1d, MaxUnpool2d, MaxUnpool3d, FractionalMaxPool2d, FractionalMaxPool3d, LPPool1d, LPPool2d, \
AdaptiveMaxPool1d, AdaptiveMaxPool2d, AdaptiveMaxPool3d, AdaptiveAvgPool1d, AdaptiveAvgPool2d, AdaptiveAvgPool3d
from .batchnorm import BatchNorm1d, BatchNorm2d, BatchNorm3d, SyncBatchNorm, \
LazyBatchNorm1d, LazyBatchNorm2d, LazyBatchNorm3d
from .instancenorm import InstanceNorm1d, InstanceNorm2d, InstanceNorm3d, \
LazyInstanceNorm1d, LazyInstanceNorm2d, LazyInstanceNorm3d
from .normalization import LocalResponseNorm, CrossMapLRN2d, LayerNorm, GroupNorm
from .dropout import Dropout, Dropout2d, Dropout3d, AlphaDropout, FeatureAlphaDropout
Reported by Pylint.
Line: 20
Column: 1
AdaptiveMaxPool1d, AdaptiveMaxPool2d, AdaptiveMaxPool3d, AdaptiveAvgPool1d, AdaptiveAvgPool2d, AdaptiveAvgPool3d
from .batchnorm import BatchNorm1d, BatchNorm2d, BatchNorm3d, SyncBatchNorm, \
LazyBatchNorm1d, LazyBatchNorm2d, LazyBatchNorm3d
from .instancenorm import InstanceNorm1d, InstanceNorm2d, InstanceNorm3d, \
LazyInstanceNorm1d, LazyInstanceNorm2d, LazyInstanceNorm3d
from .normalization import LocalResponseNorm, CrossMapLRN2d, LayerNorm, GroupNorm
from .dropout import Dropout, Dropout2d, Dropout3d, AlphaDropout, FeatureAlphaDropout
from .padding import ReflectionPad1d, ReflectionPad2d, ReflectionPad3d, ReplicationPad1d, ReplicationPad2d, \
ReplicationPad3d, ZeroPad2d, ConstantPad1d, ConstantPad2d, ConstantPad3d
Reported by Pylint.
Line: 22
Column: 1
LazyBatchNorm1d, LazyBatchNorm2d, LazyBatchNorm3d
from .instancenorm import InstanceNorm1d, InstanceNorm2d, InstanceNorm3d, \
LazyInstanceNorm1d, LazyInstanceNorm2d, LazyInstanceNorm3d
from .normalization import LocalResponseNorm, CrossMapLRN2d, LayerNorm, GroupNorm
from .dropout import Dropout, Dropout2d, Dropout3d, AlphaDropout, FeatureAlphaDropout
from .padding import ReflectionPad1d, ReflectionPad2d, ReflectionPad3d, ReplicationPad1d, ReplicationPad2d, \
ReplicationPad3d, ZeroPad2d, ConstantPad1d, ConstantPad2d, ConstantPad3d
from .sparse import Embedding, EmbeddingBag
from .rnn import RNNBase, RNN, LSTM, GRU, \
Reported by Pylint.
test/test_autocast.py
43 issues
Line: 2
Column: 1
import collections
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.autocast_test_lists import AutocastCPUTestLists
class TestAutocastCPU(TestCase):
def setUp(self):
super(TestAutocastCPU, self).setUp()
self.autocast_lists = AutocastCPUTestLists(torch.device('cpu'))
Reported by Pylint.
Line: 3
Column: 1
import collections
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.autocast_test_lists import AutocastCPUTestLists
class TestAutocastCPU(TestCase):
def setUp(self):
super(TestAutocastCPU, self).setUp()
self.autocast_lists = AutocastCPUTestLists(torch.device('cpu'))
Reported by Pylint.
Line: 4
Column: 1
import collections
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.autocast_test_lists import AutocastCPUTestLists
class TestAutocastCPU(TestCase):
def setUp(self):
super(TestAutocastCPU, self).setUp()
self.autocast_lists = AutocastCPUTestLists(torch.device('cpu'))
Reported by Pylint.
Line: 106
Column: 76
def test_autocast_nn_bf16(self):
for op, args in self.autocast_lists.nn_bf16:
self._run_autocast_outofplace(op, args, torch.bfloat16, module=torch._C._nn)
def test_autocast_torch_fp32(self):
for op_with_args in self.autocast_lists.torch_fp32:
op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
self._run_autocast_outofplace(op, args, torch.float32, add_kwargs=maybe_kwargs)
Reported by Pylint.
Line: 106
Column: 76
def test_autocast_nn_bf16(self):
for op, args in self.autocast_lists.nn_bf16:
self._run_autocast_outofplace(op, args, torch.bfloat16, module=torch._C._nn)
def test_autocast_torch_fp32(self):
for op_with_args in self.autocast_lists.torch_fp32:
op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
self._run_autocast_outofplace(op, args, torch.float32, add_kwargs=maybe_kwargs)
Reported by Pylint.
Line: 116
Column: 75
def test_autocast_nn_fp32(self):
for op_with_args in self.autocast_lists.nn_fp32:
op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
self._run_autocast_outofplace(op, args, torch.float32, module=torch._C._nn, add_kwargs=maybe_kwargs)
def test_autocast_torch_need_autocast_promote(self):
for op, args in self.autocast_lists.torch_need_autocast_promote:
self._run_autocast_outofplace(op, args, torch.float32)
Reported by Pylint.
Line: 116
Column: 75
def test_autocast_nn_fp32(self):
for op_with_args in self.autocast_lists.nn_fp32:
op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
self._run_autocast_outofplace(op, args, torch.float32, module=torch._C._nn, add_kwargs=maybe_kwargs)
def test_autocast_torch_need_autocast_promote(self):
for op, args in self.autocast_lists.torch_need_autocast_promote:
self._run_autocast_outofplace(op, args, torch.float32)
Reported by Pylint.
Line: 1
Column: 1
import collections
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.autocast_test_lists import AutocastCPUTestLists
class TestAutocastCPU(TestCase):
def setUp(self):
super(TestAutocastCPU, self).setUp()
self.autocast_lists = AutocastCPUTestLists(torch.device('cpu'))
Reported by Pylint.
Line: 6
Column: 1
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.autocast_test_lists import AutocastCPUTestLists
class TestAutocastCPU(TestCase):
def setUp(self):
super(TestAutocastCPU, self).setUp()
self.autocast_lists = AutocastCPUTestLists(torch.device('cpu'))
def tearDown(self):
Reported by Pylint.
Line: 7
Column: 5
from torch.testing._internal.autocast_test_lists import AutocastCPUTestLists
class TestAutocastCPU(TestCase):
def setUp(self):
super(TestAutocastCPU, self).setUp()
self.autocast_lists = AutocastCPUTestLists(torch.device('cpu'))
def tearDown(self):
del self.autocast_lists
Reported by Pylint.
caffe2/python/ideep/elementwise_sum_op_test.py
43 issues
Line: 7
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 8
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
Reported by Pylint.
Line: 16
Column: 22
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class ElementwiseSumTest(hu.HypothesisTestCase):
@given(size=st.integers(7, 9),
input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
inputs=st.integers(2, 7),
Reported by Pylint.
Line: 30
Column: 34
batch_size,
inputs,
inplace,
gc,
dc):
op = core.CreateOperator(
"Sum",
["X_{}".format(i) for i in range(inputs)],
["X_0" if inplace else "Y"],
Reported by Pylint.
Line: 54
Column: 39
batch_size,
inputs,
inplace,
gc,
dc):
op = core.CreateOperator(
"Sum",
["X_{}".format(i) for i in range(inputs)],
["X_0" if inplace else "Y"],
Reported by Pylint.
Line: 79
Column: 13
print(Y.flatten())
print(sum_val.flatten())
print(np.max(np.abs(Y - sum_val)))
self.assertTrue(False)
@given(size=st.integers(7, 9),
input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
Reported by Pylint.
Line: 94
Column: 34
batch_size,
inputs,
inplace,
gc,
dc):
sum_fp32 = core.CreateOperator(
"Sum",
["X_{}".format(i) for i in range(inputs)],
["X_0" if inplace else "Y"],
Reported by Pylint.
Line: 150
Column: 9
)
net.op.extend([sw2nhwc, quantize])
sum = core.CreateOperator(
"Int8Sum",
["Xi_{}_quantized".format(i) for i in range(inputs)],
["Xi_0_quantized" if inplace else "Y_quantized"],
engine="DNNLOWP",
device_option=dc[1],
Reported by Pylint.
Line: 185
Column: 13
print(Y_out.flatten())
print(np.max(np.abs(Y_out - Y)))
print("MSE", MSE)
self.assertTrue(False)
workspace.SwitchWorkspace(old_ws_name)
if __name__ == "__main__":
unittest.main()
Reported by Pylint.
Line: 1
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
Reported by Pylint.
torch/autocast_mode.py
43 issues
Line: 133
Column: 31
def __init__(self, device_type, enabled=True, **kwargs):
self.device = device_type
if self.device == 'cuda':
self.fast_dtype = torch.get_autocast_gpu_dtype()
elif self.device == 'cpu':
self.fast_dtype = torch.get_autocast_cpu_dtype()
else:
raise RuntimeError('User specified autocast device_type must be \'cuda\' or \'cpu\'')
if torch.cuda.amp.common.amp_definitely_not_available() and self.device == 'cuda':
Reported by Pylint.
Line: 135
Column: 31
if self.device == 'cuda':
self.fast_dtype = torch.get_autocast_gpu_dtype()
elif self.device == 'cpu':
self.fast_dtype = torch.get_autocast_cpu_dtype()
else:
raise RuntimeError('User specified autocast device_type must be \'cuda\' or \'cpu\'')
if torch.cuda.amp.common.amp_definitely_not_available() and self.device == 'cuda':
warnings.warn('User provided device_type of \'cuda\', but CUDA is not available. Disabling')
enabled = False
Reported by Pylint.
Line: 148
Column: 32
raise RuntimeError('Unrecognized optional argument supplied to autocast context manager: ' + str(key))
if self.device == 'cpu':
supported_dtype = [torch.bfloat16]
if self.fast_dtype not in supported_dtype:
error_message = 'In CPU autocast, but the target dtype is not supported. Disabling autocast.\n'
error_message += 'CPU Autocast only supports dtype of torch.bfloat16 currently.'
warnings.warn(error_message)
enabled = False
Reported by Pylint.
Line: 155
Column: 35
warnings.warn(error_message)
enabled = False
if self.device == 'cuda':
if self.fast_dtype == torch.bfloat16 and torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8:
raise RuntimeError('Current CUDA Device does not support bfloat16. Switching fast_dtype to float16.')
self._enabled = enabled
def __enter__(self):
if self.device == 'cpu':
Reported by Pylint.
Line: 161
Column: 25
def __enter__(self):
if self.device == 'cpu':
self.prev = torch.is_autocast_cpu_enabled()
self.prev_fastdtype = torch.get_autocast_cpu_dtype()
torch.set_autocast_cpu_enabled(self._enabled)
torch.set_autocast_cpu_dtype(self.fast_dtype)
torch.autocast_increment_nesting()
else:
Reported by Pylint.
Line: 162
Column: 35
def __enter__(self):
if self.device == 'cpu':
self.prev = torch.is_autocast_cpu_enabled()
self.prev_fastdtype = torch.get_autocast_cpu_dtype()
torch.set_autocast_cpu_enabled(self._enabled)
torch.set_autocast_cpu_dtype(self.fast_dtype)
torch.autocast_increment_nesting()
else:
self.prev = torch.is_autocast_enabled()
Reported by Pylint.
Line: 163
Column: 13
if self.device == 'cpu':
self.prev = torch.is_autocast_cpu_enabled()
self.prev_fastdtype = torch.get_autocast_cpu_dtype()
torch.set_autocast_cpu_enabled(self._enabled)
torch.set_autocast_cpu_dtype(self.fast_dtype)
torch.autocast_increment_nesting()
else:
self.prev = torch.is_autocast_enabled()
self.prev_fastdtype = torch.get_autocast_gpu_dtype()
Reported by Pylint.
Line: 164
Column: 13
self.prev = torch.is_autocast_cpu_enabled()
self.prev_fastdtype = torch.get_autocast_cpu_dtype()
torch.set_autocast_cpu_enabled(self._enabled)
torch.set_autocast_cpu_dtype(self.fast_dtype)
torch.autocast_increment_nesting()
else:
self.prev = torch.is_autocast_enabled()
self.prev_fastdtype = torch.get_autocast_gpu_dtype()
torch.set_autocast_gpu_dtype(self.fast_dtype)
Reported by Pylint.
Line: 165
Column: 13
self.prev_fastdtype = torch.get_autocast_cpu_dtype()
torch.set_autocast_cpu_enabled(self._enabled)
torch.set_autocast_cpu_dtype(self.fast_dtype)
torch.autocast_increment_nesting()
else:
self.prev = torch.is_autocast_enabled()
self.prev_fastdtype = torch.get_autocast_gpu_dtype()
torch.set_autocast_gpu_dtype(self.fast_dtype)
torch.set_autocast_enabled(self._enabled)
Reported by Pylint.
Line: 167
Column: 25
torch.set_autocast_cpu_dtype(self.fast_dtype)
torch.autocast_increment_nesting()
else:
self.prev = torch.is_autocast_enabled()
self.prev_fastdtype = torch.get_autocast_gpu_dtype()
torch.set_autocast_gpu_dtype(self.fast_dtype)
torch.set_autocast_enabled(self._enabled)
torch.autocast_increment_nesting()
Reported by Pylint.
torch/distributions/__init__.py
43 issues
Line: 74
Column: 1
loss.backward()
"""
from .bernoulli import Bernoulli
from .beta import Beta
from .binomial import Binomial
from .categorical import Categorical
from .cauchy import Cauchy
from .chi2 import Chi2
Reported by Pylint.
Line: 75
Column: 1
"""
from .bernoulli import Bernoulli
from .beta import Beta
from .binomial import Binomial
from .categorical import Categorical
from .cauchy import Cauchy
from .chi2 import Chi2
from .constraint_registry import biject_to, transform_to
Reported by Pylint.
Line: 76
Column: 1
from .bernoulli import Bernoulli
from .beta import Beta
from .binomial import Binomial
from .categorical import Categorical
from .cauchy import Cauchy
from .chi2 import Chi2
from .constraint_registry import biject_to, transform_to
from .continuous_bernoulli import ContinuousBernoulli
Reported by Pylint.
Line: 77
Column: 1
from .bernoulli import Bernoulli
from .beta import Beta
from .binomial import Binomial
from .categorical import Categorical
from .cauchy import Cauchy
from .chi2 import Chi2
from .constraint_registry import biject_to, transform_to
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
Reported by Pylint.
Line: 78
Column: 1
from .beta import Beta
from .binomial import Binomial
from .categorical import Categorical
from .cauchy import Cauchy
from .chi2 import Chi2
from .constraint_registry import biject_to, transform_to
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
from .distribution import Distribution
Reported by Pylint.
Line: 79
Column: 1
from .binomial import Binomial
from .categorical import Categorical
from .cauchy import Cauchy
from .chi2 import Chi2
from .constraint_registry import biject_to, transform_to
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
from .distribution import Distribution
from .exp_family import ExponentialFamily
Reported by Pylint.
Line: 80
Column: 1
from .categorical import Categorical
from .cauchy import Cauchy
from .chi2 import Chi2
from .constraint_registry import biject_to, transform_to
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
from .distribution import Distribution
from .exp_family import ExponentialFamily
from .exponential import Exponential
Reported by Pylint.
Line: 81
Column: 1
from .cauchy import Cauchy
from .chi2 import Chi2
from .constraint_registry import biject_to, transform_to
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
from .distribution import Distribution
from .exp_family import ExponentialFamily
from .exponential import Exponential
from .fishersnedecor import FisherSnedecor
Reported by Pylint.
Line: 82
Column: 1
from .chi2 import Chi2
from .constraint_registry import biject_to, transform_to
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
from .distribution import Distribution
from .exp_family import ExponentialFamily
from .exponential import Exponential
from .fishersnedecor import FisherSnedecor
from .gamma import Gamma
Reported by Pylint.
Line: 83
Column: 1
from .constraint_registry import biject_to, transform_to
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
from .distribution import Distribution
from .exp_family import ExponentialFamily
from .exponential import Exponential
from .fishersnedecor import FisherSnedecor
from .gamma import Gamma
from .geometric import Geometric
Reported by Pylint.
torch/quantization/_learnable_fake_quantize.py
43 issues
Line: 40
Column: 36
observer_kwargs["quant_max"] = quant_max
self.use_grad_scaling = use_grad_scaling
if channel_len == -1:
self.scale = Parameter(torch.tensor([scale]))
self.zero_point = Parameter(torch.tensor([zero_point]))
else:
assert isinstance(channel_len, int) and channel_len > 0, "Channel size must be a positive integer."
self.scale = Parameter(torch.tensor([scale] * channel_len))
self.zero_point = Parameter(torch.tensor([zero_point] * channel_len))
Reported by Pylint.
Line: 41
Column: 41
self.use_grad_scaling = use_grad_scaling
if channel_len == -1:
self.scale = Parameter(torch.tensor([scale]))
self.zero_point = Parameter(torch.tensor([zero_point]))
else:
assert isinstance(channel_len, int) and channel_len > 0, "Channel size must be a positive integer."
self.scale = Parameter(torch.tensor([scale] * channel_len))
self.zero_point = Parameter(torch.tensor([zero_point] * channel_len))
Reported by Pylint.
Line: 44
Column: 36
self.zero_point = Parameter(torch.tensor([zero_point]))
else:
assert isinstance(channel_len, int) and channel_len > 0, "Channel size must be a positive integer."
self.scale = Parameter(torch.tensor([scale] * channel_len))
self.zero_point = Parameter(torch.tensor([zero_point] * channel_len))
self.activation_post_process = observer(**observer_kwargs)
assert torch.iinfo(self.activation_post_process.dtype).min <= quant_min, \
'quant_min out of bound'
Reported by Pylint.
Line: 45
Column: 41
else:
assert isinstance(channel_len, int) and channel_len > 0, "Channel size must be a positive integer."
self.scale = Parameter(torch.tensor([scale] * channel_len))
self.zero_point = Parameter(torch.tensor([zero_point] * channel_len))
self.activation_post_process = observer(**observer_kwargs)
assert torch.iinfo(self.activation_post_process.dtype).min <= quant_min, \
'quant_min out of bound'
assert quant_max <= torch.iinfo(self.activation_post_process.dtype).max, \
Reported by Pylint.
Line: 48
Column: 16
self.zero_point = Parameter(torch.tensor([zero_point] * channel_len))
self.activation_post_process = observer(**observer_kwargs)
assert torch.iinfo(self.activation_post_process.dtype).min <= quant_min, \
'quant_min out of bound'
assert quant_max <= torch.iinfo(self.activation_post_process.dtype).max, \
'quant_max out of bound'
self.dtype = self.activation_post_process.dtype
self.qscheme = self.activation_post_process.qscheme
Reported by Pylint.
Line: 50
Column: 29
self.activation_post_process = observer(**observer_kwargs)
assert torch.iinfo(self.activation_post_process.dtype).min <= quant_min, \
'quant_min out of bound'
assert quant_max <= torch.iinfo(self.activation_post_process.dtype).max, \
'quant_max out of bound'
self.dtype = self.activation_post_process.dtype
self.qscheme = self.activation_post_process.qscheme
self.ch_axis = self.activation_post_process.ch_axis \
if hasattr(self.activation_post_process, 'ch_axis') else -1
Reported by Pylint.
Line: 56
Column: 52
self.qscheme = self.activation_post_process.qscheme
self.ch_axis = self.activation_post_process.ch_axis \
if hasattr(self.activation_post_process, 'ch_axis') else -1
self.register_buffer('fake_quant_enabled', torch.tensor([1], dtype=torch.uint8))
self.register_buffer('static_enabled', torch.tensor([1], dtype=torch.uint8))
self.register_buffer('learning_enabled', torch.tensor([0], dtype=torch.uint8))
bitrange = torch.tensor(quant_max - quant_min + 1).double()
self.bitwidth = int(torch.log2(bitrange).item())
Reported by Pylint.
Line: 56
Column: 76
self.qscheme = self.activation_post_process.qscheme
self.ch_axis = self.activation_post_process.ch_axis \
if hasattr(self.activation_post_process, 'ch_axis') else -1
self.register_buffer('fake_quant_enabled', torch.tensor([1], dtype=torch.uint8))
self.register_buffer('static_enabled', torch.tensor([1], dtype=torch.uint8))
self.register_buffer('learning_enabled', torch.tensor([0], dtype=torch.uint8))
bitrange = torch.tensor(quant_max - quant_min + 1).double()
self.bitwidth = int(torch.log2(bitrange).item())
Reported by Pylint.
Line: 57
Column: 72
self.ch_axis = self.activation_post_process.ch_axis \
if hasattr(self.activation_post_process, 'ch_axis') else -1
self.register_buffer('fake_quant_enabled', torch.tensor([1], dtype=torch.uint8))
self.register_buffer('static_enabled', torch.tensor([1], dtype=torch.uint8))
self.register_buffer('learning_enabled', torch.tensor([0], dtype=torch.uint8))
bitrange = torch.tensor(quant_max - quant_min + 1).double()
self.bitwidth = int(torch.log2(bitrange).item())
self.register_buffer('eps', torch.tensor([torch.finfo(torch.float32).eps]))
Reported by Pylint.
Line: 57
Column: 48
self.ch_axis = self.activation_post_process.ch_axis \
if hasattr(self.activation_post_process, 'ch_axis') else -1
self.register_buffer('fake_quant_enabled', torch.tensor([1], dtype=torch.uint8))
self.register_buffer('static_enabled', torch.tensor([1], dtype=torch.uint8))
self.register_buffer('learning_enabled', torch.tensor([0], dtype=torch.uint8))
bitrange = torch.tensor(quant_max - quant_min + 1).double()
self.bitwidth = int(torch.log2(bitrange).item())
self.register_buffer('eps', torch.tensor([torch.finfo(torch.float32).eps]))
Reported by Pylint.