The following issues were found:
torch/fx/_symbolic_trace.py
109 issues
Line: 10
Column: 1
from typing import Any, Dict, NamedTuple, Optional, Set, Tuple, Type, List, Callable, Union
from itertools import chain
import torch
import torch._C._fx # type: ignore[import]
from torch._C import ScriptObject # type: ignore[attr-defined]
import torch.utils._pytree as pytree
import sys
from .node import Argument, map_aggregate, base_types
Reported by Pylint.
Line: 15
Column: 1
import torch.utils._pytree as pytree
import sys
from .node import Argument, map_aggregate, base_types
from .graph import Graph, _PyTreeInfo
from .graph_module import GraphModule
from .proxy import TracerBase, Proxy, ParameterProxy
HAS_VARSTUFF = inspect.CO_VARARGS | inspect.CO_VARKEYWORDS
Reported by Pylint.
Line: 16
Column: 1
import sys
from .node import Argument, map_aggregate, base_types
from .graph import Graph, _PyTreeInfo
from .graph_module import GraphModule
from .proxy import TracerBase, Proxy, ParameterProxy
HAS_VARSTUFF = inspect.CO_VARARGS | inspect.CO_VARKEYWORDS
Reported by Pylint.
Line: 17
Column: 1
import sys
from .node import Argument, map_aggregate, base_types
from .graph import Graph, _PyTreeInfo
from .graph_module import GraphModule
from .proxy import TracerBase, Proxy, ParameterProxy
HAS_VARSTUFF = inspect.CO_VARARGS | inspect.CO_VARKEYWORDS
# These need to run in global scope to handle nested calls correctly
Reported by Pylint.
Line: 18
Column: 1
from .node import Argument, map_aggregate, base_types
from .graph import Graph, _PyTreeInfo
from .graph_module import GraphModule
from .proxy import TracerBase, Proxy, ParameterProxy
HAS_VARSTUFF = inspect.CO_VARARGS | inspect.CO_VARKEYWORDS
# These need to run in global scope to handle nested calls correctly
_orig_module_call : Callable = torch.nn.Module.__call__
Reported by Pylint.
Line: 20
Column: 37
from .graph_module import GraphModule
from .proxy import TracerBase, Proxy, ParameterProxy
HAS_VARSTUFF = inspect.CO_VARARGS | inspect.CO_VARKEYWORDS
# These need to run in global scope to handle nested calls correctly
_orig_module_call : Callable = torch.nn.Module.__call__
_orig_module_getattr : Callable = torch.nn.Module.__getattr__
Reported by Pylint.
Line: 20
Column: 16
from .graph_module import GraphModule
from .proxy import TracerBase, Proxy, ParameterProxy
HAS_VARSTUFF = inspect.CO_VARARGS | inspect.CO_VARKEYWORDS
# These need to run in global scope to handle nested calls correctly
_orig_module_call : Callable = torch.nn.Module.__call__
_orig_module_getattr : Callable = torch.nn.Module.__getattr__
Reported by Pylint.
Line: 129
Column: 37
"""
def __init__(self, tracer):
self.tracer = tracer
patched_fns = [torch.randn, torch.rand, torch.randint]
def patched_impl(to_patch, args, kwargs):
return tracer.create_proxy('call_function', to_patch, args, kwargs)
c_patch_enabled = True
Reported by Pylint.
Line: 129
Column: 49
"""
def __init__(self, tracer):
self.tracer = tracer
patched_fns = [torch.randn, torch.rand, torch.randint]
def patched_impl(to_patch, args, kwargs):
return tracer.create_proxy('call_function', to_patch, args, kwargs)
c_patch_enabled = True
Reported by Pylint.
Line: 129
Column: 24
"""
def __init__(self, tracer):
self.tracer = tracer
patched_fns = [torch.randn, torch.rand, torch.randint]
def patched_impl(to_patch, args, kwargs):
return tracer.create_proxy('call_function', to_patch, args, kwargs)
c_patch_enabled = True
Reported by Pylint.
tools/codegen/api/types.py
108 issues
Line: 82
Column: 27
class BaseCType:
type: BaseCppType
def cpp_type(self, *, strip_ref: bool = False) -> str:
return str(self.type)
# For BC reasons, we don't want to introduce at:: namespaces to RegistrationDeclarations.yaml
# TODO: Kill this when we eventually remove it!
def cpp_type_registration_declarations(self) -> str:
Reported by Pylint.
Line: 86
Column: 3
return str(self.type)
# For BC reasons, we don't want to introduce at:: namespaces to RegistrationDeclarations.yaml
# TODO: Kill this when we eventually remove it!
def cpp_type_registration_declarations(self) -> str:
return str(self.type).replace('at::', '')
def remove_const_ref(self) -> 'CType':
return self
Reported by Pylint.
Line: 127
Column: 27
class OptionalCType:
elem: 'CType'
def cpp_type(self, *, strip_ref: bool = False) -> str:
# Do not pass `strip_ref` recursively.
return f'c10::optional<{self.elem.cpp_type()}>'
def cpp_type_registration_declarations(self) -> str:
return f'c10::optional<{self.elem.cpp_type_registration_declarations()}>'
Reported by Pylint.
Line: 141
Column: 27
class ListCType:
elem: 'CType'
def cpp_type(self, *, strip_ref: bool = False) -> str:
# Do not pass `strip_ref` recursively.
return f'c10::List<{self.elem.cpp_type()}>'
def cpp_type_registration_declarations(self) -> str:
return f'c10::List<{self.elem.cpp_type_registration_declarations()}>'
Reported by Pylint.
Line: 155
Column: 27
class ArrayRefCType:
elem: 'CType'
def cpp_type(self, *, strip_ref: bool = False) -> str:
# Do not pass `strip_ref` recursively.
return f'at::ArrayRef<{self.elem.cpp_type()}>'
def cpp_type_registration_declarations(self) -> str:
return f'ArrayRef<{self.elem.cpp_type_registration_declarations()}>'
Reported by Pylint.
Line: 169
Column: 27
class VectorCType:
elem: 'CType'
def cpp_type(self, *, strip_ref: bool = False) -> str:
# Do not pass `strip_ref` recursively.
return f'::std::vector<{self.elem.cpp_type()}>'
def cpp_type_registration_declarations(self) -> str:
return f'::std::vector<{self.elem.cpp_type_registration_declarations()}>'
Reported by Pylint.
Line: 184
Column: 27
elem: 'CType'
size: int
def cpp_type(self, *, strip_ref: bool = False) -> str:
# Do not pass `strip_ref` recursively.
return f'::std::array<{self.elem.cpp_type()},{self.size}>'
def cpp_type_registration_declarations(self) -> str:
return f'::std::array<{self.elem.cpp_type_registration_declarations()},{self.size}>'
Reported by Pylint.
Line: 198
Column: 27
class TupleCType:
elems: List['CType']
def cpp_type(self, *, strip_ref: bool = False) -> str:
# Do not pass `strip_ref` recursively.
return f'::std::tuple<{",".join([e.cpp_type() for e in self.elems])}>'
def cpp_type_registration_declarations(self) -> str:
return f'::std::tuple<{",".join([e.cpp_type_registration_declarations() for e in self.elems])}>'
Reported by Pylint.
Line: 236
Column: 3
return self.type.cpp_type(strip_ref=strip_ref)
# For BC reasons, we don't want to introduce at:: namespaces to RegistrationDeclarations.yaml
# TODO: Kill this when we eventually remove it!
def cpp_type_registration_declarations(self) -> str:
return self.type.cpp_type_registration_declarations()
def remove_const_ref(self) -> 'NamedCType':
return NamedCType(self.name, self.type.remove_const_ref())
Reported by Pylint.
Line: 257
Column: 3
name: str
nctype: NamedCType
argument: Union[Argument, TensorOptionsArguments, SelfArgument]
# TODO: maybe don't represent default here
default: Optional[str] = None
@property
def type(self) -> str:
return self.nctype.cpp_type()
Reported by Pylint.
caffe2/python/operator_test/spatial_bn_op_test.py
107 issues
Line: 12
Column: 1
from caffe2.python.model_helper import ModelHelper
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, assume, settings
import hypothesis.strategies as st
import numpy as np
import unittest
Reported by Pylint.
Line: 13
Column: 1
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, assume, settings
import hypothesis.strategies as st
import numpy as np
import unittest
class TestSpatialBN(serial.SerializedTestCase):
Reported by Pylint.
Line: 30
Column: 53
engine=st.sampled_from(["", "CUDNN"]),
**hu.gcs)
def test_spatialbn_test_mode_3d(
self, size, input_channels, batch_size, seed, order, epsilon,
inplace, engine, gc, dc):
op = core.CreateOperator(
"SpatialBN",
["X", "scale", "bias", "mean", "var"],
["X" if inplace else "Y"],
Reported by Pylint.
Line: 77
Column: 53
engine=st.sampled_from(["", "CUDNN"]),
**hu.gcs)
def test_spatialbn_test_mode_1d(
self, size, input_channels, batch_size, seed, order, epsilon,
inplace, engine, gc, dc):
# Currently MIOPEN SpatialBN only supports 2D
if hiputl.run_in_hip(gc, dc):
assume(engine != "CUDNN")
op = core.CreateOperator(
Reported by Pylint.
Line: 124
Column: 53
inplace=st.booleans(),
**hu.gcs)
def test_spatialbn_test_mode(
self, size, input_channels, batch_size, seed, order, epsilon,
inplace, engine, gc, dc):
# Currently HIP SpatialBN only supports NCHW
if hiputl.run_in_hip(gc, dc):
assume(order == "NCHW")
Reported by Pylint.
Line: 174
Column: 53
inplace=st.sampled_from([True, False]),
**hu.gcs)
def test_spatialbn_train_mode(
self, size, input_channels, batch_size, seed, order, epsilon,
momentum, inplace, engine, gc, dc):
# Currently HIP SpatialBN only supports NCHW
if hiputl.run_in_hip(gc, dc):
assume(order == "NCHW")
Reported by Pylint.
Line: 475
Column: 13
**hu.gcs)
def test_spatialbn_brew_wrapper(
self, size, input_channels, batch_size, seed, epsilon,
engine, gc, dc):
np.random.seed(seed)
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32)
workspace.FeedBlob('X', X)
Reported by Pylint.
Line: 475
Column: 21
**hu.gcs)
def test_spatialbn_brew_wrapper(
self, size, input_channels, batch_size, seed, epsilon,
engine, gc, dc):
np.random.seed(seed)
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32)
workspace.FeedBlob('X', X)
Reported by Pylint.
Line: 475
Column: 25
**hu.gcs)
def test_spatialbn_brew_wrapper(
self, size, input_channels, batch_size, seed, epsilon,
engine, gc, dc):
np.random.seed(seed)
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32)
workspace.FeedBlob('X', X)
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import brew, core, utils, workspace
import caffe2.python.hip_test_util as hiputl
import caffe2.python.hypothesis_test_util as hu
from caffe2.python.model_helper import ModelHelper
Reported by Pylint.
torch/utils/tensorboard/summary.py
107 issues
Line: 10
Column: 1
# pylint: disable=unused-import
from six.moves import range
from google.protobuf import struct_pb2
from tensorboard.compat.proto.summary_pb2 import Summary
from tensorboard.compat.proto.summary_pb2 import HistogramProto
from tensorboard.compat.proto.summary_pb2 import SummaryMetadata
from tensorboard.compat.proto.tensor_pb2 import TensorProto
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
Reported by Pylint.
Line: 11
Column: 1
from six.moves import range
from google.protobuf import struct_pb2
from tensorboard.compat.proto.summary_pb2 import Summary
from tensorboard.compat.proto.summary_pb2 import HistogramProto
from tensorboard.compat.proto.summary_pb2 import SummaryMetadata
from tensorboard.compat.proto.tensor_pb2 import TensorProto
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from tensorboard.plugins.text.plugin_data_pb2 import TextPluginData
Reported by Pylint.
Line: 12
Column: 1
from google.protobuf import struct_pb2
from tensorboard.compat.proto.summary_pb2 import Summary
from tensorboard.compat.proto.summary_pb2 import HistogramProto
from tensorboard.compat.proto.summary_pb2 import SummaryMetadata
from tensorboard.compat.proto.tensor_pb2 import TensorProto
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from tensorboard.plugins.text.plugin_data_pb2 import TextPluginData
from tensorboard.plugins.pr_curve.plugin_data_pb2 import PrCurvePluginData
Reported by Pylint.
Line: 13
Column: 1
from google.protobuf import struct_pb2
from tensorboard.compat.proto.summary_pb2 import Summary
from tensorboard.compat.proto.summary_pb2 import HistogramProto
from tensorboard.compat.proto.summary_pb2 import SummaryMetadata
from tensorboard.compat.proto.tensor_pb2 import TensorProto
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from tensorboard.plugins.text.plugin_data_pb2 import TextPluginData
from tensorboard.plugins.pr_curve.plugin_data_pb2 import PrCurvePluginData
from tensorboard.plugins.custom_scalar import layout_pb2
Reported by Pylint.
Line: 14
Column: 1
from tensorboard.compat.proto.summary_pb2 import Summary
from tensorboard.compat.proto.summary_pb2 import HistogramProto
from tensorboard.compat.proto.summary_pb2 import SummaryMetadata
from tensorboard.compat.proto.tensor_pb2 import TensorProto
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from tensorboard.plugins.text.plugin_data_pb2 import TextPluginData
from tensorboard.plugins.pr_curve.plugin_data_pb2 import PrCurvePluginData
from tensorboard.plugins.custom_scalar import layout_pb2
from ._convert_np import make_np
Reported by Pylint.
Line: 15
Column: 1
from tensorboard.compat.proto.summary_pb2 import HistogramProto
from tensorboard.compat.proto.summary_pb2 import SummaryMetadata
from tensorboard.compat.proto.tensor_pb2 import TensorProto
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from tensorboard.plugins.text.plugin_data_pb2 import TextPluginData
from tensorboard.plugins.pr_curve.plugin_data_pb2 import PrCurvePluginData
from tensorboard.plugins.custom_scalar import layout_pb2
from ._convert_np import make_np
from ._utils import _prepare_video, convert_to_HWC
Reported by Pylint.
Line: 16
Column: 1
from tensorboard.compat.proto.summary_pb2 import SummaryMetadata
from tensorboard.compat.proto.tensor_pb2 import TensorProto
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from tensorboard.plugins.text.plugin_data_pb2 import TextPluginData
from tensorboard.plugins.pr_curve.plugin_data_pb2 import PrCurvePluginData
from tensorboard.plugins.custom_scalar import layout_pb2
from ._convert_np import make_np
from ._utils import _prepare_video, convert_to_HWC
Reported by Pylint.
Line: 17
Column: 1
from tensorboard.compat.proto.tensor_pb2 import TensorProto
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from tensorboard.plugins.text.plugin_data_pb2 import TextPluginData
from tensorboard.plugins.pr_curve.plugin_data_pb2 import PrCurvePluginData
from tensorboard.plugins.custom_scalar import layout_pb2
from ._convert_np import make_np
from ._utils import _prepare_video, convert_to_HWC
Reported by Pylint.
Line: 18
Column: 1
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from tensorboard.plugins.text.plugin_data_pb2 import TextPluginData
from tensorboard.plugins.pr_curve.plugin_data_pb2 import PrCurvePluginData
from tensorboard.plugins.custom_scalar import layout_pb2
from ._convert_np import make_np
from ._utils import _prepare_video, convert_to_HWC
def _calc_scale_factor(tensor):
Reported by Pylint.
Line: 19
Column: 1
from tensorboard.plugins.text.plugin_data_pb2 import TextPluginData
from tensorboard.plugins.pr_curve.plugin_data_pb2 import PrCurvePluginData
from tensorboard.plugins.custom_scalar import layout_pb2
from ._convert_np import make_np
from ._utils import _prepare_video, convert_to_HWC
def _calc_scale_factor(tensor):
converted = tensor.numpy() if not isinstance(tensor, np.ndarray) else tensor
Reported by Pylint.
caffe2/python/operator_test/adam_test.py
106 issues
Line: 8
Column: 1
import functools
import hypothesis
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core
Reported by Pylint.
Line: 9
Column: 1
import functools
import hypothesis
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 10
Column: 1
import hypothesis
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 62
Column: 9
if i == k - 1:
mom1_out += grad * (1 - beta1)
param_out += LR * mom1_out / (np.sqrt(mom2_out) + epsilon)
grad_out = mom1_out / (np.sqrt(mom2_out) + epsilon)
return param_out, mom1_out, mom2_out, last_seen_out
@staticmethod
def ref_row_wise_adam(param, mom1, mom2, grad, LR, ITER,
Reported by Pylint.
Line: 92
Column: 70
epsilon=st.floats(min_value=0.01, max_value=0.99,
allow_nan=False, allow_infinity=False),
**hu.gcs)
def test_adam(self, inputs, ITER, LR, beta1, beta2, epsilon, gc, dc):
param, mom1, mom2, grad = inputs
mom2 = np.abs(mom2)
ITER = np.array([ITER], dtype=np.int64)
LR = np.array([LR], dtype=np.float32)
Reported by Pylint.
Line: 126
Column: 82
epsilon=st.floats(min_value=0.01, max_value=0.99,
allow_nan=False, allow_infinity=False),
**hu.gcs_cpu_only)
def test_adam_output_grad(self, inputs, ITER, LR, beta1, beta2, epsilon, gc, dc):
param, mom1, mom2, grad = inputs
mom2 = np.abs(mom2)
ITER = np.array([ITER], dtype=np.int64)
LR = np.array([LR], dtype=np.float32)
Reported by Pylint.
Line: 162
Column: 45
data_strategy=st.data(),
**hu.gcs)
def test_sparse_adam(self, inputs, ITER, LR, beta1, beta2, epsilon,
data_strategy, gc, dc):
param, mom1, mom2, grad = inputs
mom2 = np.absolute(mom2)
ITER = np.array([ITER], dtype=np.int64)
LR = np.array([LR], dtype=np.float32)
Reported by Pylint.
Line: 200
Column: 17
mom2_out = np.copy(mom2)
for i, index in enumerate(indices):
param_out[index], mom1_out[index], mom2_out[index] = \
self.ref_adam(param[index], mom1[index], mom2[index],
grad[i], LR, ITER,
beta1, beta2, epsilon)
return (param_out, mom1_out, mom2_out)
Reported by Pylint.
Line: 228
Column: 57
data_strategy=st.data(),
**hu.gcs)
def test_smart_decay_sparse_adam(self, inputs, ITER, LR, beta1, beta2, epsilon,
data_strategy, gc, dc):
param, mom1, mom2, grad = inputs
mom2 = np.absolute(mom2)
_iter, _lr = ITER, LR # Keep the scalar types for reference
ITER = np.array([ITER], dtype=np.int64)
LR = np.array([LR], dtype=np.float32)
Reported by Pylint.
Line: 307
Column: 57
data_strategy=st.data(),
**hu.gcs)
def test_sparse_adam_output_grad(self, inputs, ITER, LR, beta1, beta2, epsilon,
data_strategy, gc, dc):
param, mom1, mom2, grad = inputs
mom2 = np.absolute(mom2)
ITER = np.array([ITER], dtype=np.int64)
LR = np.array([LR], dtype=np.float32)
Reported by Pylint.
benchmarks/functional_autograd_benchmark/torchaudio_models.py
106 issues
Line: 4
Column: 1
# Taken from https://github.com/pytorch/audio/blob/master/torchaudio/models/wav2letter.py
# So that we don't need torchaudio to be installed
import torch
from torch import Tensor
from torch import nn
import torch.nn.functional as F
import math
Reported by Pylint.
Line: 5
Column: 1
# So that we don't need torchaudio to be installed
import torch
from torch import Tensor
from torch import nn
import torch.nn.functional as F
import math
from collections import OrderedDict
Reported by Pylint.
Line: 6
Column: 1
import torch
from torch import Tensor
from torch import nn
import torch.nn.functional as F
import math
from collections import OrderedDict
from typing import Tuple, Optional
Reported by Pylint.
Line: 7
Column: 1
import torch
from torch import Tensor
from torch import nn
import torch.nn.functional as F
import math
from collections import OrderedDict
from typing import Tuple, Optional
Reported by Pylint.
Line: 332
Column: 13
def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):
super(TransformerModel, self).__init__()
try:
from torch.nn import TransformerEncoder, TransformerEncoderLayer
except Exception:
raise ImportError('TransformerEncoder module does not exist in PyTorch 1.1 or lower.')
self.model_type = 'Transformer'
self.src_mask = None
self.pos_encoder = PositionalEncoding(ninp, dropout)
Reported by Pylint.
Line: 161
Column: 12
if self.batch_norm is not None:
x = self.batch_norm(x)
x = nn.utils.rnn.pack_padded_sequence(x, output_lengths, enforce_sorted=False)
x, h = self.rnn(x)
x, _ = nn.utils.rnn.pad_packed_sequence(x)
if self.bidirectional:
x = x.view(x.size(0), x.size(1), 2, -1).sum(2).view(x.size(0), x.size(1), -1) # (TxNxH*2) -> (TxNxH) by sum
return x
Reported by Pylint.
Line: 334
Column: 13
try:
from torch.nn import TransformerEncoder, TransformerEncoderLayer
except Exception:
raise ImportError('TransformerEncoder module does not exist in PyTorch 1.1 or lower.')
self.model_type = 'Transformer'
self.src_mask = None
self.pos_encoder = PositionalEncoding(ninp, dropout)
encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)
self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
Reported by Pylint.
Line: 1
Column: 1
# Taken from https://github.com/pytorch/audio/blob/master/torchaudio/models/wav2letter.py
# So that we don't need torchaudio to be installed
import torch
from torch import Tensor
from torch import nn
import torch.nn.functional as F
import math
Reported by Pylint.
Line: 9
Column: 1
from torch import nn
import torch.nn.functional as F
import math
from collections import OrderedDict
from typing import Tuple, Optional
__all__ = ["Wav2Letter"]
Reported by Pylint.
Line: 10
Column: 1
import torch.nn.functional as F
import math
from collections import OrderedDict
from typing import Tuple, Optional
__all__ = ["Wav2Letter"]
Reported by Pylint.
caffe2/python/operator_test/shape_inference_test.py
106 issues
Line: 24
Column: 22
brew.fc(m, "fc1", "fc2", dim_in=32, dim_out=55)
for b in [0, 64]:
(shapes, types) = workspace.InferShapesAndTypes(
[m.param_init_net, m.net],
{'data': [b, 96]}
)
self.assertEquals(shapes['data'], [b, 96])
Reported by Pylint.
Line: 29
Column: 13
{'data': [b, 96]}
)
self.assertEquals(shapes['data'], [b, 96])
self.assertEquals(shapes['fc1_w'], [32, 96])
self.assertEquals(shapes['fc1_b'], [32])
self.assertEquals(shapes['fc1'], [b, 32])
self.assertEquals(shapes['fc2_w'], [55, 32])
self.assertEquals(shapes['fc2_b'], [55])
Reported by Pylint.
Line: 30
Column: 13
)
self.assertEquals(shapes['data'], [b, 96])
self.assertEquals(shapes['fc1_w'], [32, 96])
self.assertEquals(shapes['fc1_b'], [32])
self.assertEquals(shapes['fc1'], [b, 32])
self.assertEquals(shapes['fc2_w'], [55, 32])
self.assertEquals(shapes['fc2_b'], [55])
self.assertEquals(shapes['fc2'], [b, 55])
Reported by Pylint.
Line: 31
Column: 13
self.assertEquals(shapes['data'], [b, 96])
self.assertEquals(shapes['fc1_w'], [32, 96])
self.assertEquals(shapes['fc1_b'], [32])
self.assertEquals(shapes['fc1'], [b, 32])
self.assertEquals(shapes['fc2_w'], [55, 32])
self.assertEquals(shapes['fc2_b'], [55])
self.assertEquals(shapes['fc2'], [b, 55])
Reported by Pylint.
Line: 32
Column: 13
self.assertEquals(shapes['data'], [b, 96])
self.assertEquals(shapes['fc1_w'], [32, 96])
self.assertEquals(shapes['fc1_b'], [32])
self.assertEquals(shapes['fc1'], [b, 32])
self.assertEquals(shapes['fc2_w'], [55, 32])
self.assertEquals(shapes['fc2_b'], [55])
self.assertEquals(shapes['fc2'], [b, 55])
def testFCAxis2(self):
Reported by Pylint.
Line: 33
Column: 13
self.assertEquals(shapes['fc1_w'], [32, 96])
self.assertEquals(shapes['fc1_b'], [32])
self.assertEquals(shapes['fc1'], [b, 32])
self.assertEquals(shapes['fc2_w'], [55, 32])
self.assertEquals(shapes['fc2_b'], [55])
self.assertEquals(shapes['fc2'], [b, 55])
def testFCAxis2(self):
model = model_helper.ModelHelper(name="test_model")
Reported by Pylint.
Line: 34
Column: 13
self.assertEquals(shapes['fc1_b'], [32])
self.assertEquals(shapes['fc1'], [b, 32])
self.assertEquals(shapes['fc2_w'], [55, 32])
self.assertEquals(shapes['fc2_b'], [55])
self.assertEquals(shapes['fc2'], [b, 55])
def testFCAxis2(self):
model = model_helper.ModelHelper(name="test_model")
model.net.FC(["x", "w", "b"], ["y"], axis=2)
Reported by Pylint.
Line: 35
Column: 13
self.assertEquals(shapes['fc1'], [b, 32])
self.assertEquals(shapes['fc2_w'], [55, 32])
self.assertEquals(shapes['fc2_b'], [55])
self.assertEquals(shapes['fc2'], [b, 55])
def testFCAxis2(self):
model = model_helper.ModelHelper(name="test_model")
model.net.FC(["x", "w", "b"], ["y"], axis=2)
workspace.FeedBlob("x", np.random.rand(4, 20, 36).astype(np.float32))
Reported by Pylint.
Line: 420
Column: 18
net.Concat(["A", "B"], ["C", "splits"], axis=1)
net.Concat(["C", "D"], ["E", "splitsE"], order="NCHW")
net.Concat(["E", "F"], ["G", "splitsG"], add_axis=1, order="NHWC")
(shapes, types) = workspace.InferShapesAndTypes(
[net],
{
'A': [10, 12, 9, 10],
'B': [10, 9, 9, 10],
'D': [10, 2, 9, 10],
Reported by Pylint.
Line: 467
Column: 18
def testSqueeze(self):
net = core.Net("sq")
net.Squeeze(["data"], ["data_squeezed"], dims=[3, 1])
(shapes, types) = workspace.InferShapesAndTypes(
[net],
{'data': [64, 1, 96, 1, 4]}
)
self.assertEqual(shapes['data_squeezed'], [64, 96, 4])
Reported by Pylint.
torch/utils/data/dataloader.py
106 issues
Line: 21
Column: 1
from torch._utils import ExceptionWrapper
from torch._six import string_classes
from . import IterableDataset, Sampler, SequentialSampler, RandomSampler, BatchSampler, Dataset
from . import _utils
T_co = TypeVar('T_co', covariant=True)
T = TypeVar('T')
_worker_init_fn_t = Callable[[int], None]
Reported by Pylint.
Line: 22
Column: 1
from torch._six import string_classes
from . import IterableDataset, Sampler, SequentialSampler, RandomSampler, BatchSampler, Dataset
from . import _utils
T_co = TypeVar('T_co', covariant=True)
T = TypeVar('T')
_worker_init_fn_t = Callable[[int], None]
Reported by Pylint.
Line: 298
Column: 9
self.check_worker_number_rationality()
torch.set_vital('Dataloader', 'enabled', 'True') # type: ignore[attr-defined]
def _get_iterator(self) -> '_BaseDataLoaderIter':
if self.num_workers == 0:
return _SingleProcessDataLoaderIter(self)
else:
Reported by Pylint.
Line: 498
Column: 49
self._timeout = loader.timeout
self._collate_fn = loader.collate_fn
self._sampler_iter = iter(self._index_sampler)
self._base_seed = torch.empty((), dtype=torch.int64).random_(generator=loader.generator).item()
self._persistent_workers = loader.persistent_workers
self._num_yielded = 0
self._profile_name = "enumerate(DataLoader)#{}.__next__".format(self.__class__.__name__)
def __iter__(self) -> '_BaseDataLoaderIter':
Reported by Pylint.
Line: 498
Column: 27
self._timeout = loader.timeout
self._collate_fn = loader.collate_fn
self._sampler_iter = iter(self._index_sampler)
self._base_seed = torch.empty((), dtype=torch.int64).random_(generator=loader.generator).item()
self._persistent_workers = loader.persistent_workers
self._num_yielded = 0
self._profile_name = "enumerate(DataLoader)#{}.__next__".format(self.__class__.__name__)
def __iter__(self) -> '_BaseDataLoaderIter':
Reported by Pylint.
Line: 520
Column: 17
def __next__(self) -> Any:
with torch.autograd.profiler.record_function(self._profile_name):
if self._sampler_iter is None:
self._reset()
data = self._next_data()
self._num_yielded += 1
if self._dataset_kind == _DatasetKind.Iterable and \
self._IterableDataset_len_called is not None and \
self._num_yielded > self._IterableDataset_len_called:
Reported by Pylint.
Line: 1016
Column: 20
fds_limit_margin = 10
fs = [tempfile.NamedTemporaryFile() for i in range(fds_limit_margin)]
except OSError as e:
if e.errno == errno.EMFILE:
raise RuntimeError(
"Too many open files. Communication with the"
" workers is no longer possible. Please increase the"
" limit using `ulimit -n` in the shell or change the"
" sharing strategy by calling"
Reported by Pylint.
Line: 50
Column: 20
@staticmethod
def create_fetcher(kind, dataset, auto_collation, collate_fn, drop_last):
if kind == _DatasetKind.Map:
return _utils.fetch._MapDatasetFetcher(dataset, auto_collation, collate_fn, drop_last)
else:
return _utils.fetch._IterableDatasetFetcher(dataset, auto_collation, collate_fn, drop_last)
class _InfiniteConstantSampler(Sampler):
Reported by Pylint.
Line: 52
Column: 20
if kind == _DatasetKind.Map:
return _utils.fetch._MapDatasetFetcher(dataset, auto_collation, collate_fn, drop_last)
else:
return _utils.fetch._IterableDatasetFetcher(dataset, auto_collation, collate_fn, drop_last)
class _InfiniteConstantSampler(Sampler):
r"""Analogous to ``itertools.repeat(None, None)``.
Used as sampler for :class:`~torch.utils.data.IterableDataset`.
Reported by Pylint.
Line: 461
Column: 20
try:
max_num_worker_suggest = len(os.sched_getaffinity(0))
cpuset_checked = True
except Exception:
pass
if max_num_worker_suggest is None:
# os.cpu_count() could return Optional[int]
# get cpu count first and check None in order to satify mypy check
cpu_count = os.cpu_count()
Reported by Pylint.
test/ao/sparsity/test_pruner.py
105 issues
Line: 5
Column: 1
import logging
import torch
from torch import nn
from torch.ao.sparsity import BasePruner, PruningParametrization
from torch.nn.utils import parametrize
from torch.testing._internal.common_utils import TestCase
Reported by Pylint.
Line: 6
Column: 1
import logging
import torch
from torch import nn
from torch.ao.sparsity import BasePruner, PruningParametrization
from torch.nn.utils import parametrize
from torch.testing._internal.common_utils import TestCase
Reported by Pylint.
Line: 7
Column: 1
import torch
from torch import nn
from torch.ao.sparsity import BasePruner, PruningParametrization
from torch.nn.utils import parametrize
from torch.testing._internal.common_utils import TestCase
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
Reported by Pylint.
Line: 8
Column: 1
import torch
from torch import nn
from torch.ao.sparsity import BasePruner, PruningParametrization
from torch.nn.utils import parametrize
from torch.testing._internal.common_utils import TestCase
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
Reported by Pylint.
Line: 10
Column: 1
from torch.ao.sparsity import BasePruner, PruningParametrization
from torch.nn.utils import parametrize
from torch.testing._internal.common_utils import TestCase
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
DEVICES = {"cpu", "cuda" if torch.cuda.is_available() else "cpu"}
Reported by Pylint.
Line: 142
Column: 1
class SimplePruner(BasePruner):
def update_mask(self, layer, **kwargs):
layer.parametrizations.weight[0].pruned_outputs.add(1)
class MultiplePruner(BasePruner):
def update_mask(self, layer, **kwargs):
Reported by Pylint.
Line: 147
Column: 1
class MultiplePruner(BasePruner):
def update_mask(self, layer, **kwargs):
layer.parametrizations.weight[0].pruned_outputs.update([1, 2])
class TestBasePruner(TestCase):
def _check_pruner_prepared(self, model, pruner, device):
Reported by Pylint.
Line: 152
Column: 38
class TestBasePruner(TestCase):
def _check_pruner_prepared(self, model, pruner, device):
for g in pruner.module_groups:
module = g['module']
assert module.weight.device == device
# Check mask exists
assert hasattr(module, 'mask')
Reported by Pylint.
Line: 164
Column: 39
# Assume that this is the 1st/only parametrization
assert type(module.parametrizations.weight[0]) == PruningParametrization
def _check_pruner_converted(self, model, pruner, device):
for g in pruner.module_groups:
module = g['module']
assert module.weight.device == device
assert not hasattr(module, "parametrizations")
assert not hasattr(module, 'mask')
Reported by Pylint.
Line: 171
Column: 47
assert not hasattr(module, "parametrizations")
assert not hasattr(module, 'mask')
def _check_pruner_valid_before_step(self, model, pruner, device):
for g in pruner.module_groups:
module = g['module']
assert module.weight.device == device
assert module.parametrizations.weight[0].pruned_outputs == set()
Reported by Pylint.
test/distributed/elastic/agent/server/test/api_test.py
104 issues
Line: 16
Column: 1
from typing import Any, Dict
from unittest.mock import call, patch
import torch.distributed.elastic.rendezvous.registry as rdzv_registry
from torch.distributed.elastic.agent.server.api import (
RunResult,
SimpleElasticAgent,
WorkerGroup,
WorkerSpec,
Reported by Pylint.
Line: 17
Column: 1
from unittest.mock import call, patch
import torch.distributed.elastic.rendezvous.registry as rdzv_registry
from torch.distributed.elastic.agent.server.api import (
RunResult,
SimpleElasticAgent,
WorkerGroup,
WorkerSpec,
WorkerState,
Reported by Pylint.
Line: 26
Column: 1
_get_fq_hostname,
_RoleInstanceInfo,
)
from torch.distributed.elastic.multiprocessing import SignalException
from torch.distributed.elastic.multiprocessing.errors import ProcessFailure
from torch.distributed.elastic.rendezvous import RendezvousHandler, RendezvousParameters
from torch.distributed.elastic.utils.distributed import get_free_port
from torch.testing._internal.common_utils import run_tests
Reported by Pylint.
Line: 27
Column: 1
_RoleInstanceInfo,
)
from torch.distributed.elastic.multiprocessing import SignalException
from torch.distributed.elastic.multiprocessing.errors import ProcessFailure
from torch.distributed.elastic.rendezvous import RendezvousHandler, RendezvousParameters
from torch.distributed.elastic.utils.distributed import get_free_port
from torch.testing._internal.common_utils import run_tests
Reported by Pylint.
Line: 28
Column: 1
)
from torch.distributed.elastic.multiprocessing import SignalException
from torch.distributed.elastic.multiprocessing.errors import ProcessFailure
from torch.distributed.elastic.rendezvous import RendezvousHandler, RendezvousParameters
from torch.distributed.elastic.utils.distributed import get_free_port
from torch.testing._internal.common_utils import run_tests
def do_nothing():
Reported by Pylint.
Line: 29
Column: 1
from torch.distributed.elastic.multiprocessing import SignalException
from torch.distributed.elastic.multiprocessing.errors import ProcessFailure
from torch.distributed.elastic.rendezvous import RendezvousHandler, RendezvousParameters
from torch.distributed.elastic.utils.distributed import get_free_port
from torch.testing._internal.common_utils import run_tests
def do_nothing():
pass
Reported by Pylint.
Line: 30
Column: 1
from torch.distributed.elastic.multiprocessing.errors import ProcessFailure
from torch.distributed.elastic.rendezvous import RendezvousHandler, RendezvousParameters
from torch.distributed.elastic.utils.distributed import get_free_port
from torch.testing._internal.common_utils import run_tests
def do_nothing():
pass
Reported by Pylint.
Line: 189
Column: 9
spec = self._get_worker_spec(max_restarts=1)
agent = TestAgent(spec)
worker_group = agent.get_worker_group()
self.assertEquals(WorkerState.INIT, worker_group.state)
self.assertEquals(spec.max_restarts, agent._remaining_restarts)
@patch("torch.distributed.elastic.agent.server.api.put_metric")
def test_record_flakiness_metric(self, put_metric_mock):
spec = self._get_worker_spec(max_restarts=1)
Reported by Pylint.
Line: 190
Column: 9
agent = TestAgent(spec)
worker_group = agent.get_worker_group()
self.assertEquals(WorkerState.INIT, worker_group.state)
self.assertEquals(spec.max_restarts, agent._remaining_restarts)
@patch("torch.distributed.elastic.agent.server.api.put_metric")
def test_record_flakiness_metric(self, put_metric_mock):
spec = self._get_worker_spec(max_restarts=1)
agent = TestAgent(spec)
Reported by Pylint.
Line: 190
Column: 46
agent = TestAgent(spec)
worker_group = agent.get_worker_group()
self.assertEquals(WorkerState.INIT, worker_group.state)
self.assertEquals(spec.max_restarts, agent._remaining_restarts)
@patch("torch.distributed.elastic.agent.server.api.put_metric")
def test_record_flakiness_metric(self, put_metric_mock):
spec = self._get_worker_spec(max_restarts=1)
agent = TestAgent(spec)
Reported by Pylint.