The following issues were found:
caffe2/python/operator_test/key_split_ops_test.py
13 issues
Line: 6
Column: 1
import hypothesis.strategies as st
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 9
Column: 1
import hypothesis.strategies as st
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import numpy as np
Reported by Pylint.
Line: 24
Column: 40
),
**hu.gcs_cpu_only
)
def test_key_split_op(self, X, gc, dc):
categorical_limit = max(X) + 1
workspace.ResetWorkspace()
workspace.FeedBlob('X', X)
output_blobs = ['Y_%d' % i for i in range(categorical_limit)]
op = core.CreateOperator(
Reported by Pylint.
Line: 24
Column: 36
),
**hu.gcs_cpu_only
)
def test_key_split_op(self, X, gc, dc):
categorical_limit = max(X) + 1
workspace.ResetWorkspace()
workspace.FeedBlob('X', X)
output_blobs = ['Y_%d' % i for i in range(categorical_limit)]
op = core.CreateOperator(
Reported by Pylint.
Line: 1
Column: 1
import hypothesis.strategies as st
from caffe2.python import core, workspace
from hypothesis import given
Reported by Pylint.
Line: 15
Column: 1
import numpy as np
class TestKeySplitOps(hu.HypothesisTestCase):
@given(
X=hu.arrays(
dims=[1000],
dtype=np.int64,
elements=st.integers(min_value=0, max_value=100)
Reported by Pylint.
Line: 23
Column: 5
elements=st.integers(min_value=0, max_value=100)
),
**hu.gcs_cpu_only
)
def test_key_split_op(self, X, gc, dc):
categorical_limit = max(X) + 1
workspace.ResetWorkspace()
workspace.FeedBlob('X', X)
output_blobs = ['Y_%d' % i for i in range(categorical_limit)]
Reported by Pylint.
Line: 23
Column: 5
elements=st.integers(min_value=0, max_value=100)
),
**hu.gcs_cpu_only
)
def test_key_split_op(self, X, gc, dc):
categorical_limit = max(X) + 1
workspace.ResetWorkspace()
workspace.FeedBlob('X', X)
output_blobs = ['Y_%d' % i for i in range(categorical_limit)]
Reported by Pylint.
Line: 23
Column: 5
elements=st.integers(min_value=0, max_value=100)
),
**hu.gcs_cpu_only
)
def test_key_split_op(self, X, gc, dc):
categorical_limit = max(X) + 1
workspace.ResetWorkspace()
workspace.FeedBlob('X', X)
output_blobs = ['Y_%d' % i for i in range(categorical_limit)]
Reported by Pylint.
Line: 23
Column: 5
elements=st.integers(min_value=0, max_value=100)
),
**hu.gcs_cpu_only
)
def test_key_split_op(self, X, gc, dc):
categorical_limit = max(X) + 1
workspace.ResetWorkspace()
workspace.FeedBlob('X', X)
output_blobs = ['Y_%d' % i for i in range(categorical_limit)]
Reported by Pylint.
caffe2/python/operator_test/onnx_while_test.py
13 issues
Line: 9
Column: 1
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
Reported by Pylint.
Line: 10
Column: 1
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
class TestONNXWhile(serial.SerializedTestCase):
Reported by Pylint.
Line: 25
Column: 85
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_onnx_while_fibb(
self, condition, max_trip_count, save_scopes, disable_scopes, seed, gc, dc):
np.random.seed(seed)
if disable_scopes:
save_scopes = False
# Create body net
Reported by Pylint.
Line: 76
Column: 56
first_init = np.array([1]).astype(np.float32)
second_init = np.array([1]).astype(np.float32)
def ref(max_trip_count, condition, first_init, second_init):
first = 1
second = 1
results = []
if condition:
for _ in range(max_trip_count):
Reported by Pylint.
Line: 76
Column: 44
first_init = np.array([1]).astype(np.float32)
second_init = np.array([1]).astype(np.float32)
def ref(max_trip_count, condition, first_init, second_init):
first = 1
second = 1
results = []
if condition:
for _ in range(max_trip_count):
Reported by Pylint.
Line: 1
Column: 1
from caffe2.proto import caffe2_pb2
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
Reported by Pylint.
Line: 12
Column: 1
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
class TestONNXWhile(serial.SerializedTestCase):
@given(
condition=st.booleans(),
Reported by Pylint.
Line: 15
Column: 1
import unittest
class TestONNXWhile(serial.SerializedTestCase):
@given(
condition=st.booleans(),
max_trip_count=st.integers(0, 100),
save_scopes=st.booleans(),
disable_scopes=st.booleans(),
Reported by Pylint.
Line: 24
Column: 5
seed=st.integers(0, 65535),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_onnx_while_fibb(
self, condition, max_trip_count, save_scopes, disable_scopes, seed, gc, dc):
np.random.seed(seed)
if disable_scopes:
save_scopes = False
Reported by Pylint.
Line: 24
Column: 5
seed=st.integers(0, 65535),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_onnx_while_fibb(
self, condition, max_trip_count, save_scopes, disable_scopes, seed, gc, dc):
np.random.seed(seed)
if disable_scopes:
save_scopes = False
Reported by Pylint.
test/onnx/debug_embed_params.py
13 issues
Line: 3
Column: 1
import sys
import torch
import torch.jit
from torch.autograd import Variable
import onnx
import caffe2.python.onnx.backend as c2
from test_pytorch_common import flatten
Reported by Pylint.
Line: 4
Column: 1
import sys
import torch
import torch.jit
from torch.autograd import Variable
import onnx
import caffe2.python.onnx.backend as c2
from test_pytorch_common import flatten
Reported by Pylint.
Line: 5
Column: 1
import torch
import torch.jit
from torch.autograd import Variable
import onnx
import caffe2.python.onnx.backend as c2
from test_pytorch_common import flatten
Reported by Pylint.
Line: 7
Column: 1
import torch.jit
from torch.autograd import Variable
import onnx
import caffe2.python.onnx.backend as c2
from test_pytorch_common import flatten
torch.set_default_tensor_type("torch.FloatTensor")
Reported by Pylint.
Line: 8
Column: 1
from torch.autograd import Variable
import onnx
import caffe2.python.onnx.backend as c2
from test_pytorch_common import flatten
torch.set_default_tensor_type("torch.FloatTensor")
try:
Reported by Pylint.
Line: 14
Column: 5
torch.set_default_tensor_type("torch.FloatTensor")
try:
import torch
except ImportError:
print("Cannot import torch, hence caffe2-torch test will not run.")
sys.exit(0)
Reported by Pylint.
Line: 20
Column: 36
sys.exit(0)
def run_embed_params(proto, model, input, state_dict=None, use_gpu=True):
"""
This is only a helper debug function so we can test embed_params=False
case as well on pytorch front
This should likely be removed from the release version of the code
"""
Reported by Pylint.
Line: 37
Column: 3
parameters = []
# Passed in state_dict may have a different order. Make
# sure our order is consistent with the model's order.
# TODO: Even better: keyword arguments!
for k in model.state_dict():
if k in state_dict:
parameters.append(state_dict[k])
else:
parameters = list(model.state_dict().values())
Reported by Pylint.
Line: 1
Column: 1
import sys
import torch
import torch.jit
from torch.autograd import Variable
import onnx
import caffe2.python.onnx.backend as c2
from test_pytorch_common import flatten
Reported by Pylint.
Line: 8
Column: 1
from torch.autograd import Variable
import onnx
import caffe2.python.onnx.backend as c2
from test_pytorch_common import flatten
torch.set_default_tensor_type("torch.FloatTensor")
try:
Reported by Pylint.
caffe2/python/operator_test/copy_rows_to_tensor_op_test.py
13 issues
Line: 8
Column: 1
import caffe2.python.hypothesis_test_util as hu
import numpy as np
from caffe2.python import core
from hypothesis import given, settings, strategies as st
logger = logging.getLogger(__name__)
Reported by Pylint.
Line: 29
Column: 58
class TestCopyRowsToTensor(hu.HypothesisTestCase):
@given(input_tensor=get_input_tensors(), **hu.gcs_cpu_only)
def test_copy_rows_to_tensor(self, input_tensor, gc, dc):
dtype = np.random.choice([np.float16, np.float32, np.int32, np.int64], 1)[0]
input_tensor = np.array(input_tensor).astype(dtype)
height = np.shape(input_tensor)[0]
width = np.shape(input_tensor)[1]
row = np.random.rand(width).astype(dtype)
Reported by Pylint.
Line: 56
Column: 72
@given(input_tensor=get_input_tensors(), **hu.gcs_cpu_only)
@settings(deadline=10000)
def test_copy_rows_to_tensor_invalid_input(self, input_tensor, gc, dc):
input_tensor = np.array(input_tensor).astype(np.float32)
height = np.shape(input_tensor)[0]
width = np.shape(input_tensor)[1]
row = np.random.rand(width + 1).astype(np.float32)
indices_lengths = np.random.randint(height)
Reported by Pylint.
Line: 1
Column: 1
import logging
import caffe2.python.hypothesis_test_util as hu
import numpy as np
from caffe2.python import core
from hypothesis import given, settings, strategies as st
Reported by Pylint.
Line: 14
Column: 1
logger = logging.getLogger(__name__)
def get_input_tensors():
height = np.random.randint(1, 10)
width = np.random.randint(1, 10)
dtype = np.float32
input_tensor = hu.arrays(
dims=[height, width],
Reported by Pylint.
Line: 27
Column: 1
return input_tensor
class TestCopyRowsToTensor(hu.HypothesisTestCase):
@given(input_tensor=get_input_tensors(), **hu.gcs_cpu_only)
def test_copy_rows_to_tensor(self, input_tensor, gc, dc):
dtype = np.random.choice([np.float16, np.float32, np.int32, np.int64], 1)[0]
input_tensor = np.array(input_tensor).astype(dtype)
height = np.shape(input_tensor)[0]
Reported by Pylint.
Line: 29
Column: 5
class TestCopyRowsToTensor(hu.HypothesisTestCase):
@given(input_tensor=get_input_tensors(), **hu.gcs_cpu_only)
def test_copy_rows_to_tensor(self, input_tensor, gc, dc):
dtype = np.random.choice([np.float16, np.float32, np.int32, np.int64], 1)[0]
input_tensor = np.array(input_tensor).astype(dtype)
height = np.shape(input_tensor)[0]
width = np.shape(input_tensor)[1]
row = np.random.rand(width).astype(dtype)
Reported by Pylint.
Line: 29
Column: 5
class TestCopyRowsToTensor(hu.HypothesisTestCase):
@given(input_tensor=get_input_tensors(), **hu.gcs_cpu_only)
def test_copy_rows_to_tensor(self, input_tensor, gc, dc):
dtype = np.random.choice([np.float16, np.float32, np.int32, np.int64], 1)[0]
input_tensor = np.array(input_tensor).astype(dtype)
height = np.shape(input_tensor)[0]
width = np.shape(input_tensor)[1]
row = np.random.rand(width).astype(dtype)
Reported by Pylint.
Line: 29
Column: 5
class TestCopyRowsToTensor(hu.HypothesisTestCase):
@given(input_tensor=get_input_tensors(), **hu.gcs_cpu_only)
def test_copy_rows_to_tensor(self, input_tensor, gc, dc):
dtype = np.random.choice([np.float16, np.float32, np.int32, np.int64], 1)[0]
input_tensor = np.array(input_tensor).astype(dtype)
height = np.shape(input_tensor)[0]
width = np.shape(input_tensor)[1]
row = np.random.rand(width).astype(dtype)
Reported by Pylint.
Line: 44
Column: 9
for idx in indices:
input_tensor[idx] = row
return [input_tensor]
op = core.CreateOperator(
"CopyRowsToTensor", ["input_tensor", "indices", "row"], ["input_tensor"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
Reported by Pylint.
torch/nn/cpp.py
13 issues
Line: 49
Column: 1
return self.cpp_dict.__getitem__(key)
class ModuleWrapper(nn.Module):
"""
A subclass of ``torch.nn.Module`` that wraps a C++ frontend module and
delegates all access.
"""
Reported by Pylint.
Line: 73
Column: 16
# Tensors stored in modules are graph leaves, and we don't
# want to create copy nodes, so we have to unpack the data.
param.data = fn(param.data)
if param._grad is not None:
param._grad.data = fn(param._grad.data)
for buf in self.buffers():
buf.data = fn(buf.data)
Reported by Pylint.
Line: 74
Column: 39
# want to create copy nodes, so we have to unpack the data.
param.data = fn(param.data)
if param._grad is not None:
param._grad.data = fn(param._grad.data)
for buf in self.buffers():
buf.data = fn(buf.data)
return self
Reported by Pylint.
Line: 74
Column: 17
# want to create copy nodes, so we have to unpack the data.
param.data = fn(param.data)
if param._grad is not None:
param._grad.data = fn(param._grad.data)
for buf in self.buffers():
buf.data = fn(buf.data)
return self
Reported by Pylint.
Line: 6
Column: 1
from torch import nn
class OrderedDictWrapper(object):
"""
A wrapper around a C++ OrderedDict that dynamically evaluates the
OrderedDict getter on a bound C++ module, such that new changes on the C++
side are picked up. Otherwise accessing e.g. ``cpp_module._parameters`` just
once would get a frozen copy of the parameters at the time of access.
Reported by Pylint.
Line: 21
Column: 5
self.attr = attr
@property
def cpp_dict(self):
return getattr(self.cpp_module, self.attr)
# Magic methods cannot be assigned dynamically and bypass ``getattr``, so we
# must manually override them.
Reported by Pylint.
Line: 27
Column: 5
# Magic methods cannot be assigned dynamically and bypass ``getattr``, so we
# must manually override them.
def items(self):
return self.cpp_dict.items()
def keys(self):
return self.cpp_dict.keys()
Reported by Pylint.
Line: 30
Column: 5
def items(self):
return self.cpp_dict.items()
def keys(self):
return self.cpp_dict.keys()
def values(self):
return self.cpp_dict.values()
Reported by Pylint.
Line: 33
Column: 5
def keys(self):
return self.cpp_dict.keys()
def values(self):
return self.cpp_dict.values()
def __iter__(self):
return self.cpp_dict.__iter__()
Reported by Pylint.
Line: 59
Column: 9
# Assign before the super class constructor so ``self.training`` can be
# assigned to in the super class constructor.
self.cpp_module = cpp_module
super(ModuleWrapper, self).__init__()
self._parameters = OrderedDictWrapper(cpp_module, "_parameters") # type: ignore[assignment]
self._buffers: OrderedDictWrapper = OrderedDictWrapper(cpp_module, "_buffers") # type: ignore[assignment]
self._modules: OrderedDictWrapper = OrderedDictWrapper(cpp_module, "_modules") # type: ignore[assignment]
for attr in dir(cpp_module):
# Skip magic methods and the three attributes above.
Reported by Pylint.
torch/quantization/stubs.py
13 issues
Line: 25
Column: 5
r"""Dequantize stub module, before calibration, this is same as identity,
this will be swapped as `nnq.DeQuantize` in `convert`.
"""
def __init__(self):
super(DeQuantStub, self).__init__()
def forward(self, x):
return x
Reported by Pylint.
Line: 1
Column: 1
from torch import nn
class QuantStub(nn.Module):
r"""Quantize stub module, before calibration, this is same as an observer,
it will be swapped as `nnq.Quantize` in `convert`.
Args:
qconfig: quantization configuration for the tensor,
Reported by Pylint.
Line: 13
Column: 9
if qconfig is not provided, we will get qconfig from parent modules
"""
def __init__(self, qconfig=None):
super(QuantStub, self).__init__()
if qconfig:
self.qconfig = qconfig
def forward(self, x):
return x
Reported by Pylint.
Line: 17
Column: 5
if qconfig:
self.qconfig = qconfig
def forward(self, x):
return x
class DeQuantStub(nn.Module):
r"""Dequantize stub module, before calibration, this is same as identity,
Reported by Pylint.
Line: 17
Column: 5
if qconfig:
self.qconfig = qconfig
def forward(self, x):
return x
class DeQuantStub(nn.Module):
r"""Dequantize stub module, before calibration, this is same as identity,
Reported by Pylint.
Line: 17
Column: 5
if qconfig:
self.qconfig = qconfig
def forward(self, x):
return x
class DeQuantStub(nn.Module):
r"""Dequantize stub module, before calibration, this is same as identity,
Reported by Pylint.
Line: 26
Column: 9
this will be swapped as `nnq.DeQuantize` in `convert`.
"""
def __init__(self):
super(DeQuantStub, self).__init__()
def forward(self, x):
return x
Reported by Pylint.
Line: 28
Column: 5
def __init__(self):
super(DeQuantStub, self).__init__()
def forward(self, x):
return x
class QuantWrapper(nn.Module):
r"""A wrapper class that wraps the input module, adds QuantStub and
Reported by Pylint.
Line: 28
Column: 5
def __init__(self):
super(DeQuantStub, self).__init__()
def forward(self, x):
return x
class QuantWrapper(nn.Module):
r"""A wrapper class that wraps the input module, adds QuantStub and
Reported by Pylint.
Line: 28
Column: 5
def __init__(self):
super(DeQuantStub, self).__init__()
def forward(self, x):
return x
class QuantWrapper(nn.Module):
r"""A wrapper class that wraps the input module, adds QuantStub and
Reported by Pylint.
torch/utils/benchmark/examples/sparse/compare.py
13 issues
Line: 10
Column: 1
import sys
import time
import torch
import torch.utils.benchmark as benchmark_utils
class FauxTorch(object):
"""Emulate different versions of pytorch.
Reported by Pylint.
Line: 11
Column: 1
import time
import torch
import torch.utils.benchmark as benchmark_utils
class FauxTorch(object):
"""Emulate different versions of pytorch.
Reported by Pylint.
Line: 113
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b301-pickle
print()
comparison = benchmark_utils.Compare([
pickle.loads(i) for i in serialized_results
])
print("== Unformatted " + "=" * 80 + "\n" + "/" * 95 + "\n")
comparison.print()
Reported by Bandit.
Line: 6
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b403-import-pickle
$ python -m examples.sparse.compare
"""
import pickle
import sys
import time
import torch
import torch.utils.benchmark as benchmark_utils
Reported by Bandit.
Line: 14
Column: 1
import torch.utils.benchmark as benchmark_utils
class FauxTorch(object):
"""Emulate different versions of pytorch.
In normal circumstances this would be done with multiple processes
writing serialized measurements, but this simplifies that model to
make the example clearer.
Reported by Pylint.
Line: 14
Column: 1
import torch.utils.benchmark as benchmark_utils
class FauxTorch(object):
"""Emulate different versions of pytorch.
In normal circumstances this would be done with multiple processes
writing serialized measurements, but this simplifies that model to
make the example clearer.
Reported by Pylint.
Line: 26
Column: 5
self._extra_ns_per_element = extra_ns_per_element
@property
def sparse(self):
return self.Sparse(self._real_torch, self._extra_ns_per_element)
class Sparse:
def __init__(self, real_torch, extra_ns_per_element):
self._real_torch = real_torch
Reported by Pylint.
Line: 29
Column: 5
def sparse(self):
return self.Sparse(self._real_torch, self._extra_ns_per_element)
class Sparse:
def __init__(self, real_torch, extra_ns_per_element):
self._real_torch = real_torch
self._extra_ns_per_element = extra_ns_per_element
def extra_overhead(self, result):
Reported by Pylint.
Line: 34
Column: 9
self._real_torch = real_torch
self._extra_ns_per_element = extra_ns_per_element
def extra_overhead(self, result):
# time.sleep has a ~65 us overhead, so only fake a
# per-element overhead if numel is large enough.
size = sum(result.size())
if size > 5000:
time.sleep(size * self._extra_ns_per_element * 1e-9)
Reported by Pylint.
Line: 42
Column: 9
time.sleep(size * self._extra_ns_per_element * 1e-9)
return result
def mm(self, *args, **kwargs):
return self.extra_overhead(self._real_torch.sparse.mm(*args, **kwargs))
def generate_coo_data(size, sparse_dim, nnz, dtype, device):
"""
Parameters
Reported by Pylint.
torch/jit/_state.py
13 issues
Line: 57
Column: 14
# The Python CompilationUnit. All functions and modules defined in Python will
# live in here. It's defined in Python because doing in cpp creates static
# destruction order issues.
_python_cu = torch._C.CompilationUnit()
# python class => ScriptClass mapping
_script_classes = {}
_name_to_pyclass = {}
Reported by Pylint.
Line: 9
Column: 1
functionalities in `torch.jit`.
"""
import torch
import os
import weakref
class EnabledProxy:
"""Stores whether the JIT is enabled or not.
Reported by Pylint.
Line: 10
Column: 1
"""
import torch
import os
import weakref
class EnabledProxy:
"""Stores whether the JIT is enabled or not.
This is just a wrapper for a bool, so that we get reference semantics
Reported by Pylint.
Line: 23
Column: 5
"PYTORCH_JIT", True, "> Using PyTorch JIT", "> PyTorch JIT DISABLED"
)
def parse_env(self, name, default, true_message, false_message):
value = os.environ.get(name)
if value is None:
return default
if value.lower() in {"1", "true", "yes"}:
return True
Reported by Pylint.
Line: 23
Column: 5
"PYTORCH_JIT", True, "> Using PyTorch JIT", "> PyTorch JIT DISABLED"
)
def parse_env(self, name, default, true_message, false_message):
value = os.environ.get(name)
if value is None:
return default
if value.lower() in {"1", "true", "yes"}:
return True
Reported by Pylint.
Line: 27
Column: 9
value = os.environ.get(name)
if value is None:
return default
if value.lower() in {"1", "true", "yes"}:
return True
elif value.lower() in {"0", "false", "no"}:
return False
if value == "1v":
print(true_message)
Reported by Pylint.
Line: 31
Column: 9
return True
elif value.lower() in {"0", "false", "no"}:
return False
if value == "1v":
print(true_message)
return True
elif value == "0v":
print(false_message)
return False
Reported by Pylint.
Line: 46
Column: 1
_enabled = EnabledProxy()
def disable():
_enabled.enabled = False
def enable():
_enabled.enabled = True
Reported by Pylint.
Line: 50
Column: 1
_enabled.enabled = False
def enable():
_enabled.enabled = True
# The Python CompilationUnit. All functions and modules defined in Python will
# live in here. It's defined in Python because doing in cpp creates static
Reported by Pylint.
Line: 99
Column: 5
def _try_get_jit_cached_overloads(key):
qual_names = _jit_function_overload_caching.get(key, None)
if qual_names:
return [_python_cu.find_function(qual_name) for qual_name in qual_names]
else:
return None
def _set_jit_overload_cache(key, compiled_fns):
Reported by Pylint.
torch/utils/data/_utils/signal_handling.py
13 issues
Line: 35
Column: 1
import signal
import threading
from . import IS_WINDOWS
# Some of the following imported functions are not used in this file, but are to
# be used `_utils.signal_handling.XXXXX`.
from torch._C import _set_worker_pids, _remove_worker_pids # noqa: F401
from torch._C import _error_if_any_worker_fails, _set_worker_signal_handlers # noqa: F401
Reported by Pylint.
Line: 39
Column: 1
# Some of the following imported functions are not used in this file, but are to
# be used `_utils.signal_handling.XXXXX`.
from torch._C import _set_worker_pids, _remove_worker_pids # noqa: F401
from torch._C import _error_if_any_worker_fails, _set_worker_signal_handlers # noqa: F401
_SIGCHLD_handler_set = False
r"""Whether SIGCHLD handler is set for DataLoader worker failures. Only one
handler needs to be set for all DataLoaders in a process."""
Reported by Pylint.
Line: 39
Column: 1
# Some of the following imported functions are not used in this file, but are to
# be used `_utils.signal_handling.XXXXX`.
from torch._C import _set_worker_pids, _remove_worker_pids # noqa: F401
from torch._C import _error_if_any_worker_fails, _set_worker_signal_handlers # noqa: F401
_SIGCHLD_handler_set = False
r"""Whether SIGCHLD handler is set for DataLoader worker failures. Only one
handler needs to be set for all DataLoaders in a process."""
Reported by Pylint.
Line: 40
Column: 1
# Some of the following imported functions are not used in this file, but are to
# be used `_utils.signal_handling.XXXXX`.
from torch._C import _set_worker_pids, _remove_worker_pids # noqa: F401
from torch._C import _error_if_any_worker_fails, _set_worker_signal_handlers # noqa: F401
_SIGCHLD_handler_set = False
r"""Whether SIGCHLD handler is set for DataLoader worker failures. Only one
handler needs to be set for all DataLoaders in a process."""
Reported by Pylint.
Line: 52
Column: 51
if IS_WINDOWS:
return
# can't set signal in child threads
if not isinstance(threading.current_thread(), threading._MainThread): # type: ignore[attr-defined]
return
global _SIGCHLD_handler_set
if _SIGCHLD_handler_set:
return
previous_handler = signal.getsignal(signal.SIGCHLD)
Reported by Pylint.
Line: 54
Column: 5
# can't set signal in child threads
if not isinstance(threading.current_thread(), threading._MainThread): # type: ignore[attr-defined]
return
global _SIGCHLD_handler_set
if _SIGCHLD_handler_set:
return
previous_handler = signal.getsignal(signal.SIGCHLD)
if not callable(previous_handler):
# This doesn't catch default handler, but SIGCHLD default handler is a
Reported by Pylint.
Line: 39
Column: 1
# Some of the following imported functions are not used in this file, but are to
# be used `_utils.signal_handling.XXXXX`.
from torch._C import _set_worker_pids, _remove_worker_pids # noqa: F401
from torch._C import _error_if_any_worker_fails, _set_worker_signal_handlers # noqa: F401
_SIGCHLD_handler_set = False
r"""Whether SIGCHLD handler is set for DataLoader worker failures. Only one
handler needs to be set for all DataLoaders in a process."""
Reported by Pylint.
Line: 40
Column: 1
# Some of the following imported functions are not used in this file, but are to
# be used `_utils.signal_handling.XXXXX`.
from torch._C import _set_worker_pids, _remove_worker_pids # noqa: F401
from torch._C import _error_if_any_worker_fails, _set_worker_signal_handlers # noqa: F401
_SIGCHLD_handler_set = False
r"""Whether SIGCHLD handler is set for DataLoader worker failures. Only one
handler needs to be set for all DataLoaders in a process."""
Reported by Pylint.
Line: 42
Column: 1
from torch._C import _set_worker_pids, _remove_worker_pids # noqa: F401
from torch._C import _error_if_any_worker_fails, _set_worker_signal_handlers # noqa: F401
_SIGCHLD_handler_set = False
r"""Whether SIGCHLD handler is set for DataLoader worker failures. Only one
handler needs to be set for all DataLoaders in a process."""
def _set_SIGCHLD_handler():
Reported by Pylint.
Line: 47
Column: 1
handler needs to be set for all DataLoaders in a process."""
def _set_SIGCHLD_handler():
# Windows doesn't support SIGCHLD handler
if IS_WINDOWS:
return
# can't set signal in child threads
if not isinstance(threading.current_thread(), threading._MainThread): # type: ignore[attr-defined]
Reported by Pylint.
torch/jit/_check.py
13 issues
Line: 63
Column: 9
def check(self, nn_module: torch.nn.Module) -> None:
# Check if we have a Python version <3.8
self.using_deprecated_ast: bool = sys.version_info < (3, 8)
source_lines = textwrap.dedent(inspect.getsource(nn_module.__class__.__init__))
# This AST only contains the `__init__` method of the nn.Module
init_ast = ast.parse(source_lines)
Reported by Pylint.
Line: 71
Column: 9
init_ast = ast.parse(source_lines)
# Get items annotated in the class body
self.class_level_annotations = list(nn_module.__annotations__.keys())
# Flag for later
self.visiting_class_level_ann = False
self.visit(init_ast)
Reported by Pylint.
Line: 74
Column: 9
self.class_level_annotations = list(nn_module.__annotations__.keys())
# Flag for later
self.visiting_class_level_ann = False
self.visit(init_ast)
def _is_empty_container(self, node: ast.AST, ann_type: str) -> bool:
if ann_type == "List":
Reported by Pylint.
Line: 122
Column: 17
try:
if (isinstance(node.value, ast.Call)
and node.targets[0].attr in self.class_level_annotations):
self.visiting_class_level_ann = True
except AttributeError:
return
self.generic_visit(node)
self.visiting_class_level_ann = False
Reported by Pylint.
Line: 126
Column: 9
except AttributeError:
return
self.generic_visit(node)
self.visiting_class_level_ann = False
def visit_AnnAssign(self, node):
"""
Visit an AnnAssign node in an ``nn.Module``'s ``__init__``
method and see if it conforms to our attribute annotation rules.
Reported by Pylint.
Line: 145
Column: 3
if node.target.attr in self.class_level_annotations:
return
# TODO @ansley: add `Union` once landed
# NB: Even though `Tuple` is a "container", we don't want to
# check for it here. `Tuple` functions as an type with an
# "infinite" number of subtypes, in the sense that you can have
# `Tuple[())]`, `Tuple[T1]`, `Tuple[T2]`, `Tuple[T1, T2]`,
Reported by Pylint.
Line: 1
Column: 1
import ast
import inspect
import sys
import textwrap
import torch
import warnings
class AttributeTypeIsSupportedChecker(ast.NodeVisitor):
Reported by Pylint.
Line: 7
Column: 1
import sys
import textwrap
import torch
import warnings
class AttributeTypeIsSupportedChecker(ast.NodeVisitor):
"""
Checks the ``__init__`` method of a given ``nn.Module`` to ensure
that all instance-level attributes can be properly initialized.
Reported by Pylint.
Line: 61
Column: 5
``__init__`` method we wish to check
"""
def check(self, nn_module: torch.nn.Module) -> None:
# Check if we have a Python version <3.8
self.using_deprecated_ast: bool = sys.version_info < (3, 8)
source_lines = textwrap.dedent(inspect.getsource(nn_module.__class__.__init__))
Reported by Pylint.
Line: 78
Column: 5
self.visit(init_ast)
def _is_empty_container(self, node: ast.AST, ann_type: str) -> bool:
if ann_type == "List":
# Assigning `[]` to a `List` type gives you a Node where
# value=List(elts=[], ctx=Load())
if not isinstance(node, ast.List):
return False
Reported by Pylint.