The following issues were found:
torch/nn/modules/normalization.py
39 issues
Line: 4
Column: 1
import torch
import numbers
from torch.nn.parameter import Parameter
from .module import Module
from ._functions import CrossMapLRN2d as _cross_map_lrn2d
from .. import functional as F
from .. import init
from torch import Tensor, Size
Reported by Pylint.
Line: 5
Column: 1
import numbers
from torch.nn.parameter import Parameter
from .module import Module
from ._functions import CrossMapLRN2d as _cross_map_lrn2d
from .. import functional as F
from .. import init
from torch import Tensor, Size
from typing import Union, List, Tuple
Reported by Pylint.
Line: 6
Column: 1
from torch.nn.parameter import Parameter
from .module import Module
from ._functions import CrossMapLRN2d as _cross_map_lrn2d
from .. import functional as F
from .. import init
from torch import Tensor, Size
from typing import Union, List, Tuple
Reported by Pylint.
Line: 7
Column: 1
from .module import Module
from ._functions import CrossMapLRN2d as _cross_map_lrn2d
from .. import functional as F
from .. import init
from torch import Tensor, Size
from typing import Union, List, Tuple
Reported by Pylint.
Line: 9
Column: 1
from .. import functional as F
from .. import init
from torch import Tensor, Size
from typing import Union, List, Tuple
class LocalResponseNorm(Module):
r"""Applies local response normalization over an input signal composed
Reported by Pylint.
Line: 175
Column: 37
self.eps = eps
self.elementwise_affine = elementwise_affine
if self.elementwise_affine:
self.weight = Parameter(torch.empty(self.normalized_shape, **factory_kwargs))
self.bias = Parameter(torch.empty(self.normalized_shape, **factory_kwargs))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
Reported by Pylint.
Line: 176
Column: 35
self.elementwise_affine = elementwise_affine
if self.elementwise_affine:
self.weight = Parameter(torch.empty(self.normalized_shape, **factory_kwargs))
self.bias = Parameter(torch.empty(self.normalized_shape, **factory_kwargs))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
Reported by Pylint.
Line: 254
Column: 37
self.eps = eps
self.affine = affine
if self.affine:
self.weight = Parameter(torch.empty(num_channels, **factory_kwargs))
self.bias = Parameter(torch.empty(num_channels, **factory_kwargs))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
Reported by Pylint.
Line: 255
Column: 35
self.affine = affine
if self.affine:
self.weight = Parameter(torch.empty(num_channels, **factory_kwargs))
self.bias = Parameter(torch.empty(num_channels, **factory_kwargs))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
Reported by Pylint.
Line: 54
Column: 23
self.beta = beta
self.k = k
def forward(self, input: Tensor) -> Tensor:
return F.local_response_norm(input, self.size, self.alpha, self.beta,
self.k)
def extra_repr(self):
return '{size}, alpha={alpha}, beta={beta}, k={k}'.format(**self.__dict__)
Reported by Pylint.
test/jit/test_hash.py
39 issues
Line: 4
Column: 1
import os
import sys
import torch
from typing import Tuple, List
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
Reported by Pylint.
Line: 11
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == "__main__":
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
Reported by Pylint.
Line: 1
Column: 1
import os
import sys
import torch
from typing import Tuple, List
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
Reported by Pylint.
Line: 6
Column: 1
import torch
from typing import Tuple, List
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
Reported by Pylint.
Line: 11
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == "__main__":
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
Reported by Pylint.
Line: 18
Column: 1
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestHash(JitTestCase):
def test_hash_tuple(self):
def fn(t1: Tuple[int, int], t2: Tuple[int, int]) -> bool:
return hash(t1) == hash(t2)
self.checkScript(fn, ((1, 2), (1, 2)))
Reported by Pylint.
Line: 19
Column: 5
"instead.")
class TestHash(JitTestCase):
def test_hash_tuple(self):
def fn(t1: Tuple[int, int], t2: Tuple[int, int]) -> bool:
return hash(t1) == hash(t2)
self.checkScript(fn, ((1, 2), (1, 2)))
self.checkScript(fn, ((1, 2), (3, 4)))
Reported by Pylint.
Line: 20
Column: 9
class TestHash(JitTestCase):
def test_hash_tuple(self):
def fn(t1: Tuple[int, int], t2: Tuple[int, int]) -> bool:
return hash(t1) == hash(t2)
self.checkScript(fn, ((1, 2), (1, 2)))
self.checkScript(fn, ((1, 2), (3, 4)))
self.checkScript(fn, ((1, 2), (2, 1)))
Reported by Pylint.
Line: 20
Column: 9
class TestHash(JitTestCase):
def test_hash_tuple(self):
def fn(t1: Tuple[int, int], t2: Tuple[int, int]) -> bool:
return hash(t1) == hash(t2)
self.checkScript(fn, ((1, 2), (1, 2)))
self.checkScript(fn, ((1, 2), (3, 4)))
self.checkScript(fn, ((1, 2), (2, 1)))
Reported by Pylint.
Line: 20
Column: 9
class TestHash(JitTestCase):
def test_hash_tuple(self):
def fn(t1: Tuple[int, int], t2: Tuple[int, int]) -> bool:
return hash(t1) == hash(t2)
self.checkScript(fn, ((1, 2), (1, 2)))
self.checkScript(fn, ((1, 2), (3, 4)))
self.checkScript(fn, ((1, 2), (2, 1)))
Reported by Pylint.
torch/quantization/qconfig.py
39 issues
Line: 2
Column: 1
from collections import namedtuple
from .observer import (HistogramObserver, MovingAverageMinMaxObserver,
PlaceholderObserver, default_debug_observer,
default_dynamic_quant_observer,
default_float_qparams_observer, default_observer,
default_per_channel_weight_observer,
default_placeholder_observer, default_weight_observer)
from .fake_quantize import (FakeQuantize, default_fake_quant,
default_per_channel_weight_fake_quant,
Reported by Pylint.
Line: 8
Column: 1
default_float_qparams_observer, default_observer,
default_per_channel_weight_observer,
default_placeholder_observer, default_weight_observer)
from .fake_quantize import (FakeQuantize, default_fake_quant,
default_per_channel_weight_fake_quant,
default_weight_fake_quant, default_fused_act_fake_quant, default_fused_wt_fake_quant,
FusedMovingAvgObsFakeQuantize, default_fused_per_channel_wt_fake_quant)
import torch
import torch.nn as nn
Reported by Pylint.
Line: 76
Column: 89
default_dynamic_qconfig = QConfigDynamic(activation=default_dynamic_quant_observer,
weight=default_weight_observer)
float16_dynamic_qconfig = QConfigDynamic(activation=PlaceholderObserver.with_args(dtype=torch.float32),
weight=PlaceholderObserver.with_args(dtype=torch.float16))
float16_static_qconfig = QConfigDynamic(activation=PlaceholderObserver.with_args(dtype=torch.float16),
weight=PlaceholderObserver.with_args(dtype=torch.float16))
per_channel_dynamic_qconfig = QConfigDynamic(activation=default_dynamic_quant_observer,
weight=default_per_channel_weight_observer)
Reported by Pylint.
Line: 77
Column: 85
default_dynamic_qconfig = QConfigDynamic(activation=default_dynamic_quant_observer,
weight=default_weight_observer)
float16_dynamic_qconfig = QConfigDynamic(activation=PlaceholderObserver.with_args(dtype=torch.float32),
weight=PlaceholderObserver.with_args(dtype=torch.float16))
float16_static_qconfig = QConfigDynamic(activation=PlaceholderObserver.with_args(dtype=torch.float16),
weight=PlaceholderObserver.with_args(dtype=torch.float16))
per_channel_dynamic_qconfig = QConfigDynamic(activation=default_dynamic_quant_observer,
weight=default_per_channel_weight_observer)
Reported by Pylint.
Line: 78
Column: 88
weight=default_weight_observer)
float16_dynamic_qconfig = QConfigDynamic(activation=PlaceholderObserver.with_args(dtype=torch.float32),
weight=PlaceholderObserver.with_args(dtype=torch.float16))
float16_static_qconfig = QConfigDynamic(activation=PlaceholderObserver.with_args(dtype=torch.float16),
weight=PlaceholderObserver.with_args(dtype=torch.float16))
per_channel_dynamic_qconfig = QConfigDynamic(activation=default_dynamic_quant_observer,
weight=default_per_channel_weight_observer)
# TODO: this is weight only quant, change this to QConfigWeightOnly
Reported by Pylint.
Line: 79
Column: 84
float16_dynamic_qconfig = QConfigDynamic(activation=PlaceholderObserver.with_args(dtype=torch.float32),
weight=PlaceholderObserver.with_args(dtype=torch.float16))
float16_static_qconfig = QConfigDynamic(activation=PlaceholderObserver.with_args(dtype=torch.float16),
weight=PlaceholderObserver.with_args(dtype=torch.float16))
per_channel_dynamic_qconfig = QConfigDynamic(activation=default_dynamic_quant_observer,
weight=default_per_channel_weight_observer)
# TODO: this is weight only quant, change this to QConfigWeightOnly
# or remove the QConfigDynamic later
Reported by Pylint.
Line: 83
Column: 3
per_channel_dynamic_qconfig = QConfigDynamic(activation=default_dynamic_quant_observer,
weight=default_per_channel_weight_observer)
# TODO: this is weight only quant, change this to QConfigWeightOnly
# or remove the QConfigDynamic later
float_qparams_weight_only_qconfig = QConfigDynamic(
activation=default_placeholder_observer,
weight=default_float_qparams_observer)
Reported by Pylint.
Line: 1
Column: 1
from collections import namedtuple
from .observer import (HistogramObserver, MovingAverageMinMaxObserver,
PlaceholderObserver, default_debug_observer,
default_dynamic_quant_observer,
default_float_qparams_observer, default_observer,
default_per_channel_weight_observer,
default_placeholder_observer, default_weight_observer)
from .fake_quantize import (FakeQuantize, default_fake_quant,
default_per_channel_weight_fake_quant,
Reported by Pylint.
Line: 10
Column: 1
default_placeholder_observer, default_weight_observer)
from .fake_quantize import (FakeQuantize, default_fake_quant,
default_per_channel_weight_fake_quant,
default_weight_fake_quant, default_fused_act_fake_quant, default_fused_wt_fake_quant,
FusedMovingAvgObsFakeQuantize, default_fused_per_channel_wt_fake_quant)
import torch
import torch.nn as nn
from typing import Union, Optional, Any
Reported by Pylint.
Line: 12
Column: 1
default_per_channel_weight_fake_quant,
default_weight_fake_quant, default_fused_act_fake_quant, default_fused_wt_fake_quant,
FusedMovingAvgObsFakeQuantize, default_fused_per_channel_wt_fake_quant)
import torch
import torch.nn as nn
from typing import Union, Optional, Any
class QConfig(namedtuple('QConfig', ['activation', 'weight'])):
Reported by Pylint.
torch/onnx/__init__.py
39 issues
Line: 3
Column: 23
import torch._C as _C
TensorProtoDataType = _C._onnx.TensorProtoDataType
OperatorExportTypes = _C._onnx.OperatorExportTypes
TrainingMode = _C._onnx.TrainingMode
PYTORCH_ONNX_CAFFE2_BUNDLE = _C._onnx.PYTORCH_ONNX_CAFFE2_BUNDLE
ONNX_ARCHIVE_MODEL_PROTO_NAME = "__MODEL_PROTO"
Reported by Pylint.
Line: 4
Column: 23
import torch._C as _C
TensorProtoDataType = _C._onnx.TensorProtoDataType
OperatorExportTypes = _C._onnx.OperatorExportTypes
TrainingMode = _C._onnx.TrainingMode
PYTORCH_ONNX_CAFFE2_BUNDLE = _C._onnx.PYTORCH_ONNX_CAFFE2_BUNDLE
ONNX_ARCHIVE_MODEL_PROTO_NAME = "__MODEL_PROTO"
Reported by Pylint.
Line: 5
Column: 16
TensorProtoDataType = _C._onnx.TensorProtoDataType
OperatorExportTypes = _C._onnx.OperatorExportTypes
TrainingMode = _C._onnx.TrainingMode
PYTORCH_ONNX_CAFFE2_BUNDLE = _C._onnx.PYTORCH_ONNX_CAFFE2_BUNDLE
ONNX_ARCHIVE_MODEL_PROTO_NAME = "__MODEL_PROTO"
# TODO: Update these variables when there
Reported by Pylint.
Line: 6
Column: 30
TensorProtoDataType = _C._onnx.TensorProtoDataType
OperatorExportTypes = _C._onnx.OperatorExportTypes
TrainingMode = _C._onnx.TrainingMode
PYTORCH_ONNX_CAFFE2_BUNDLE = _C._onnx.PYTORCH_ONNX_CAFFE2_BUNDLE
ONNX_ARCHIVE_MODEL_PROTO_NAME = "__MODEL_PROTO"
# TODO: Update these variables when there
# is a new ir_version and producer_version
Reported by Pylint.
Line: 10
Column: 3
ONNX_ARCHIVE_MODEL_PROTO_NAME = "__MODEL_PROTO"
# TODO: Update these variables when there
# is a new ir_version and producer_version
# and use these values in the exporter
ir_version = _C._onnx.IR_VERSION
producer_name = "pytorch"
producer_version = _C._onnx.PRODUCER_VERSION
Reported by Pylint.
Line: 13
Column: 14
# TODO: Update these variables when there
# is a new ir_version and producer_version
# and use these values in the exporter
ir_version = _C._onnx.IR_VERSION
producer_name = "pytorch"
producer_version = _C._onnx.PRODUCER_VERSION
constant_folding_opset_versions = [9, 10, 11, 12, 13]
Reported by Pylint.
Line: 15
Column: 20
# and use these values in the exporter
ir_version = _C._onnx.IR_VERSION
producer_name = "pytorch"
producer_version = _C._onnx.PRODUCER_VERSION
constant_folding_opset_versions = [9, 10, 11, 12, 13]
class ExportTypes:
PROTOBUF_FILE = 1
Reported by Pylint.
Line: 28
Column: 14
def _export(*args, **kwargs):
from torch.onnx import utils
result = utils._export(*args, **kwargs)
return result
def export(model, args, f, export_params=True, verbose=False, training=TrainingMode.EVAL,
input_names=None, output_names=None, aten=False,
Reported by Pylint.
Line: 323
Column: 12
def _export_to_pretty_string(*args, **kwargs):
from torch.onnx import utils
return utils._export_to_pretty_string(*args, **kwargs)
def _optimize_trace(graph, operator_export_type):
from torch.onnx import utils
return utils._optimize_graph(graph, operator_export_type)
Reported by Pylint.
Line: 328
Column: 12
def _optimize_trace(graph, operator_export_type):
from torch.onnx import utils
return utils._optimize_graph(graph, operator_export_type)
def select_model_mode_for_export(model, mode):
r"""
A context manager to temporarily set the training mode of ``model``
Reported by Pylint.
test/cpp_api_parity/functional_impl_check.py
39 issues
Line: 23
Column: 1
import pprint
import os
import torch
from cpp_api_parity.utils import TorchNNFunctionalTestParams, TORCH_NN_COMMON_TEST_HARNESS, \
compile_cpp_code_inline, set_python_tensors_requires_grad, move_python_tensors_to_device, \
add_test, compute_cpp_args_construction_stmts_and_forward_arg_symbols, serialize_arg_dict_as_script_module, \
compute_arg_dict, decorate_test_fn, compute_temp_file_path, generate_error_msg, is_torch_nn_functional_test, \
try_remove_folder
Reported by Pylint.
Line: 61
Column: 17
}
""")
def run_forward(unit_test_class, test_params):
device = test_params.device
inputs = set_python_tensors_requires_grad(move_python_tensors_to_device(
[arg_value for _, arg_value in test_params.arg_dict['input']], device))
inputs += move_python_tensors_to_device(
Reported by Pylint.
Line: 202
Column: 94
def test_fn(self):
test_forward(
unit_test_class=self, test_params=unit_test_class.functional_test_params_map[self._testMethodName])
test_fn = decorate_test_fn(
test_fn=test_fn,
test_cuda=test_params_dict.get('test_cuda', True),
has_impl_parity=parity_table['torch::nn::functional'][functional_full_name][0] and
Reported by Pylint.
Line: 228
Column: 9
assert len(unit_test_class.functional_test_params_map) > 0
cpp_sources = TORCH_NN_COMMON_TEST_HARNESS + SAMPLE_FUNCTIONAL_CPP_SOURCE
functions = []
for test_name, test_params in unit_test_class.functional_test_params_map.items():
cpp_sources += generate_test_cpp_sources(test_params=test_params, template=TORCH_NN_FUNCTIONAL_TEST_FORWARD)
functions.append('{}_test_forward'.format(test_params.functional_variant_name))
if print_cpp_source:
print(cpp_sources)
Reported by Pylint.
Line: 1
Column: 1
# The purpose of this test is to check that we have implementation parity between
# a Python `torch.nn.functional` function and its corresponding C++ `torch::nn::functional`
# function. Concretely, this test does the following:
#
# 1. Get a test params dict from common_nn.py, run forward pass on the Python functional
# created using the test params.
#
# 2. Serialize the Python functional's forward input arguments, deserialize them
# in C++ and use them as input for the C++ functional's forward pass.
Reported by Pylint.
Line: 26
Column: 1
import torch
from cpp_api_parity.utils import TorchNNFunctionalTestParams, TORCH_NN_COMMON_TEST_HARNESS, \
compile_cpp_code_inline, set_python_tensors_requires_grad, move_python_tensors_to_device, \
add_test, compute_cpp_args_construction_stmts_and_forward_arg_symbols, serialize_arg_dict_as_script_module, \
compute_arg_dict, decorate_test_fn, compute_temp_file_path, generate_error_msg, is_torch_nn_functional_test, \
try_remove_folder
from cpp_api_parity.sample_functional import SAMPLE_FUNCTIONAL_CPP_SOURCE
# Expected substitutions:
Reported by Pylint.
Line: 27
Column: 1
from cpp_api_parity.utils import TorchNNFunctionalTestParams, TORCH_NN_COMMON_TEST_HARNESS, \
compile_cpp_code_inline, set_python_tensors_requires_grad, move_python_tensors_to_device, \
add_test, compute_cpp_args_construction_stmts_and_forward_arg_symbols, serialize_arg_dict_as_script_module, \
compute_arg_dict, decorate_test_fn, compute_temp_file_path, generate_error_msg, is_torch_nn_functional_test, \
try_remove_folder
from cpp_api_parity.sample_functional import SAMPLE_FUNCTIONAL_CPP_SOURCE
# Expected substitutions:
#
Reported by Pylint.
Line: 61
Column: 1
}
""")
def run_forward(unit_test_class, test_params):
device = test_params.device
inputs = set_python_tensors_requires_grad(move_python_tensors_to_device(
[arg_value for _, arg_value in test_params.arg_dict['input']], device))
inputs += move_python_tensors_to_device(
Reported by Pylint.
Line: 79
Column: 1
return python_output
def test_forward(unit_test_class, test_params):
functional_variant_name = test_params.functional_variant_name
cpp_tmp_folder = test_params.cpp_tmp_folder
# Remove the temporary folder if it exists already
try_remove_folder(cpp_tmp_folder)
os.mkdir(cpp_tmp_folder)
Reported by Pylint.
Line: 97
Column: 1
cpp_test_fn = getattr(unit_test_class.functional_impl_check_cpp_module, cpp_test_name)
def run_cpp_test_fn_and_check_output():
forward_output_file_path = compute_temp_file_path(cpp_tmp_folder, functional_variant_name, 'forward_output')
cpp_test_fn(arg_dict_file_path, forward_output_file_path)
cpp_output = torch.load(forward_output_file_path)
# Check that forward outputs are equal
Reported by Pylint.
caffe2/python/operator_test/lengths_reducer_fused_nbit_rowwise_ops_test.py
38 issues
Line: 4
Column: 1
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
from hypothesis import given
Reported by Pylint.
Line: 7
Column: 1
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
from hypothesis import given
class TestLengthsReducerOpsFusedNBitRowwise(hu.HypothesisTestCase):
@given(
num_rows=st.integers(1, 20),
Reported by Pylint.
Line: 1
Column: 1
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
from hypothesis import given
Reported by Pylint.
Line: 10
Column: 1
from hypothesis import given
class TestLengthsReducerOpsFusedNBitRowwise(hu.HypothesisTestCase):
@given(
num_rows=st.integers(1, 20),
blocksize=st.sampled_from([8, 12, 16, 32, 64, 96, 128]),
weighted=st.booleans(),
seed=st.integers(0, 2 ** 32 - 1),
Reported by Pylint.
Line: 19
Column: 5
empty_indices=st.booleans(),
engine=st.sampled_from(["", "GREEDY"]),
bit_rate=st.sampled_from([2, 4]),
)
def test_sparse_lengths_sum(
self, num_rows, blocksize, weighted, seed, empty_indices, engine, bit_rate
):
net = core.Net("bench")
Reported by Pylint.
Line: 19
Column: 5
empty_indices=st.booleans(),
engine=st.sampled_from(["", "GREEDY"]),
bit_rate=st.sampled_from([2, 4]),
)
def test_sparse_lengths_sum(
self, num_rows, blocksize, weighted, seed, empty_indices, engine, bit_rate
):
net = core.Net("bench")
Reported by Pylint.
Line: 19
Column: 5
empty_indices=st.booleans(),
engine=st.sampled_from(["", "GREEDY"]),
bit_rate=st.sampled_from([2, 4]),
)
def test_sparse_lengths_sum(
self, num_rows, blocksize, weighted, seed, empty_indices, engine, bit_rate
):
net = core.Net("bench")
Reported by Pylint.
Line: 19
Column: 5
empty_indices=st.booleans(),
engine=st.sampled_from(["", "GREEDY"]),
bit_rate=st.sampled_from([2, 4]),
)
def test_sparse_lengths_sum(
self, num_rows, blocksize, weighted, seed, empty_indices, engine, bit_rate
):
net = core.Net("bench")
Reported by Pylint.
Line: 45
Column: 9
)
weights = np.random.uniform(size=[len(indices)]).astype(np.float32)
op = core.CreateOperator(
"FloatToFused" + str(bit_rate) + "BitRowwiseQuantized",
"input_data",
"quantized_data",
engine=engine,
)
Reported by Pylint.
Line: 52
Column: 9
engine=engine,
)
net.Proto().op.extend([op])
op = core.CreateOperator(
"Fused" + str(bit_rate) + "BitRowwiseQuantizedToFloat",
"quantized_data",
"dequantized_data",
)
net.Proto().op.extend([op])
Reported by Pylint.
torch/distributed/rpc/internal.py
38 issues
Line: 12
Column: 1
import torch
import torch.distributed as dist
from torch._C._distributed_rpc import _get_current_rpc_agent
# Thread local tensor tables to store tensors while pickling torch.Tensor
# objects
_thread_local_tensor_tables = threading.local()
Reported by Pylint.
Line: 266
Column: 10
profile_key = "rpc_{}#{}({} -> {})".format(
exec_type.value, str(func_name), current_worker_name, dest_worker_name
)
rf = torch.autograd._RecordFunction() # type: ignore[attr-defined]
torch.autograd._run_before_callbacks(rf, profile_key) # type: ignore[attr-defined]
return rf
PythonUDF = collections.namedtuple("PythonUDF", ["func", "args", "kwargs"])
Reported by Pylint.
Line: 267
Column: 5
exec_type.value, str(func_name), current_worker_name, dest_worker_name
)
rf = torch.autograd._RecordFunction() # type: ignore[attr-defined]
torch.autograd._run_before_callbacks(rf, profile_key) # type: ignore[attr-defined]
return rf
PythonUDF = collections.namedtuple("PythonUDF", ["func", "args", "kwargs"])
RemoteException = collections.namedtuple("RemoteException", ["msg", "exception_type"])
Reported by Pylint.
Line: 54
Column: 9
@classmethod
def _tensor_receiver(cls, tensor_index):
global _thread_local_tensor_tables
return _thread_local_tensor_tables.recv_tables[tensor_index]
def _tensor_reducer(self, tensor):
global _thread_local_tensor_tables
_thread_local_tensor_tables.send_tables.append(tensor)
Reported by Pylint.
Line: 58
Column: 9
return _thread_local_tensor_tables.recv_tables[tensor_index]
def _tensor_reducer(self, tensor):
global _thread_local_tensor_tables
_thread_local_tensor_tables.send_tables.append(tensor)
tensor_index = len(_thread_local_tensor_tables.send_tables) - 1
return (_InternalRPCPickler._tensor_receiver, (tensor_index,))
@classmethod
Reported by Pylint.
Line: 65
Column: 16
@classmethod
def _py_rref_receiver(cls, rref_fork_data):
return dist.rpc.PyRRef._deserialize(rref_fork_data)
def _py_rref_reducer(self, py_rref):
rref_fork_data = py_rref._serialize()
return (_InternalRPCPickler._py_rref_receiver, (rref_fork_data,))
Reported by Pylint.
Line: 68
Column: 26
return dist.rpc.PyRRef._deserialize(rref_fork_data)
def _py_rref_reducer(self, py_rref):
rref_fork_data = py_rref._serialize()
return (_InternalRPCPickler._py_rref_receiver, (rref_fork_data,))
def _rref_reducer(self, rref):
return self._py_rref_reducer(rref)
Reported by Pylint.
Line: 126
Column: 9
p.dispatch_table[class_name] = self._class_reducer_dict[class_name] # type: ignore[index]
# save _thread_local_tensor_tables.send_tables if it is in nested call
global _thread_local_tensor_tables
if hasattr(_thread_local_tensor_tables, "send_tables"):
old_send_tables = _thread_local_tensor_tables.send_tables
else:
old_send_tables = None
_thread_local_tensor_tables.send_tables = []
Reported by Pylint.
Line: 150
Column: 9
Deserialize binary string + tensor table to original obj
"""
# save _thread_local_tensor_tables.recv_tables if it is in nested call
global _thread_local_tensor_tables
if hasattr(_thread_local_tensor_tables, "recv_tables"):
old_recv_tables = _thread_local_tensor_tables.recv_tables
else:
old_recv_tables = None
_thread_local_tensor_tables.recv_tables = tensor_table
Reported by Pylint.
Line: 205
Column: 12
if isinstance(python_udf, AttributeError):
raise python_udf
result = python_udf.func(*python_udf.args, **python_udf.kwargs)
except Exception as e:
# except str = exception info + traceback string
except_str = (
f"On {_get_current_rpc_agent().get_worker_info()}:\n"
f"{repr(e)}\n{traceback.format_exc()}"
)
Reported by Pylint.
torch/testing/_internal/distributed/rpc/examples/reinforcement_learning_rpc_test.py
38 issues
Line: 80
Column: 16
def reset(self):
self.iter = 0
return torch.randn(self.state_dim)
def step(self, action):
self.iter += 1
state = torch.randn(self.state_dim)
reward = torch.rand(1).item() * self.reward_threshold
Reported by Pylint.
Line: 84
Column: 17
def step(self, action):
self.iter += 1
state = torch.randn(self.state_dim)
reward = torch.rand(1).item() * self.reward_threshold
done = self.iter >= self.num_iters
info = {}
return state, reward, done, info
Reported by Pylint.
Line: 85
Column: 18
def step(self, action):
self.iter += 1
state = torch.randn(self.state_dim)
reward = torch.rand(1).item() * self.reward_threshold
done = self.iter >= self.num_iters
info = {}
return state, reward, done, info
Reported by Pylint.
Line: 211
Column: 19
for r in rewards[::-1]:
R = r + GAMMA * R
returns.insert(0, R)
returns = torch.tensor(returns)
returns = (returns - returns.mean()) / (returns.std() + self.eps)
for log_prob, R in zip(probs, returns):
policy_loss.append(-log_prob * R)
self.optimizer.zero_grad()
policy_loss = torch.cat(policy_loss).sum()
Reported by Pylint.
Line: 216
Column: 23
for log_prob, R in zip(probs, returns):
policy_loss.append(-log_prob * R)
self.optimizer.zero_grad()
policy_loss = torch.cat(policy_loss).sum()
policy_loss.backward()
self.optimizer.step()
return min_reward
Reported by Pylint.
Line: 235
Column: 12
class ReinforcementLearningRpcTest(RpcAgentTestFixture):
@dist_init(setup_rpc=False)
def test_rl_rpc(self):
if self.rank == 0:
# Rank 0 is the agent.
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
Reported by Pylint.
Line: 238
Column: 34
if self.rank == 0:
# Rank 0 is the agent.
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
Reported by Pylint.
Line: 240
Column: 22
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
agent = Agent(self.world_size)
run_agent(agent, n_steps=int(TOTAL_EPISODE_STEP / (self.world_size - 1)))
Reported by Pylint.
Line: 249
Column: 13
# Ensure training was run. We don't really care about whether the task was learned,
# since the purpose of the test is to check the API calls.
self.assertGreater(agent.running_reward, 0.0)
else:
# Other ranks are observers that passively wait for instructions from the agent.
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
Reported by Pylint.
Line: 253
Column: 34
else:
# Other ranks are observers that passively wait for instructions from the agent.
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
Reported by Pylint.
caffe2/python/ideep/spatial_bn_op_test.py
38 issues
Line: 6
Column: 1
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 7
Column: 1
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
Reported by Pylint.
Line: 15
Column: 22
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestSpatialBN(hu.HypothesisTestCase):
@given(size=st.integers(7, 10),
input_channels=st.integers(7, 10),
batch_size=st.integers(1, 3),
seed=st.integers(0, 65535),
Reported by Pylint.
Line: 27
Column: 53
**mu.gcs)
@settings(deadline=1000)
def test_spatialbn_test_mode(
self, size, input_channels, batch_size, seed, order, epsilon,
inplace, gc, dc):
op = core.CreateOperator(
"SpatialBN",
["X", "scale", "bias", "mean", "var"],
["X" if inplace else "Y"],
Reported by Pylint.
Line: 28
Column: 22
@settings(deadline=1000)
def test_spatialbn_test_mode(
self, size, input_channels, batch_size, seed, order, epsilon,
inplace, gc, dc):
op = core.CreateOperator(
"SpatialBN",
["X", "scale", "bias", "mean", "var"],
["X" if inplace else "Y"],
order=order,
Reported by Pylint.
Line: 38
Column: 9
epsilon=epsilon
)
def reference_spatialbn_test(X, scale, bias, mean, var):
if order == "NCHW":
scale = scale[np.newaxis, :, np.newaxis, np.newaxis]
bias = bias[np.newaxis, :, np.newaxis, np.newaxis]
mean = mean[np.newaxis, :, np.newaxis, np.newaxis]
var = var[np.newaxis, :, np.newaxis, np.newaxis]
Reported by Pylint.
Line: 68
Column: 53
inplace=st.sampled_from([True, False]),
**mu.gcs)
def test_spatialbn_train_mode(
self, size, input_channels, batch_size, seed, order, epsilon,
inplace, gc, dc):
print("dc0: {}, dc1: {}".format(dc[0], dc[1]))
op = core.CreateOperator(
"SpatialBN",
["X", "scale", "bias", "running_mean", "running_var"],
Reported by Pylint.
Line: 69
Column: 22
**mu.gcs)
def test_spatialbn_train_mode(
self, size, input_channels, batch_size, seed, order, epsilon,
inplace, gc, dc):
print("dc0: {}, dc1: {}".format(dc[0], dc[1]))
op = core.CreateOperator(
"SpatialBN",
["X", "scale", "bias", "running_mean", "running_var"],
["X" if inplace else "Y",
Reported by Pylint.
Line: 91
Column: 3
if order == "NHWC":
X = X.swapaxes(1, 2).swapaxes(2, 3)
# TODO: It looks like IDEEP spatial_bn op outputs save_var (output[4])
# as the reciprocal of CPU op's output. Need to check back and add
# output[4] for comparison
self.assertDeviceChecks(dc, op, [X, scale, bias, running_mean, running_var],
[0, 1, 2, 3])
Reported by Pylint.
Line: 107
Column: 17
@settings(deadline=None, max_examples=50)
def test_spatialbn_train_mode_gradient_check(
self, size, input_channels, batch_size, seed, order, epsilon,
gc, dc):
op = core.CreateOperator(
"SpatialBN",
["X", "scale", "bias", "mean", "var"],
["Y", "mean", "var", "saved_mean", "saved_var"],
order=order,
Reported by Pylint.
benchmarks/operator_benchmark/pt/batchnorm_test.py
38 issues
Line: 3
Column: 1
import operator_benchmark as op_bench
import torch
import torch.nn.functional as F
"""Microbenchmarks for batchnorm operator."""
# Benchmark cudnn if available
Reported by Pylint.
Line: 4
Column: 1
import operator_benchmark as op_bench
import torch
import torch.nn.functional as F
"""Microbenchmarks for batchnorm operator."""
# Benchmark cudnn if available
Reported by Pylint.
Line: 24
Column: 51
return [(*config, dict(cudnn=False)) for config in configs]
batchnorm_configs_short = cudnn_benchmark_configs(op_bench.config_list(
attr_names=["M", "N", "K"],
attrs=[
[1, 256, 3136],
],
cross_product_configs={
Reported by Pylint.
Line: 36
Column: 50
tags=["short"]
))
batchnorm_configs_long = cudnn_benchmark_configs(op_bench.cross_product_configs(
M=[2, 128],
N=[8192, 2048],
K=[1],
device=['cpu', 'cuda'],
training=[True, False],
Reported by Pylint.
Line: 46
Column: 26
))
class BatchNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device, training, cudnn):
self.inputs = {
"input_one": torch.rand(M, N, K, device=device, requires_grad=self.auto_set()),
"mean": torch.rand(N, device=device),
"var": torch.rand(N, device=device),
Reported by Pylint.
Line: 64
Column: 1
return F.batch_norm(input_one, mean, var, weight, bias, training)
op_bench.generate_pt_test(batchnorm_configs_short + batchnorm_configs_long, BatchNormBenchmark)
op_bench.generate_pt_gradient_test(batchnorm_configs_short + batchnorm_configs_long, BatchNormBenchmark)
batchnorm1d_configs_short = cudnn_benchmark_configs(op_bench.config_list(
attr_names=["N", "C"],
Reported by Pylint.
Line: 65
Column: 1
op_bench.generate_pt_test(batchnorm_configs_short + batchnorm_configs_long, BatchNormBenchmark)
op_bench.generate_pt_gradient_test(batchnorm_configs_short + batchnorm_configs_long, BatchNormBenchmark)
batchnorm1d_configs_short = cudnn_benchmark_configs(op_bench.config_list(
attr_names=["N", "C"],
attrs=[
Reported by Pylint.
Line: 68
Column: 53
op_bench.generate_pt_gradient_test(batchnorm_configs_short + batchnorm_configs_long, BatchNormBenchmark)
batchnorm1d_configs_short = cudnn_benchmark_configs(op_bench.config_list(
attr_names=["N", "C"],
attrs=[
[3136, 256],
],
cross_product_configs={
Reported by Pylint.
Line: 80
Column: 52
tags=["short"]
))
batchnorm1d_configs_long = cudnn_benchmark_configs(op_bench.cross_product_configs(
N=[2, 128],
C=[8192, 2048],
device=['cpu', 'cuda'],
training=[True, False],
tags=["long"]
Reported by Pylint.
Line: 88
Column: 28
tags=["long"]
))
class BatchNorm1dBenchmark(op_bench.TorchBenchmarkBase):
def init(self, N, C, device, training, cudnn):
self.inputs = {
"input_one": torch.rand(N, C, device=device, requires_grad=self.auto_set()),
"mean": torch.rand(C, device=device),
"var": torch.rand(C, device=device),
Reported by Pylint.