The following issues were found:
torch/nn/modules/normalization.py
39 issues
Line: 4
Column: 1
import torch
import numbers
from torch.nn.parameter import Parameter
from .module import Module
from ._functions import CrossMapLRN2d as _cross_map_lrn2d
from .. import functional as F
from .. import init
from torch import Tensor, Size
Reported by Pylint.
Line: 5
Column: 1
import numbers
from torch.nn.parameter import Parameter
from .module import Module
from ._functions import CrossMapLRN2d as _cross_map_lrn2d
from .. import functional as F
from .. import init
from torch import Tensor, Size
from typing import Union, List, Tuple
Reported by Pylint.
Line: 6
Column: 1
from torch.nn.parameter import Parameter
from .module import Module
from ._functions import CrossMapLRN2d as _cross_map_lrn2d
from .. import functional as F
from .. import init
from torch import Tensor, Size
from typing import Union, List, Tuple
Reported by Pylint.
Line: 7
Column: 1
from .module import Module
from ._functions import CrossMapLRN2d as _cross_map_lrn2d
from .. import functional as F
from .. import init
from torch import Tensor, Size
from typing import Union, List, Tuple
Reported by Pylint.
Line: 9
Column: 1
from .. import functional as F
from .. import init
from torch import Tensor, Size
from typing import Union, List, Tuple
class LocalResponseNorm(Module):
r"""Applies local response normalization over an input signal composed
Reported by Pylint.
Line: 175
Column: 37
self.eps = eps
self.elementwise_affine = elementwise_affine
if self.elementwise_affine:
self.weight = Parameter(torch.empty(self.normalized_shape, **factory_kwargs))
self.bias = Parameter(torch.empty(self.normalized_shape, **factory_kwargs))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
Reported by Pylint.
Line: 176
Column: 35
self.elementwise_affine = elementwise_affine
if self.elementwise_affine:
self.weight = Parameter(torch.empty(self.normalized_shape, **factory_kwargs))
self.bias = Parameter(torch.empty(self.normalized_shape, **factory_kwargs))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
Reported by Pylint.
Line: 254
Column: 37
self.eps = eps
self.affine = affine
if self.affine:
self.weight = Parameter(torch.empty(num_channels, **factory_kwargs))
self.bias = Parameter(torch.empty(num_channels, **factory_kwargs))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
Reported by Pylint.
Line: 255
Column: 35
self.affine = affine
if self.affine:
self.weight = Parameter(torch.empty(num_channels, **factory_kwargs))
self.bias = Parameter(torch.empty(num_channels, **factory_kwargs))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
Reported by Pylint.
Line: 54
Column: 23
self.beta = beta
self.k = k
def forward(self, input: Tensor) -> Tensor:
return F.local_response_norm(input, self.size, self.alpha, self.beta,
self.k)
def extra_repr(self):
return '{size}, alpha={alpha}, beta={beta}, k={k}'.format(**self.__dict__)
Reported by Pylint.
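The Column 1 hits on the import block match Pylint's import-order/position family (the standard-library `import numbers` trails `import torch`), the Column 35-37 hits land on the `torch.empty(...)` calls, the usual signature of no-member false positives against torch's C extension, and Line 54 Column 23 falls on the `input` parameter of `forward`, which shadows the built-in. A minimal fix sketch under those assumptions; the report itself names no message IDs:

# Hypothetical remedies -- the message IDs above are assumptions.
import numbers  # stdlib imports grouped before third-party ones

import torch
from torch import Tensor

# no-member false positives on torch.* are usually silenced project-wide
# via .pylintrc:  [TYPECHECK] generated-members=torch.*


def forward(input: Tensor) -> Tensor:  # pylint: disable=redefined-builtin
    # torch.nn keeps the name `input` for callers that pass it as a keyword
    # argument, so disabling beats renaming here.
    return torch.relu(input)


def is_integral(value) -> bool:
    # Keeps the `numbers` import exercised in this self-contained sketch.
    return isinstance(value, numbers.Integral)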
torch/autograd/__init__.py
39 issues
Line: 15
Column: 1
from torch.types import _TensorOrTensors
from typing import Any, Callable, List, Optional, Sequence, Tuple, Union
from .variable import Variable
from .function import Function, NestedIOFunction
from .gradcheck import gradcheck, gradgradcheck
from .grad_mode import no_grad, enable_grad, set_grad_enabled, inference_mode
from .anomaly_mode import detect_anomaly, set_detect_anomaly
from ..overrides import has_torch_function, handle_torch_function
Reported by Pylint.
Line: 16
Column: 1
from typing import Any, Callable, List, Optional, Sequence, Tuple, Union
from .variable import Variable
from .function import Function, NestedIOFunction
from .gradcheck import gradcheck, gradgradcheck
from .grad_mode import no_grad, enable_grad, set_grad_enabled, inference_mode
from .anomaly_mode import detect_anomaly, set_detect_anomaly
from ..overrides import has_torch_function, handle_torch_function
from . import functional
Reported by Pylint.
Line: 17
Column: 1
from .variable import Variable
from .function import Function, NestedIOFunction
from .gradcheck import gradcheck, gradgradcheck
from .grad_mode import no_grad, enable_grad, set_grad_enabled, inference_mode
from .anomaly_mode import detect_anomaly, set_detect_anomaly
from ..overrides import has_torch_function, handle_torch_function
from . import functional
from . import forward_ad
Reported by Pylint.
Line: 18
Column: 1
from .variable import Variable
from .function import Function, NestedIOFunction
from .gradcheck import gradcheck, gradgradcheck
from .grad_mode import no_grad, enable_grad, set_grad_enabled, inference_mode
from .anomaly_mode import detect_anomaly, set_detect_anomaly
from ..overrides import has_torch_function, handle_torch_function
from . import functional
from . import forward_ad
from . import graph
Reported by Pylint.
Line: 19
Column: 1
from .function import Function, NestedIOFunction
from .gradcheck import gradcheck, gradgradcheck
from .grad_mode import no_grad, enable_grad, set_grad_enabled, inference_mode
from .anomaly_mode import detect_anomaly, set_detect_anomaly
from ..overrides import has_torch_function, handle_torch_function
from . import functional
from . import forward_ad
from . import graph
Reported by Pylint.
Line: 20
Column: 1
from .gradcheck import gradcheck, gradgradcheck
from .grad_mode import no_grad, enable_grad, set_grad_enabled, inference_mode
from .anomaly_mode import detect_anomaly, set_detect_anomaly
from ..overrides import has_torch_function, handle_torch_function
from . import functional
from . import forward_ad
from . import graph
__all__ = ['Variable', 'Function', 'backward', 'grad_mode']
Reported by Pylint.
Line: 25
Column: 48
from . import forward_ad
from . import graph
__all__ = ['Variable', 'Function', 'backward', 'grad_mode']
_OptionalTensor = Optional[torch.Tensor]
def _make_grads(outputs: Sequence[torch.Tensor], grads: Sequence[_OptionalTensor]) -> Tuple[_OptionalTensor, ...]:
new_grads: List[_OptionalTensor] = []
Reported by Pylint.
Line: 52
Column: 34
if out.requires_grad:
if out.numel() != 1:
raise RuntimeError("grad can be implicitly created only for scalar outputs")
new_grads.append(torch.ones_like(out, memory_format=torch.preserve_format))
else:
new_grads.append(None)
else:
raise TypeError("gradients can be either Tensors or None, but got " +
type(grad).__name__)
Reported by Pylint.
Line: 52
Column: 69
if out.requires_grad:
if out.numel() != 1:
raise RuntimeError("grad can be implicitly created only for scalar outputs")
new_grads.append(torch.ones_like(out, memory_format=torch.preserve_format))
else:
new_grads.append(None)
else:
raise TypeError("gradients can be either Tensors or None, but got " +
type(grad).__name__)
Reported by Pylint.
Line: 257
Column: 12
def variable(*args, **kwargs):
warnings.warn("torch.autograd.variable(...) is deprecated, use torch.tensor(...) instead")
return torch.tensor(*args, **kwargs)
if not torch._C._autograd_init():
raise RuntimeError("autograd initialization failed")
# Import all native method/classes
Reported by Pylint.
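Line 25 Column 48 falls exactly on 'grad_mode' inside `__all__`: the module imports names *from* `.grad_mode` but never binds the submodule itself, which Pylint reports as an undefined name in `__all__` (an assumption; likewise the Line 52 hits at `torch.ones_like`/`torch.preserve_format` look like C-extension no-member noise). A self-contained illustration of the `__all__` pattern:

# Sketch of the undefined-name-in-__all__ pattern (assumed message).
import os.path as path  # binds the name `path` in this module's namespace

__all__ = ['path']           # fine: 'path' exists here
# __all__ = ['genericpath']  # would be flagged: never bound in this module

# In torch/autograd/__init__.py the analogous fix is an explicit
#   from . import grad_mode
# so the exported name resolves.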
torch/ao/nn/sparse/quantized/dynamic/linear.py
39 issues
Line: 19
Column: 100
_op_type = "sparse_dynamic"
_FLOAT_MODULE = torch.nn.Linear
def __init__(self, in_features, out_features, row_block_size, col_block_size, bias=True, dtype=torch.qint8):
super().__init__()
if dtype != torch.qint8:
raise NotImplementedError("Only QINT8 is supported for Sparse Quantized Linear Dynamic")
Reported by Pylint.
Line: 22
Column: 21
def __init__(self, in_features, out_features, row_block_size, col_block_size, bias=True, dtype=torch.qint8):
super().__init__()
if dtype != torch.qint8:
raise NotImplementedError("Only QINT8 is supported for Sparse Quantized Linear Dynamic")
self.in_features = in_features
self.out_features = out_features
Reported by Pylint.
Line: 29
Column: 57
self.out_features = out_features
if bias:
bias = torch.zeros(self.out_features, dtype=torch.float)
else:
bias = None
qweight = torch._empty_affine_quantized([out_features, in_features],
scale=1, zero_point=0, dtype=torch.qint8)
Reported by Pylint.
Line: 29
Column: 20
self.out_features = out_features
if bias:
bias = torch.zeros(self.out_features, dtype=torch.float)
else:
bias = None
qweight = torch._empty_affine_quantized([out_features, in_features],
scale=1, zero_point=0, dtype=torch.qint8)
Reported by Pylint.
Line: 33
Column: 19
else:
bias = None
qweight = torch._empty_affine_quantized([out_features, in_features],
scale=1, zero_point=0, dtype=torch.qint8)
self._packed_params = linear.LinearPackedParams(dtype)
self._packed_params.set_weight_bias(qweight, bias, row_block_size, col_block_size)
def _get_name(self):
Reported by Pylint.
Line: 34
Column: 78
bias = None
qweight = torch._empty_affine_quantized([out_features, in_features],
scale=1, zero_point=0, dtype=torch.qint8)
self._packed_params = linear.LinearPackedParams(dtype)
self._packed_params.set_weight_bias(qweight, bias, row_block_size, col_block_size)
def _get_name(self):
return 'SparseQuantizedDynamicLinear'
Reported by Pylint.
Line: 121
Column: 25
weight_observer(weight)
dtype = weight_observer.dtype
assert dtype == torch.qint8, 'Weight observer must have dtype torch.qint8'
w_sc, w_zp = weight_observer.calculate_qparams()
if isinstance(w_zp, torch.Tensor):
assert not torch.any(w_zp.bool()), "All weight zero points must map to 0"
else:
assert w_zp == 0, 'Weight zero point must map to 0'
Reported by Pylint.
Line: 124
Column: 24
assert dtype == torch.qint8, 'Weight observer must have dtype torch.qint8'
w_sc, w_zp = weight_observer.calculate_qparams()
if isinstance(w_zp, torch.Tensor):
assert not torch.any(w_zp.bool()), "All weight zero points must map to 0"
else:
assert w_zp == 0, 'Weight zero point must map to 0'
qweight = _quantize_weight(weight.float(), weight_observer)
row_block_size, col_block_size = LinearBlockSparsePattern.block_size()
Reported by Pylint.
Line: 50
Column: 52
return hide_packed_params_repr(self, linear.LinearPackedParams)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return torch.ops.sparse.qlinear_dynamic(x, self._packed_params._packed_params)
def _save_to_state_dict(self, destination, prefix, keep_vars):
super()._save_to_state_dict(destination, prefix, keep_vars)
destination[prefix + 'op_type'] = self._op_type
Reported by Pylint.
Line: 78
Column: 16
missing_keys, unexpected_keys, error_msgs)
def _weight_bias(self):
return self._packed_params._weight_bias()
def weight(self):
return self._weight_bias()[0]
def bias(self):
Reported by Pylint.
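Line 19 Column 100 points just past the conventional line-length limit of the long `__init__` signature, while the remaining columns sit on underscore-prefixed attributes (`torch._empty_affine_quantized`, `self._packed_params._packed_params`), i.e. likely protected-access/no-member reports. A hedged sketch of the usual treatment:

# Sketch only; which messages actually fire here is an assumption.
import torch


def make_qweight(out_features, in_features):
    # The private builder has no public equivalent for this use case, so the
    # access is acknowledged inline rather than refactored away:
    return torch._empty_affine_quantized(  # pylint: disable=protected-access
        [out_features, in_features], scale=1, zero_point=0, dtype=torch.qint8)

# Long signatures are wrapped instead of disabled, e.g.
#   def __init__(self, in_features, out_features, row_block_size,
#                col_block_size, bias=True, dtype=torch.qint8):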
torch/onnx/__init__.py
39 issues
Line: 3
Column: 23
import torch._C as _C
TensorProtoDataType = _C._onnx.TensorProtoDataType
OperatorExportTypes = _C._onnx.OperatorExportTypes
TrainingMode = _C._onnx.TrainingMode
PYTORCH_ONNX_CAFFE2_BUNDLE = _C._onnx.PYTORCH_ONNX_CAFFE2_BUNDLE
ONNX_ARCHIVE_MODEL_PROTO_NAME = "__MODEL_PROTO"
Reported by Pylint.
Line: 4
Column: 23
import torch._C as _C
TensorProtoDataType = _C._onnx.TensorProtoDataType
OperatorExportTypes = _C._onnx.OperatorExportTypes
TrainingMode = _C._onnx.TrainingMode
PYTORCH_ONNX_CAFFE2_BUNDLE = _C._onnx.PYTORCH_ONNX_CAFFE2_BUNDLE
ONNX_ARCHIVE_MODEL_PROTO_NAME = "__MODEL_PROTO"
Reported by Pylint.
Line: 5
Column: 16
TensorProtoDataType = _C._onnx.TensorProtoDataType
OperatorExportTypes = _C._onnx.OperatorExportTypes
TrainingMode = _C._onnx.TrainingMode
PYTORCH_ONNX_CAFFE2_BUNDLE = _C._onnx.PYTORCH_ONNX_CAFFE2_BUNDLE
ONNX_ARCHIVE_MODEL_PROTO_NAME = "__MODEL_PROTO"
# TODO: Update these variables when there
Reported by Pylint.
Line: 6
Column: 30
TensorProtoDataType = _C._onnx.TensorProtoDataType
OperatorExportTypes = _C._onnx.OperatorExportTypes
TrainingMode = _C._onnx.TrainingMode
PYTORCH_ONNX_CAFFE2_BUNDLE = _C._onnx.PYTORCH_ONNX_CAFFE2_BUNDLE
ONNX_ARCHIVE_MODEL_PROTO_NAME = "__MODEL_PROTO"
# TODO: Update these variables when there
# is a new ir_version and producer_version
Reported by Pylint.
Line: 10
Column: 3
ONNX_ARCHIVE_MODEL_PROTO_NAME = "__MODEL_PROTO"
# TODO: Update these variables when there
# is a new ir_version and producer_version
# and use these values in the exporter
ir_version = _C._onnx.IR_VERSION
producer_name = "pytorch"
producer_version = _C._onnx.PRODUCER_VERSION
Reported by Pylint.
Line: 13
Column: 14
# TODO: Update these variables when there
# is a new ir_version and producer_version
# and use these values in the exporter
ir_version = _C._onnx.IR_VERSION
producer_name = "pytorch"
producer_version = _C._onnx.PRODUCER_VERSION
constant_folding_opset_versions = [9, 10, 11, 12, 13]
Reported by Pylint.
Line: 15
Column: 20
# and use these values in the exporter
ir_version = _C._onnx.IR_VERSION
producer_name = "pytorch"
producer_version = _C._onnx.PRODUCER_VERSION
constant_folding_opset_versions = [9, 10, 11, 12, 13]
class ExportTypes:
PROTOBUF_FILE = 1
Reported by Pylint.
Line: 28
Column: 14
def _export(*args, **kwargs):
from torch.onnx import utils
result = utils._export(*args, **kwargs)
return result
def export(model, args, f, export_params=True, verbose=False, training=TrainingMode.EVAL,
input_names=None, output_names=None, aten=False,
Reported by Pylint.
Line: 323
Column: 12
def _export_to_pretty_string(*args, **kwargs):
from torch.onnx import utils
return utils._export_to_pretty_string(*args, **kwargs)
def _optimize_trace(graph, operator_export_type):
from torch.onnx import utils
return utils._optimize_graph(graph, operator_export_type)
Reported by Pylint.
Line: 328
Column: 12
def _optimize_trace(graph, operator_export_type):
from torch.onnx import utils
return utils._optimize_graph(graph, operator_export_type)
def select_model_mode_for_export(model, mode):
r"""
A context manager to temporarily set the training mode of ``model``
Reported by Pylint.
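Nearly every column in this file rests on a private attribute chain (`_C._onnx.*`, `utils._export`, `utils._optimize_graph`), so these are presumably protected-access reports; the in-function `from torch.onnx import utils` lines may additionally trip an import-outside-toplevel check. Since the private bindings are the intended integration point, the conventional response is an inline disable; a sketch under those assumptions:

# Hedged sketch; the message names are inferred from the column positions.
import torch._C as _C

TensorProtoDataType = _C._onnx.TensorProtoDataType  # pylint: disable=protected-access


def _export(*args, **kwargs):
    # Imported lazily to dodge a circular import; silenced rather than moved.
    from torch.onnx import utils  # pylint: disable=import-outside-toplevel
    return utils._export(*args, **kwargs)  # pylint: disable=protected-access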
test/cpp_api_parity/functional_impl_check.py
39 issues
Line: 23
Column: 1
import pprint
import os
import torch
from cpp_api_parity.utils import TorchNNFunctionalTestParams, TORCH_NN_COMMON_TEST_HARNESS, \
compile_cpp_code_inline, set_python_tensors_requires_grad, move_python_tensors_to_device, \
add_test, compute_cpp_args_construction_stmts_and_forward_arg_symbols, serialize_arg_dict_as_script_module, \
compute_arg_dict, decorate_test_fn, compute_temp_file_path, generate_error_msg, is_torch_nn_functional_test, \
try_remove_folder
Reported by Pylint.
Line: 61
Column: 17
}
""")
def run_forward(unit_test_class, test_params):
device = test_params.device
inputs = set_python_tensors_requires_grad(move_python_tensors_to_device(
[arg_value for _, arg_value in test_params.arg_dict['input']], device))
inputs += move_python_tensors_to_device(
Reported by Pylint.
Line: 202
Column: 94
def test_fn(self):
test_forward(
unit_test_class=self, test_params=unit_test_class.functional_test_params_map[self._testMethodName])
test_fn = decorate_test_fn(
test_fn=test_fn,
test_cuda=test_params_dict.get('test_cuda', True),
has_impl_parity=parity_table['torch::nn::functional'][functional_full_name][0] and
Reported by Pylint.
Line: 228
Column: 9
assert len(unit_test_class.functional_test_params_map) > 0
cpp_sources = TORCH_NN_COMMON_TEST_HARNESS + SAMPLE_FUNCTIONAL_CPP_SOURCE
functions = []
for test_name, test_params in unit_test_class.functional_test_params_map.items():
cpp_sources += generate_test_cpp_sources(test_params=test_params, template=TORCH_NN_FUNCTIONAL_TEST_FORWARD)
functions.append('{}_test_forward'.format(test_params.functional_variant_name))
if print_cpp_source:
print(cpp_sources)
Reported by Pylint.
Line: 1
Column: 1
# The purpose of this test is to check that we have implementation parity between
# a Python `torch.nn.functional` function and its corresponding C++ `torch::nn::functional`
# function. Concretely, this test does the following:
#
# 1. Get a test params dict from common_nn.py, run forward pass on the Python functional
# created using the test params.
#
# 2. Serialize the Python functional's forward input arguments, deserialize them
# in C++ and use them as input for the C++ functional's forward pass.
Reported by Pylint.
Line: 26
Column: 1
import torch
from cpp_api_parity.utils import TorchNNFunctionalTestParams, TORCH_NN_COMMON_TEST_HARNESS, \
compile_cpp_code_inline, set_python_tensors_requires_grad, move_python_tensors_to_device, \
add_test, compute_cpp_args_construction_stmts_and_forward_arg_symbols, serialize_arg_dict_as_script_module, \
compute_arg_dict, decorate_test_fn, compute_temp_file_path, generate_error_msg, is_torch_nn_functional_test, \
try_remove_folder
from cpp_api_parity.sample_functional import SAMPLE_FUNCTIONAL_CPP_SOURCE
# Expected substitutions:
Reported by Pylint.
Line: 27
Column: 1
from cpp_api_parity.utils import TorchNNFunctionalTestParams, TORCH_NN_COMMON_TEST_HARNESS, \
compile_cpp_code_inline, set_python_tensors_requires_grad, move_python_tensors_to_device, \
add_test, compute_cpp_args_construction_stmts_and_forward_arg_symbols, serialize_arg_dict_as_script_module, \
compute_arg_dict, decorate_test_fn, compute_temp_file_path, generate_error_msg, is_torch_nn_functional_test, \
try_remove_folder
from cpp_api_parity.sample_functional import SAMPLE_FUNCTIONAL_CPP_SOURCE
# Expected substitutions:
#
Reported by Pylint.
Line: 61
Column: 1
}
""")
def run_forward(unit_test_class, test_params):
device = test_params.device
inputs = set_python_tensors_requires_grad(move_python_tensors_to_device(
[arg_value for _, arg_value in test_params.arg_dict['input']], device))
inputs += move_python_tensors_to_device(
Reported by Pylint.
Line: 79
Column: 1
return python_output
def test_forward(unit_test_class, test_params):
functional_variant_name = test_params.functional_variant_name
cpp_tmp_folder = test_params.cpp_tmp_folder
# Remove the temporary folder if it exists already
try_remove_folder(cpp_tmp_folder)
os.mkdir(cpp_tmp_folder)
Reported by Pylint.
Line: 97
Column: 1
cpp_test_fn = getattr(unit_test_class.functional_impl_check_cpp_module, cpp_test_name)
def run_cpp_test_fn_and_check_output():
forward_output_file_path = compute_temp_file_path(cpp_tmp_folder, functional_variant_name, 'forward_output')
cpp_test_fn(arg_dict_file_path, forward_output_file_path)
cpp_output = torch.load(forward_output_file_path)
# Check that forward outputs are equal
Reported by Pylint.
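The Column 1 hits on the module's first line and on `def run_forward`, `def test_forward`, and the nested `run_cpp_test_fn_and_check_output` fit the missing-docstring family; Line 202 Column 94 looks like a line-length report; and Line 228 Column 9 lands on `test_name`, which the loop body never reads (all assumptions, since the report carries only positions). A sketch of the usual cleanups:

# Sketch; the message IDs are assumptions.
"""Checks parity between torch.nn.functional and torch::nn::functional."""


def run_forward(unit_test_class, test_params):
    """Run the Python functional's forward pass for one parity test."""


def collect_forward_tests(test_params_map):
    """Accumulate per-test C++ function names."""
    functions = []
    # Prefixing the unused loop variable with `_` silences unused-variable:
    for _test_name, test_params in test_params_map.items():
        functions.append('{}_test_forward'.format(test_params.functional_variant_name))
    return functions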
caffe2/python/examples/char_rnn.py
39 issues
Line: 19
Column: 1
import numpy as np
from datetime import datetime
'''
This script takes a text file as input and uses a recurrent neural network
to learn to predict next character in a sequence.
'''
logging.basicConfig()
Reported by Pylint.
Line: 31
Column: 1
# Default set() here is intentional as it would accumulate values like a global
# variable
def CreateNetOnce(net, created_names=set()): # noqa
name = net.Name()
if name not in created_names:
created_names.add(name)
workspace.CreateNet(net)
Reported by Pylint.
Line: 69
Column: 28
'target',
)
hidden_output_all, self.hidden_output, _, self.cell_state = LSTM(
model, input_blob, seq_lengths, (hidden_init, cell_init),
self.D, self.hidden_size, scope="LSTM")
output = brew.fc(
model,
hidden_output_all,
Reported by Pylint.
Line: 69
Column: 51
'target',
)
hidden_output_all, self.hidden_output, _, self.cell_state = LSTM(
model, input_blob, seq_lengths, (hidden_init, cell_init),
self.D, self.hidden_size, scope="LSTM")
output = brew.fc(
model,
hidden_output_all,
Reported by Pylint.
Line: 90
Column: 9
# Create a copy of the current net. We will use it on the forward
# pass where we don't need loss and backward operators
self.forward_net = core.Net(model.net.Proto())
xent = model.net.LabelCrossEntropy([softmax_reshaped, target], 'xent')
# Loss is average both across batch and through time
# Thats why the learning rate below is multiplied by self.seq_length
loss = model.net.AveragedLoss(xent, 'loss')
Reported by Pylint.
Line: 107
Column: 9
gamma=0.9999
)
self.model = model
self.predictions = softmax
self.loss = loss
self.prepare_state = core.Net("prepare_state")
self.prepare_state.Copy(self.hidden_output, hidden_init)
Reported by Pylint.
Line: 108
Column: 9
)
self.model = model
self.predictions = softmax
self.loss = loss
self.prepare_state = core.Net("prepare_state")
self.prepare_state.Copy(self.hidden_output, hidden_init)
self.prepare_state.Copy(self.cell_state, cell_init)
Reported by Pylint.
Line: 109
Column: 9
self.model = model
self.predictions = softmax
self.loss = loss
self.prepare_state = core.Net("prepare_state")
self.prepare_state.Copy(self.hidden_output, hidden_init)
self.prepare_state.Copy(self.cell_state, cell_init)
Reported by Pylint.
Line: 111
Column: 9
self.predictions = softmax
self.loss = loss
self.prepare_state = core.Net("prepare_state")
self.prepare_state.Copy(self.hidden_output, hidden_init)
self.prepare_state.Copy(self.cell_state, cell_init)
def _idx_at_pos(self, pos):
return self.char_to_idx[self.text[pos]]
Reported by Pylint.
Line: 161
Column: 13
)
workspace.RunNet(self.prepare_state.Name())
input = np.zeros(
[self.seq_length, self.batch_size, self.D]
).astype(np.float32)
target = np.zeros(
[self.seq_length * self.batch_size]
).astype(np.int32)
Reported by Pylint.
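Line 31 Column 1 is the `CreateNetOnce(net, created_names=set())` definition: the `# noqa` is a flake8 escape that Pylint ignores, and a mutable default is normally reported as dangerous-default-value even when, as the comment says, the accumulation is deliberate. Line 161 Column 13 lands on `input = np.zeros(...)`, which shadows the built-in. A sketch of both fixes under those assumptions:

# Sketch; assumes dangerous-default-value and redefined-builtin.
import numpy as np

_CREATED_NET_NAMES = set()  # module-level cache makes the accumulation explicit


def create_net_once(net_name):
    # Same memoization the default-set() trick provided, minus the mutable default.
    if net_name not in _CREATED_NET_NAMES:
        _CREATED_NET_NAMES.add(net_name)
        print("creating net", net_name)


input_batch = np.zeros((7, 1, 256), dtype=np.float32)  # renamed from `input`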
test/test_gen_backend_stubs.py
39 issues
Line: 4
Column: 1
import os
import tempfile
from torch.testing._internal.common_utils import TestCase, run_tests
import tools.codegen.gen_backend_stubs
path = os.path.dirname(os.path.realpath(__file__))
gen_backend_stubs_path = os.path.join(path, '../tools/codegen/gen_backend_stubs.py')
Reported by Pylint.
Line: 5
Column: 1
import tempfile
from torch.testing._internal.common_utils import TestCase, run_tests
import tools.codegen.gen_backend_stubs
path = os.path.dirname(os.path.realpath(__file__))
gen_backend_stubs_path = os.path.join(path, '../tools/codegen/gen_backend_stubs.py')
# gen_backend_stubs.py is an integration point that is called directly by external backends.
Reported by Pylint.
Line: 1
Column: 1
import os
import tempfile
from torch.testing._internal.common_utils import TestCase, run_tests
import tools.codegen.gen_backend_stubs
path = os.path.dirname(os.path.realpath(__file__))
gen_backend_stubs_path = os.path.join(path, '../tools/codegen/gen_backend_stubs.py')
Reported by Pylint.
Line: 12
Column: 1
# gen_backend_stubs.py is an integration point that is called directly by external backends.
# The tests here are to confirm that badly formed inputs result in reasonable error messages.
class TestGenBackendStubs(TestCase):
def assert_success_from_gen_backend_stubs(self, yaml_str: str) -> str:
with tempfile.NamedTemporaryFile(mode='w') as fp:
fp.write(yaml_str)
fp.flush()
Reported by Pylint.
Line: 14
Column: 5
# The tests here are to confirm that badly formed inputs result in reasonable error messages.
class TestGenBackendStubs(TestCase):
def assert_success_from_gen_backend_stubs(self, yaml_str: str) -> str:
with tempfile.NamedTemporaryFile(mode='w') as fp:
fp.write(yaml_str)
fp.flush()
tools.codegen.gen_backend_stubs.run(fp.name, '', True)
Reported by Pylint.
Line: 14
Column: 5
# The tests here are to confirm that badly formed inputs result in reasonable error messages.
class TestGenBackendStubs(TestCase):
def assert_success_from_gen_backend_stubs(self, yaml_str: str) -> str:
with tempfile.NamedTemporaryFile(mode='w') as fp:
fp.write(yaml_str)
fp.flush()
tools.codegen.gen_backend_stubs.run(fp.name, '', True)
Reported by Pylint.
Line: 15
Column: 55
class TestGenBackendStubs(TestCase):
def assert_success_from_gen_backend_stubs(self, yaml_str: str) -> str:
with tempfile.NamedTemporaryFile(mode='w') as fp:
fp.write(yaml_str)
fp.flush()
tools.codegen.gen_backend_stubs.run(fp.name, '', True)
def get_errors_from_gen_backend_stubs(self, yaml_str: str) -> str:
Reported by Pylint.
Line: 20
Column: 5
fp.flush()
tools.codegen.gen_backend_stubs.run(fp.name, '', True)
def get_errors_from_gen_backend_stubs(self, yaml_str: str) -> str:
with tempfile.NamedTemporaryFile(mode='w') as fp:
fp.write(yaml_str)
fp.flush()
try:
tools.codegen.gen_backend_stubs.run(fp.name, '', True)
Reported by Pylint.
Line: 20
Column: 5
fp.flush()
tools.codegen.gen_backend_stubs.run(fp.name, '', True)
def get_errors_from_gen_backend_stubs(self, yaml_str: str) -> str:
with tempfile.NamedTemporaryFile(mode='w') as fp:
fp.write(yaml_str)
fp.flush()
try:
tools.codegen.gen_backend_stubs.run(fp.name, '', True)
Reported by Pylint.
Line: 21
Column: 55
tools.codegen.gen_backend_stubs.run(fp.name, '', True)
def get_errors_from_gen_backend_stubs(self, yaml_str: str) -> str:
with tempfile.NamedTemporaryFile(mode='w') as fp:
fp.write(yaml_str)
fp.flush()
try:
tools.codegen.gen_backend_stubs.run(fp.name, '', True)
except AssertionError as e:
Reported by Pylint.
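Besides the usual missing-docstring hits at Column 5, the Column 55 reports sit on the `-> str` return annotations, and both helpers visibly return nothing, so whatever message fires there, the annotation is wrong on its face. A sketch of the corrected helper (the commented-out generator call only works inside the PyTorch repo):

# Sketch; assumes the Column 5 hits are missing docstrings.
import tempfile


def assert_success_from_gen_backend_stubs(yaml_str: str) -> None:  # was `-> str`
    """Write `yaml_str` to a temp file and run the stub generator on it."""
    with tempfile.NamedTemporaryFile(mode='w') as fp:
        fp.write(yaml_str)
        fp.flush()
        # tools.codegen.gen_backend_stubs.run(fp.name, '', True)  # repo-only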
test/jit/test_hash.py
39 issues
Line: 4
Column: 1
import os
import sys
import torch
from typing import Tuple, List
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
Reported by Pylint.
Line: 11
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == "__main__":
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
Reported by Pylint.
Line: 1
Column: 1
import os
import sys
import torch
from typing import Tuple, List
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
Reported by Pylint.
Line: 6
Column: 1
import torch
from typing import Tuple, List
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
Reported by Pylint.
Line: 11
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == "__main__":
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
Reported by Pylint.
Line: 18
Column: 1
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestHash(JitTestCase):
def test_hash_tuple(self):
def fn(t1: Tuple[int, int], t2: Tuple[int, int]) -> bool:
return hash(t1) == hash(t2)
self.checkScript(fn, ((1, 2), (1, 2)))
Reported by Pylint.
Line: 19
Column: 5
"instead.")
class TestHash(JitTestCase):
def test_hash_tuple(self):
def fn(t1: Tuple[int, int], t2: Tuple[int, int]) -> bool:
return hash(t1) == hash(t2)
self.checkScript(fn, ((1, 2), (1, 2)))
self.checkScript(fn, ((1, 2), (3, 4)))
Reported by Pylint.
Line: 20
Column: 9
class TestHash(JitTestCase):
def test_hash_tuple(self):
def fn(t1: Tuple[int, int], t2: Tuple[int, int]) -> bool:
return hash(t1) == hash(t2)
self.checkScript(fn, ((1, 2), (1, 2)))
self.checkScript(fn, ((1, 2), (3, 4)))
self.checkScript(fn, ((1, 2), (2, 1)))
Reported by Pylint.
Line: 20
Column: 9
class TestHash(JitTestCase):
def test_hash_tuple(self):
def fn(t1: Tuple[int, int], t2: Tuple[int, int]) -> bool:
return hash(t1) == hash(t2)
self.checkScript(fn, ((1, 2), (1, 2)))
self.checkScript(fn, ((1, 2), (3, 4)))
self.checkScript(fn, ((1, 2), (2, 1)))
Reported by Pylint.
Line: 20
Column: 9
class TestHash(JitTestCase):
def test_hash_tuple(self):
def fn(t1: Tuple[int, int], t2: Tuple[int, int]) -> bool:
return hash(t1) == hash(t2)
self.checkScript(fn, ((1, 2), (1, 2)))
self.checkScript(fn, ((1, 2), (3, 4)))
self.checkScript(fn, ((1, 2), (2, 1)))
Reported by Pylint.
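Here the import at Line 11 genuinely must run after the `sys.path.append`, so the Column 1 hit on it is almost certainly an import-position report that reordering cannot satisfy; the conventional escape hatch is an inline disable. A sketch (requires a PyTorch checkout on the adjusted path):

import os
import sys

# The path tweak must precede the import, so the position warning is silenced
# inline rather than "fixed" by reordering:
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)

from torch.testing._internal.jit_utils import JitTestCase  # noqa: E402  # pylint: disable=wrong-import-position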
torch/fx/experimental/graph_manipulation.py
39 issues
Line: 93
Column: 31
total_num_of_elems += output_elem
# Assume for now if it's quantized then it's qint8 or quint8
if tensor_meta.is_quantized:
size_per_elem_bytes = torch._empty_affine_quantized(
[], dtype=tensor_meta.dtype
).element_size()
else:
size_per_elem_bytes = torch.tensor([], dtype=tensor_meta.dtype).element_size()
total_size = size_per_elem_bytes * total_num_of_elems
Reported by Pylint.
Line: 97
Column: 31
[], dtype=tensor_meta.dtype
).element_size()
else:
size_per_elem_bytes = torch.tensor([], dtype=tensor_meta.dtype).element_size()
total_size = size_per_elem_bytes * total_num_of_elems
output_size = size_per_elem_bytes * output_elem
return size_bytes(output_size, total_size)
Reported by Pylint.
Line: 103
Column: 28
return size_bytes(output_size, total_size)
def serialize_shape(shape: torch.Size) -> str:
return str(list(shape))
def serialize_stride(stride: Tuple[int]) -> str:
return str(list(stride))
Reported by Pylint.
Line: 169
Column: 54
scheme["qscheme"] = str(tensor.qscheme())
# For per tensor scheme, we stores scale and zero_point.
if tensor.qscheme() in {torch.per_tensor_affine, torch.per_tensor_symmetric}:
scheme["q_scale"] = tensor.q_scale()
scheme["q_zero_point"] = tensor.q_zero_point()
# For per channel scheme, per_channel_scales and per_channel_zero_points are tensors.
# We store their tensor value into `weights` and store the name into `scheme`.
Reported by Pylint.
Line: 169
Column: 29
scheme["qscheme"] = str(tensor.qscheme())
# For per tensor scheme, we stores scale and zero_point.
if tensor.qscheme() in {torch.per_tensor_affine, torch.per_tensor_symmetric}:
scheme["q_scale"] = tensor.q_scale()
scheme["q_zero_point"] = tensor.q_zero_point()
# For per channel scheme, per_channel_scales and per_channel_zero_points are tensors.
# We store their tensor value into `weights` and store the name into `scheme`.
Reported by Pylint.
Line: 176
Column: 9
# For per channel scheme, per_channel_scales and per_channel_zero_points are tensors.
# We store their tensor value into `weights` and store the name into `scheme`.
if tensor.qscheme() in {
torch.per_channel_affine,
torch.per_channel_affine_float_qparams,
torch.per_channel_symmetric,
}:
# per_channel_scales is float64. Here we save it as float32.
weights[
Reported by Pylint.
Line: 177
Column: 9
# We store their tensor value into `weights` and store the name into `scheme`.
if tensor.qscheme() in {
torch.per_channel_affine,
torch.per_channel_affine_float_qparams,
torch.per_channel_symmetric,
}:
# per_channel_scales is float64. Here we save it as float32.
weights[
f"{pcq_prefix}_per_channel_scales"
Reported by Pylint.
Line: 178
Column: 9
if tensor.qscheme() in {
torch.per_channel_affine,
torch.per_channel_affine_float_qparams,
torch.per_channel_symmetric,
}:
# per_channel_scales is float64. Here we save it as float32.
weights[
f"{pcq_prefix}_per_channel_scales"
] = tensor.q_per_channel_scales().float()
Reported by Pylint.
Line: 316
Column: 17
node_rep["qscheme"] = str(tensor_meta.qscheme)
if tensor_meta.qscheme in {
torch.per_tensor_affine,
torch.per_tensor_symmetric,
}:
node_rep["q_scale"] = tensor_meta.q_scale
node_rep["q_zero_point"] = tensor_meta.q_zero_point
Reported by Pylint.
Line: 317
Column: 17
if tensor_meta.qscheme in {
torch.per_tensor_affine,
torch.per_tensor_symmetric,
}:
node_rep["q_scale"] = tensor_meta.q_scale
node_rep["q_zero_point"] = tensor_meta.q_zero_point
return node_rep
Reported by Pylint.
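Every flagged column in this file rests on a `torch.*` attribute (`_empty_affine_quantized`, `per_tensor_affine`, `per_channel_affine`, ...), the signature of no-member noise from torch's C extension, plus protected-access on the underscore-prefixed builder (assumptions; no message names are given). Rather than disabling each site, the project-wide cure is usually a config entry; a sketch:

# Sketch; assumes no-member false positives on torch's C extension.
# Project-wide (in .pylintrc, shown as a comment):
#   [TYPECHECK]
#   generated-members=torch.*
# Per-site alternative:
import torch

qtensor = torch._empty_affine_quantized(  # pylint: disable=protected-access
    [], dtype=torch.qint8)
size_per_elem_bytes = qtensor.element_size()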
torch/quantization/qconfig.py
39 issues
Line: 2
Column: 1
from collections import namedtuple
from .observer import (HistogramObserver, MovingAverageMinMaxObserver,
PlaceholderObserver, default_debug_observer,
default_dynamic_quant_observer,
default_float_qparams_observer, default_observer,
default_per_channel_weight_observer,
default_placeholder_observer, default_weight_observer)
from .fake_quantize import (FakeQuantize, default_fake_quant,
default_per_channel_weight_fake_quant,
Reported by Pylint.
Line: 8
Column: 1
default_float_qparams_observer, default_observer,
default_per_channel_weight_observer,
default_placeholder_observer, default_weight_observer)
from .fake_quantize import (FakeQuantize, default_fake_quant,
default_per_channel_weight_fake_quant,
default_weight_fake_quant, default_fused_act_fake_quant, default_fused_wt_fake_quant,
FusedMovingAvgObsFakeQuantize, default_fused_per_channel_wt_fake_quant)
import torch
import torch.nn as nn
Reported by Pylint.
Line: 76
Column: 89
default_dynamic_qconfig = QConfigDynamic(activation=default_dynamic_quant_observer,
weight=default_weight_observer)
float16_dynamic_qconfig = QConfigDynamic(activation=PlaceholderObserver.with_args(dtype=torch.float32),
weight=PlaceholderObserver.with_args(dtype=torch.float16))
float16_static_qconfig = QConfigDynamic(activation=PlaceholderObserver.with_args(dtype=torch.float16),
weight=PlaceholderObserver.with_args(dtype=torch.float16))
per_channel_dynamic_qconfig = QConfigDynamic(activation=default_dynamic_quant_observer,
weight=default_per_channel_weight_observer)
Reported by Pylint.
Line: 77
Column: 85
default_dynamic_qconfig = QConfigDynamic(activation=default_dynamic_quant_observer,
weight=default_weight_observer)
float16_dynamic_qconfig = QConfigDynamic(activation=PlaceholderObserver.with_args(dtype=torch.float32),
weight=PlaceholderObserver.with_args(dtype=torch.float16))
float16_static_qconfig = QConfigDynamic(activation=PlaceholderObserver.with_args(dtype=torch.float16),
weight=PlaceholderObserver.with_args(dtype=torch.float16))
per_channel_dynamic_qconfig = QConfigDynamic(activation=default_dynamic_quant_observer,
weight=default_per_channel_weight_observer)
Reported by Pylint.
Line: 78
Column: 88
weight=default_weight_observer)
float16_dynamic_qconfig = QConfigDynamic(activation=PlaceholderObserver.with_args(dtype=torch.float32),
weight=PlaceholderObserver.with_args(dtype=torch.float16))
float16_static_qconfig = QConfigDynamic(activation=PlaceholderObserver.with_args(dtype=torch.float16),
weight=PlaceholderObserver.with_args(dtype=torch.float16))
per_channel_dynamic_qconfig = QConfigDynamic(activation=default_dynamic_quant_observer,
weight=default_per_channel_weight_observer)
# TODO: this is weight only quant, change this to QConfigWeightOnly
Reported by Pylint.
Line: 79
Column: 84
float16_dynamic_qconfig = QConfigDynamic(activation=PlaceholderObserver.with_args(dtype=torch.float32),
weight=PlaceholderObserver.with_args(dtype=torch.float16))
float16_static_qconfig = QConfigDynamic(activation=PlaceholderObserver.with_args(dtype=torch.float16),
weight=PlaceholderObserver.with_args(dtype=torch.float16))
per_channel_dynamic_qconfig = QConfigDynamic(activation=default_dynamic_quant_observer,
weight=default_per_channel_weight_observer)
# TODO: this is weight only quant, change this to QConfigWeightOnly
# or remove the QConfigDynamic later
Reported by Pylint.
Line: 83
Column: 3
per_channel_dynamic_qconfig = QConfigDynamic(activation=default_dynamic_quant_observer,
weight=default_per_channel_weight_observer)
# TODO: this is weight only quant, change this to QConfigWeightOnly
# or remove the QConfigDynamic later
float_qparams_weight_only_qconfig = QConfigDynamic(
activation=default_placeholder_observer,
weight=default_float_qparams_observer)
Reported by Pylint.
Line: 1
Column: 1
from collections import namedtuple
from .observer import (HistogramObserver, MovingAverageMinMaxObserver,
PlaceholderObserver, default_debug_observer,
default_dynamic_quant_observer,
default_float_qparams_observer, default_observer,
default_per_channel_weight_observer,
default_placeholder_observer, default_weight_observer)
from .fake_quantize import (FakeQuantize, default_fake_quant,
default_per_channel_weight_fake_quant,
Reported by Pylint.
Line: 10
Column: 1
default_placeholder_observer, default_weight_observer)
from .fake_quantize import (FakeQuantize, default_fake_quant,
default_per_channel_weight_fake_quant,
default_weight_fake_quant, default_fused_act_fake_quant, default_fused_wt_fake_quant,
FusedMovingAvgObsFakeQuantize, default_fused_per_channel_wt_fake_quant)
import torch
import torch.nn as nn
from typing import Union, Optional, Any
Reported by Pylint.
Line: 12
Column: 1
default_per_channel_weight_fake_quant,
default_weight_fake_quant, default_fused_act_fake_quant, default_fused_wt_fake_quant,
FusedMovingAvgObsFakeQuantize, default_fused_per_channel_wt_fake_quant)
import torch
import torch.nn as nn
from typing import Union, Optional, Any
class QConfig(namedtuple('QConfig', ['activation', 'weight'])):
Reported by Pylint.
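In this file the relative imports precede `import torch`, which the import-order convention (stdlib, then third-party, then local) flags; Columns 84-89 on the `QConfigDynamic(...)` lines point past the usual length limit; and Line 83 Column 3 sits on a TODO, the typical trigger of a fixme warning (all assumptions). A sketch of the reordering and wrapping, with `torch.nn.Identity` standing in for the real observer factories:

# Sketch; assumes wrong-import-order, line-too-long, and fixme messages.
from collections import namedtuple

import torch  # third-party imports come before the local `.observer` ones

QConfigDynamic = namedtuple('QConfigDynamic', ['activation', 'weight'])

# Long constructor calls are wrapped to stay under the length limit:
float16_dynamic_qconfig = QConfigDynamic(
    activation=torch.nn.Identity,  # placeholder for PlaceholderObserver.with_args(...)
    weight=torch.nn.Identity,
)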