The following issues were found:
benchmarks/fastrnns/cells.py
52 issues
Line: 1
Column: 1
import torch
from typing import Tuple
from torch import Tensor
def milstm_cell(x, hx, cx, w_ih, w_hh, alpha, beta_i, beta_h, bias):
    Wx = x.mm(w_ih.t())
    Uz = hx.mm(w_hh.t())
Reported by Pylint.
Line: 3
Column: 1
import torch
from typing import Tuple
from torch import Tensor
def milstm_cell(x, hx, cx, w_ih, w_hh, alpha, beta_i, beta_h, bias):
    Wx = x.mm(w_ih.t())
    Uz = hx.mm(w_hh.t())
Reported by Pylint.
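Editor's note on the entries above: the report omits the message text, but Pylint findings at line 1, column 1 of a module are most often missing-module-docstring (C0114), and findings on the first few import lines are commonly import-ordering checks such as wrong-import-order (C0411). Assuming that is what is being flagged, a minimal sketch of a fix for the top of benchmarks/fastrnns/cells.py (the docstring wording is invented for illustration):
# Hypothetical header sketch, assuming C0114/C0411 are the messages reported.
"""Hand-written RNN cell implementations used by the fastrnns benchmarks."""
from typing import Tuple  # standard-library imports before third-party ones

import torch
from torch import Tensor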
Line: 27
Column: 15
    return hy, cy
def lstm_cell(input: Tensor, hidden: Tuple[Tensor, Tensor], w_ih: Tensor,
              w_hh: Tensor, b_ih: Tensor, b_hh: Tensor) -> Tuple[Tensor, Tensor]:
    hx, cx = hidden
    gates = torch.mm(input, w_ih.t()) + torch.mm(hx, w_hh.t()) + b_ih + b_hh
    ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
Reported by Pylint.
Line: 45
Column: 20
    return hy, cy
def flat_lstm_cell(input: Tensor, hx: Tensor, cx: Tensor, w_ih: Tensor,
                   w_hh: Tensor, b_ih: Tensor, b_hh: Tensor) -> Tuple[Tensor, Tensor]:
    gates = torch.mm(input, w_ih.t()) + torch.mm(hx, w_hh.t()) + b_ih + b_hh
    ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
Reported by Pylint.
Line: 97
Column: 14
    return hy, cy
def gru_cell(input, hidden, w_ih, w_hh, b_ih, b_hh):
    gi = torch.mm(input, w_ih.t()) + b_ih
    gh = torch.mm(hidden, w_hh.t()) + b_hh
    i_r, i_i, i_n = gi.chunk(3, 1)
    h_r, h_i, h_n = gh.chunk(3, 1)
Reported by Pylint.
Line: 111
Column: 19
    return hy
def rnn_relu_cell(input, hidden, w_ih, w_hh, b_ih, b_hh):
    igates = torch.mm(input, w_ih.t()) + b_ih
    hgates = torch.mm(hidden, w_hh.t()) + b_hh
    return torch.relu(igates + hgates)
Reported by Pylint.
Line: 117
Column: 19
    return torch.relu(igates + hgates)
def rnn_tanh_cell(input, hidden, w_ih, w_hh, b_ih, b_hh):
    igates = torch.mm(input, w_ih.t()) + b_ih
    hgates = torch.mm(hidden, w_hh.t()) + b_hh
    return torch.tanh(igates + hgates)
Reported by Pylint.
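The columns reported for lines 27, 45, 97, 111 and 117 all fall exactly on the `input` parameter of lstm_cell, flat_lstm_cell, gru_cell, rnn_relu_cell and rnn_tanh_cell, which matches Pylint's redefined-builtin (W0622) for shadowing the built-in input(). Assuming that is the message, two common ways to address it are sketched below; neither is taken from the repository itself.
import torch

# Option 1: rename the parameter (any keyword-argument callers must be updated).
def rnn_relu_cell(inp, hidden, w_ih, w_hh, b_ih, b_hh):
    igates = torch.mm(inp, w_ih.t()) + b_ih
    hgates = torch.mm(hidden, w_hh.t()) + b_hh
    return torch.relu(igates + hgates)

# Option 2: keep the name, which mirrors the torch.nn cell signatures,
# and silence the check locally.
def rnn_tanh_cell(input, hidden, w_ih, w_hh, b_ih, b_hh):  # pylint: disable=redefined-builtin
    igates = torch.mm(input, w_ih.t()) + b_ih
    hgates = torch.mm(hidden, w_hh.t()) + b_hh
    return torch.tanh(igates + hgates)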
Line: 1
Column: 1
import torch
from typing import Tuple
from torch import Tensor
def milstm_cell(x, hx, cx, w_ih, w_hh, alpha, beta_i, beta_h, bias):
    Wx = x.mm(w_ih.t())
    Uz = hx.mm(w_hh.t())
Reported by Pylint.
Line: 2
Column: 1
import torch
from typing import Tuple
from torch import Tensor
def milstm_cell(x, hx, cx, w_ih, w_hh, alpha, beta_i, beta_h, bias):
    Wx = x.mm(w_ih.t())
    Uz = hx.mm(w_hh.t())
Reported by Pylint.
Line: 6
Column: 1
from torch import Tensor
def milstm_cell(x, hx, cx, w_ih, w_hh, alpha, beta_i, beta_h, bias):
    Wx = x.mm(w_ih.t())
    Uz = hx.mm(w_hh.t())
    # Section 2.1 in https://arxiv.org/pdf/1606.06630.pdf
    gates = (alpha * Wx * Uz + beta_i * Wx + beta_h * Uz + bias)
Reported by Pylint.
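The excerpt stops at the gate pre-activation from Section 2.1 of the cited paper (the multiplicative term alpha * Wx * Uz plus the two additive terms and a bias). For readers unfamiliar with the benchmark, a sketch of how such a cell typically finishes is shown below; the chunk-into-four gate ordering and the final LSTM-style state update are assumptions for illustration, not a verbatim copy of the rest of the file.
import torch

def milstm_cell(x, hx, cx, w_ih, w_hh, alpha, beta_i, beta_h, bias):
    Wx = x.mm(w_ih.t())
    Uz = hx.mm(w_hh.t())
    # Section 2.1 in https://arxiv.org/pdf/1606.06630.pdf
    gates = alpha * Wx * Uz + beta_i * Wx + beta_h * Uz + bias
    # Assumed gate ordering; standard LSTM update from here on.
    ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
    ingate = torch.sigmoid(ingate)
    forgetgate = torch.sigmoid(forgetgate)
    cellgate = torch.tanh(cellgate)
    outgate = torch.sigmoid(outgate)
    cy = (forgetgate * cx) + (ingate * cellgate)
    hy = outgate * torch.tanh(cy)
    return hy, cy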
caffe2/python/operator_test/roi_align_rotated_op_test.py
52 issues
Line: 7
Column: 1
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
import copy
Reported by Pylint.
Line: 9
Column: 1
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
import copy
class RoIAlignRotatedOp(hu.HypothesisTestCase):
Reported by Pylint.
Line: 125
Column: 48
if axes[0] == axes[1] or np.absolute(axes[0] - axes[1]) == m.ndim:
    raise ValueError("Axes must be different.")
if (axes[0] >= m.ndim or axes[0] < -m.ndim or
        axes[1] >= m.ndim or axes[1] < -m.ndim):
    raise ValueError(
        "Axes={} out of range for array of ndim={}.".format(axes, m.ndim))
k %= 4
Reported by Pylint.
Line: 126
Column: 52
raise ValueError("Axes must be different.")
if (axes[0] >= m.ndim or axes[0] < -m.ndim or
axes[1] >= m.ndim or axes[1] < -m.ndim):
raise ValueError(
"Axes={} out of range for array of ndim={}.".format(axes, m.ndim))
k %= 4
Reported by Pylint.
Line: 34
Column: 72
        pooled_size=st.sampled_from([7, 14]),
        **hu.gcs
    )
    def test_horizontal_rois(self, H, W, C, num_rois, pooled_size, gc, dc):
        """
        Test that results match with RoIAlign when angle=0.
        """
        X = np.random.randn(1, C, H, W).astype(np.float32)
        R = np.zeros((num_rois, 6)).astype(np.float32)
Reported by Pylint.
Line: 92
Column: 58
        **hu.gcs
    )
    def test_simple_rotations(
        self, H, W, C, num_rois, pooled_size, angle, gc, dc
    ):
        """
        Test with right-angled rotations that don't need interpolation.
        """
        X = np.random.randn(1, C, H, W).astype(np.float32)
Reported by Pylint.
Line: 154
Column: 17
    try:
        indexer[axis] = slice(None, None, -1)
    except IndexError:
        raise ValueError("axis=%i is invalid for the %i-dimensional input array"
                         % (axis, m.ndim))
    return m[tuple(indexer)]
def roialign_ref(X, R):
    # `angle` denotes counter-clockwise rotation. Rotate the input
Reported by Pylint.
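Column 17 on line 154 points at a raise statement inside an except handler, which is where Pylint reports raise-missing-from (W0707). Assuming that is the message, the usual fix is to chain the original exception; the helper name and signature below are illustrative stand-ins for the test's local flip helper, not its actual code.
def flip(m, indexer, axis):
    # Sketch assuming the warning is raise-missing-from (W0707).
    try:
        indexer[axis] = slice(None, None, -1)
    except IndexError as err:
        raise ValueError(
            "axis=%i is invalid for the %i-dimensional input array" % (axis, m.ndim)
        ) from err
    return m[tuple(indexer)]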
Line: 1
Column: 1
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
Reported by Pylint.
Line: 11
Column: 1
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
import copy
class RoIAlignRotatedOp(hu.HypothesisTestCase):
    def bbox_xywh_to_xyxy(self, boxes):
        """
Reported by Pylint.
Line: 14
Column: 1
import copy
class RoIAlignRotatedOp(hu.HypothesisTestCase):
    def bbox_xywh_to_xyxy(self, boxes):
        """
        Convert from [center_x center_y w h] format to [x1 y1 x2 y2].
        """
        w, h = boxes[:, 2], boxes[:, 3]
Reported by Pylint.
torch/quantization/utils.py
52 issues
Line: 7
Column: 1
import warnings
import functools
import torch
from .quant_type import QuantType, quant_type_to_str
from typing import Tuple, Any
def get_combined_dict(default_dict, additional_dict):
    d = default_dict.copy()
    d.update(additional_dict)
Reported by Pylint.
Line: 16
Column: 23
    return d
def is_per_tensor(qscheme):
    return qscheme == torch.per_tensor_affine or \
        qscheme == torch.per_tensor_symmetric
def is_per_channel(qscheme):
    return qscheme in [torch.per_channel_affine,
                       torch.per_channel_affine_float_qparams,
Reported by Pylint.
Line: 17
Column: 20
def is_per_tensor(qscheme):
    return qscheme == torch.per_tensor_affine or \
        qscheme == torch.per_tensor_symmetric
def is_per_channel(qscheme):
    return qscheme in [torch.per_channel_affine,
                       torch.per_channel_affine_float_qparams,
                       torch.per_channel_symmetric]
Reported by Pylint.
Line: 20
Column: 24
        qscheme == torch.per_tensor_symmetric
def is_per_channel(qscheme):
    return qscheme in [torch.per_channel_affine,
                       torch.per_channel_affine_float_qparams,
                       torch.per_channel_symmetric]
def getattr_from_fqn(obj: Any, fqn: str) -> Any:
    """
Reported by Pylint.
Line: 21
Column: 24
def is_per_channel(qscheme):
    return qscheme in [torch.per_channel_affine,
                       torch.per_channel_affine_float_qparams,
                       torch.per_channel_symmetric]
def getattr_from_fqn(obj: Any, fqn: str) -> Any:
    """
    Given an obj and a fqn such as "foo.bar.baz", returns gm.foo.bar.baz.
Reported by Pylint.
Line: 22
Column: 24
def is_per_channel(qscheme):
    return qscheme in [torch.per_channel_affine,
                       torch.per_channel_affine_float_qparams,
                       torch.per_channel_symmetric]
def getattr_from_fqn(obj: Any, fqn: str) -> Any:
    """
    Given an obj and a fqn such as "foo.bar.baz", returns gm.foo.bar.baz.
    """
Reported by Pylint.
Line: 39
Column: 19
        return qparams
    if is_per_tensor(qscheme):
        qscheme = torch.per_tensor_affine
    elif is_per_channel(qscheme):
        # change symmetric to affine since we do not have symmetric
        # quantized Tensor
        if qscheme == torch.per_channel_symmetric:
            qscheme = torch.per_channel_affine
Reported by Pylint.
Line: 43
Column: 23
    elif is_per_channel(qscheme):
        # change symmetric to affine since we do not have symmetric
        # quantized Tensor
        if qscheme == torch.per_channel_symmetric:
            qscheme = torch.per_channel_affine
        qparams["axis"] = observer_or_fake_quant.ch_axis
    else:
        raise RuntimeError(f"Unrecognized qscheme: {qscheme}")
    # update qscheme, since we don't have symmetric quant qscheme
Reported by Pylint.
Line: 44
Column: 23
        # change symmetric to affine since we do not have symmetric
        # quantized Tensor
        if qscheme == torch.per_channel_symmetric:
            qscheme = torch.per_channel_affine
        qparams["axis"] = observer_or_fake_quant.ch_axis
    else:
        raise RuntimeError(f"Unrecognized qscheme: {qscheme}")
    # update qscheme, since we don't have symmetric quant qscheme
    # in quantized Tensor
Reported by Pylint.
Line: 91
Column: 42
""" Given a qconfig, decide if the activation needs to be
quantized or not, this includes quantizing to quint8, qint8 and float16
"""
return activation_dtype(qconfig) in [torch.quint8, torch.qint8, torch.float16]
def activation_is_int8_quantized(qconfig):
""" Given a qconfig, decide if the activation needs to be
quantized to int8 or not, this includes quantizing to quint8, qint8
"""
Reported by Pylint.
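Every column reported for this file lands on a torch.* attribute (torch.per_tensor_affine, torch.per_channel_affine, torch.quint8, and so on). That pattern is typical of Pylint's no-member (E1101) check tripping over the torch C extension rather than of real bugs; the same pattern shows up in the torch/nn/utils/rnn.py entries below. If that is what these are, the usual remedies are letting Pylint load the extension (extension-pkg-allow-list=torch, or the older extension-pkg-whitelist, in .pylintrc) or a narrowly scoped inline disable, sketched here:
import torch

# Scoped suppression sketch, assuming these entries are no-member (E1101)
# false positives on the torch C extension rather than genuine errors.
x = torch.zeros(3).to(dtype=torch.double)  # pylint: disable=no-member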
torch/nn/utils/rnn.py
52 issues
Line: 6
Column: 1
import torch
from torch import Tensor
from ... import _VF
from ..._jit_internal import Optional
from typing import List, Tuple
Reported by Pylint.
Line: 7
Column: 1
import torch
from torch import Tensor
from ... import _VF
from ..._jit_internal import Optional
from typing import List, Tuple
Reported by Pylint.
Line: 82
Column: 14
    def cuda(self, *args, **kwargs):
        # Tests to see if 'cuda' should be added to kwargs
        ex = torch.tensor((), dtype=self.data.dtype, device=self.data.device).to(*args, **kwargs)
        if ex.is_cuda:
            return self.to(*args, **kwargs)
        return self.to(*args, device='cuda', **kwargs)
    def cpu(self, *args, **kwargs):
Reported by Pylint.
Line: 89
Column: 14
    def cpu(self, *args, **kwargs):
        ex = torch.tensor((), dtype=self.data.dtype, device=self.data.device).to(*args, **kwargs)
        if ex.device.type == 'cpu':
            return self.to(*args, **kwargs)
        return self.to(*args, device='cpu', **kwargs)
    def double(self):
Reported by Pylint.
Line: 95
Column: 30
        return self.to(*args, device='cpu', **kwargs)
    def double(self):
        return self.to(dtype=torch.double)
    def float(self):
        return self.to(dtype=torch.float)
    def half(self):
Reported by Pylint.
Line: 98
Column: 30
        return self.to(dtype=torch.double)
    def float(self):
        return self.to(dtype=torch.float)
    def half(self):
        return self.to(dtype=torch.half)
    def long(self):
Reported by Pylint.
Line: 101
Column: 30
        return self.to(dtype=torch.float)
    def half(self):
        return self.to(dtype=torch.half)
    def long(self):
        return self.to(dtype=torch.long)
    def int(self):
Reported by Pylint.
Line: 104
Column: 30
        return self.to(dtype=torch.half)
    def long(self):
        return self.to(dtype=torch.long)
    def int(self):
        return self.to(dtype=torch.int)
    def short(self):
Reported by Pylint.
Line: 107
Column: 30
        return self.to(dtype=torch.long)
    def int(self):
        return self.to(dtype=torch.int)
    def short(self):
        return self.to(dtype=torch.short)
    def char(self):
Reported by Pylint.
Line: 110
Column: 30
        return self.to(dtype=torch.int)
    def short(self):
        return self.to(dtype=torch.short)
    def char(self):
        return self.to(dtype=torch.int8)
    def byte(self):
Reported by Pylint.
test/test_multiprocessing_spawn.py
52 issues
Line: 9
Column: 1
import time
import unittest
from torch.testing._internal.common_utils import (TestCase, run_tests, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN)
import torch.multiprocessing as mp
def test_success_func(i):
    pass
Reported by Pylint.
Line: 10
Column: 1
import unittest
from torch.testing._internal.common_utils import (TestCase, run_tests, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN)
import torch.multiprocessing as mp
def test_success_func(i):
    pass
Reported by Pylint.
Line: 100
Column: 9
        # After all processes (nproc=2) have joined it must return True
        mp_context.join(timeout=None)
        mp_context.join(timeout=None)
        self.assertTrue(mp_context.join(timeout=None))
    def test_first_argument_index(self):
        context = mp.get_context(self.start_method)
        queue = context.SimpleQueue()
        mp.start_processes(test_success_single_arg_func, args=(queue,), nprocs=2, start_method=self.start_method)
Reported by Pylint.
Line: 106
Column: 9
        context = mp.get_context(self.start_method)
        queue = context.SimpleQueue()
        mp.start_processes(test_success_single_arg_func, args=(queue,), nprocs=2, start_method=self.start_method)
        self.assertEqual([0, 1], sorted([queue.get(), queue.get()]))
    def test_exception_single(self):
        nprocs = 2
        for i in range(nprocs):
            with self.assertRaisesRegex(
Reported by Pylint.
Line: 111
Column: 18
    def test_exception_single(self):
        nprocs = 2
        for i in range(nprocs):
            with self.assertRaisesRegex(
                Exception,
                "\nValueError: legitimate exception from process %d$" % i,
            ):
                mp.start_processes(test_exception_single_func, args=(i,), nprocs=nprocs, start_method=self.start_method)
Reported by Pylint.
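Column 18 on line 111 lands on the %-formatted regex string. If the underlying message is consider-using-f-string (C0209), which Pylint raises on old-style % formatting, the modern equivalent would be the following; this is a guess at the message, since the report does not include it, and `i` below stands in for the loop variable from the test.
# Possible fix sketch, only if the entry is consider-using-f-string (C0209).
i = 0
pattern = f"\nValueError: legitimate exception from process {i}$"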
Line: 118
Column: 14
                mp.start_processes(test_exception_single_func, args=(i,), nprocs=nprocs, start_method=self.start_method)
    def test_exception_all(self):
        with self.assertRaisesRegex(
            Exception,
            "\nValueError: legitimate exception from process (0|1)$",
        ):
            mp.start_processes(test_exception_all_func, nprocs=2, start_method=self.start_method)
Reported by Pylint.
Line: 136
Column: 14
        if IS_WINDOWS:
            message = "process 0 terminated with exit code 22"
        with self.assertRaisesRegex(Exception, message):
            mp.start_processes(test_terminate_signal_func, nprocs=2, start_method=self.start_method)
    def test_terminate_exit(self):
        exitcode = 123
        with self.assertRaisesRegex(
Reported by Pylint.
Line: 141
Column: 14
    def test_terminate_exit(self):
        exitcode = 123
        with self.assertRaisesRegex(
            Exception,
            "process 0 terminated with exit code %d" % exitcode,
        ):
            mp.start_processes(test_terminate_exit_func, args=(exitcode,), nprocs=2, start_method=self.start_method)
Reported by Pylint.
Line: 149
Column: 14
    def test_success_first_then_exception(self):
        exitcode = 123
        with self.assertRaisesRegex(
            Exception,
            "ValueError: legitimate exception",
        ):
            mp.start_processes(test_success_first_then_exception_func, args=(exitcode,), nprocs=2, start_method=self.start_method)
Reported by Pylint.
Line: 187
Column: 13
            # alive after (nested_child_sleep / 2) seconds. By
            # extension, this test times out with an assertion error
            # after (nested_child_sleep / 2) seconds.
            self.assertLess(time.time() - start, nested_child_sleep / 2)
            time.sleep(0.1)
@unittest.skipIf(
NO_MULTIPROCESSING_SPAWN,
"Disabled for environments that don't support the spawn start method")
Reported by Pylint.
caffe2/python/ideep/transform_ideep_net.py
51 issues
Line: 191
Column: 9
        if len(blob_uses(net, current.output[0])) != 1:
            raise Exception("Failure to fuse")
        log.info("Fusing at index %s", i)
        mul_ = current
        add_ = next_
        batch_norm = copy.deepcopy(mul_)
        batch_norm.type = "SpatialBN"
        batch_norm.arg.extend([utils.MakeArgument("is_test", 1)])
Reported by Pylint.
Line: 279
Column: 29
        op.device_option.CopyFrom(device_option)
    new_net = caffe2_pb2.NetDef()
    new_net.ParseFromString(C.transform_optimizeForMKLDNN(net.SerializeToString()))
    return new_net
def Optimize(args):
    init_net = caffe2_pb2.NetDef()
Reported by Pylint.
Line: 29
Column: 5
    for (i, op) in reversed(list(enumerate(ops))):
        if blob in op.output:
            return i
    raise ValueError("Failed to find last producer of blob, %s", blob)
def blob_uses(net, blob):
    u = []
    for i, op in enumerate(net.op):
Reported by Pylint.
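The raise here passes ("Failed to find last producer of blob, %s", blob) as a tuple to ValueError instead of formatting the message, which is exactly what Pylint's raising-format-tuple (W0715) flags. Assuming that is the entry, the fix is to apply the formatting explicitly; the enclosing function's name is not shown in the excerpt, so last_producer below is a stand-in.
# Sketch assuming the warning is raising-format-tuple (W0715).
def last_producer(ops, blob):
    for i, op in reversed(list(enumerate(ops))):
        if blob in op.output:
            return i
    raise ValueError("Failed to find last producer of blob, %s" % blob)
The raise Exception("Model contains SpatialBN op after fusion: %s", next_net) call shown further down in the same file has the same shape and would take the same one-line fix.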
Line: 140
Column: 9
    A_ = A.reshape(-1, 1, 1, 1) if conv.type == "Conv" else \
        A.reshape(1, -1, 1, 1)
    C = conv_bias * A + B
    Q = conv_weight * A_
    params[fused_conv.input[1]] = Q
    params[fused_conv.input[2]] = C
    new_ops = net.op[:i] + [fused_conv] + net.op[j + 1:]
Reported by Pylint.
Line: 171
Column: 17
any(op.type == "SpatialBN" for op in next_net.op) and
not ignore_failure
):
raise Exception(
"Model contains SpatialBN op after fusion: %s", next_net)
return (next_net, next_params, removed_tensors)
net, params, removed_tensors = (next_net, next_params, removed_tensors)
Reported by Pylint.
Line: 174
Column: 22
            raise Exception(
                "Model contains SpatialBN op after fusion: %s", next_net)
        return (next_net, next_params, removed_tensors)
    net, params, removed_tensors = (next_net, next_params, removed_tensors)
def fuse_first_mul_add(net, params, removed_tensors):
    net = copy.deepcopy(net)
    params = copy.deepcopy(params)
Reported by Pylint.
Line: 200
Column: 34
batch_norm.arg.extend([utils.MakeArgument("epsilon", float(1e-9))])
def s(x):
return "{}{}".format(add_.output[0], x)
fake_mean = s("_mean")
fake_var = s("_var")
del batch_norm.input[:]
batch_norm.input.extend([mul_.input[0],
Reported by Pylint.
Line: 230
Column: 22
            fuse_first_mul_add(net, params, removed_tensors)
        if len(next_net.op) == len(net.op):
            return (next_net, next_params, removed_tensors)
        net, params, removed_tensors = (next_net, next_params, removed_tensors)
def add_tensor(net, name, blob):
    ''' Create an operator to store the tensor 'blob',
        run the operator to put the blob to workspace.
Reported by Pylint.
Line: 283
Column: 14
    return new_net
def Optimize(args):
    init_net = caffe2_pb2.NetDef()
    predict_net = caffe2_pb2.NetDef()
    init_net.ParseFromString(args.init_net.read())
    predict_net.ParseFromString(args.pred_net.read())
Reported by Pylint.
Line: 1
Column: 1
import argparse
import copy
import json
Reported by Pylint.
tools/autograd/gen_trace_type.py
51 issues
Line: 47
Column: 3
    'convolution', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d',
    'conv_transpose2d', 'conv_transpose3d', 'lstm_cell', 'gru_cell',
    'rnn_tanh_cell', 'rnn_relu_cell',
    # FIXME: figure out a better way when we support sparse tensors in jit
    '_coalesced',
}
def should_trace(f: NativeFunction) -> bool:
    # Operations involving Storage or Type are not traceable at the moment
Reported by Pylint.
Line: 81
Column: 3
}
def format_trace_op_name(f: NativeFunction) -> str:
    # TODO: byte-for-byte compatible with old codegen behavior - should clean up
    if f.func.kind() in (SchemaKind.functional, SchemaKind.out) or f.func.name.name.dunder_method:
        # special case for *_out functions: the in-place and out-of-place ops
        # are overloaded with the same name in the JIT
        trace_name = str(f.func.name.name)
        trace_name = RENAME_TRACE.get(trace_name, trace_name)
Reported by Pylint.
Line: 128
Column: 3
        # *_out functions take the result as a separate argument, but we don't want to
        # trace that argument directly. Instead, we trace its TensorOptions.
        # So first, we need to remove the out argument from the list of arguments to trace.
        # TODO: byte-for-byte compatible with old codegen behavior - it's incorrect to assume
        # there is only one output argument.
        args = args[:-1]
    trace_inputs = itertools.chain.from_iterable(dispatch_trace_input(arg) for arg in args)
Reported by Pylint.
Line: 137
Column: 3
    if f.func.is_out_fn():
        # for *_out functions, handle the result argument differently for inplace/outplace.
        # For inplace: just add the input to the end to confirm with the JIT schema
        name = f.func.arguments.out[0].name  # TODO: old codegen behavior - should fix
        inplace = ADD_TRACE_INPUT.substitute(name=name, input=name)
        # for outplace: do nothing, except if the function is a factory.
        # Factories are a bit special because their out-of-place overloads
        # take an extra TensorOptions argument, which is missing in the _out function
Reported by Pylint.
Line: 236
Column: 3
    if not should_trace(f):
        return ''
    # TODO: clean up old codegen behavior
    is_inplace = f.func.kind() in (SchemaKind.inplace, SchemaKind.out) and not f.func.name.name.dunder_method
    add_args = RENAME_TRACE_ADD_ARGS.get(f.func.name.name.base, '') if is_inplace else ''
    additional_inputs = SELECT.substitute(
        cond='tracer_state->force_outplace',
        true=add_args,
Reported by Pylint.
Line: 1
Column: 1
import itertools
from typing import List, Sequence, Union, Dict
from tools.codegen.api.types import CppSignatureGroup, DispatcherSignature
from tools.codegen.api import cpp
from tools.codegen.code_template import CodeTemplate
from tools.codegen.context import with_native_function
from tools.codegen.gen import parse_native_yaml, FileManager
from tools.codegen.model import (Argument, NativeFunction, SchemaKind,
Reported by Pylint.
Line: 51
Column: 1
    '_coalesced',
}
def should_trace(f: NativeFunction) -> bool:
    # Operations involving Storage or Type are not traceable at the moment
    if any(str(arg.type) in {'Storage', 'Type', 'ConstQuantizerPtr'}
           for arg in f.func.schema_order_arguments()):
        return False
    # We can't trace functions which don't have any Tensor or TensorList returns
Reported by Pylint.
Line: 51
Column: 1
    '_coalesced',
}
def should_trace(f: NativeFunction) -> bool:
    # Operations involving Storage or Type are not traceable at the moment
    if any(str(arg.type) in {'Storage', 'Type', 'ConstQuantizerPtr'}
           for arg in f.func.schema_order_arguments()):
        return False
    # We can't trace functions which don't have any Tensor or TensorList returns
Reported by Pylint.
Line: 80
Column: 1
    'fill': 'full_like',  # replacing aten::fill_ with aten::full_like
}
def format_trace_op_name(f: NativeFunction) -> str:
    # TODO: byte-for-byte compatible with old codegen behavior - should clean up
    if f.func.kind() in (SchemaKind.functional, SchemaKind.out) or f.func.name.name.dunder_method:
        # special case for *_out functions: the in-place and out-of-place ops
        # are overloaded with the same name in the JIT
        trace_name = str(f.func.name.name)
Reported by Pylint.
Line: 80
Column: 1
    'fill': 'full_like',  # replacing aten::fill_ with aten::full_like
}
def format_trace_op_name(f: NativeFunction) -> str:
    # TODO: byte-for-byte compatible with old codegen behavior - should clean up
    if f.func.kind() in (SchemaKind.functional, SchemaKind.out) or f.func.name.name.dunder_method:
        # special case for *_out functions: the in-place and out-of-place ops
        # are overloaded with the same name in the JIT
        trace_name = str(f.func.name.name)
Reported by Pylint.
benchmarks/operator_benchmark/pt/qconv_test.py
51 issues
Line: 3
Column: 1
import operator_benchmark as op_bench
import torch
import torch.nn.quantized as nnq
from pt import configs
"""
Microbenchmarks for qConv operators.
Reported by Pylint.
Line: 4
Column: 1
import operator_benchmark as op_bench
import torch
import torch.nn.quantized as nnq
from pt import configs
"""
Microbenchmarks for qConv operators.
Reported by Pylint.
Line: 6
Column: 1
import torch
import torch.nn.quantized as nnq
from pt import configs
"""
Microbenchmarks for qConv operators.
"""
Reported by Pylint.
Line: 12
Column: 24
Microbenchmarks for qConv operators.
"""
class QConv1dBenchmark(op_bench.TorchBenchmarkBase):
    # def init(self, N, IC, OC, L, G, kernel, stride, pad):
    def init(self, IC, OC, kernel, stride, N, L, device):
        G = 1
        pad = 0
        self.scale = 1.0 / 255
Reported by Pylint.
Line: 41
Column: 24
        return self.qconv1d(input)
class QConv2dBenchmark(op_bench.TorchBenchmarkBase):
    # def init(self, N, IC, OC, H, W, G, kernel, stride, pad):
    def init(self, IC, OC, kernel, stride, N, H, W, G, pad, device):
        # super(QConv2dBenchmark, self).init(N, IC, OC, (H, W), G, (kernel, kernel), stride, pad)
        self.scale = 1.0 / 255
Reported by Pylint.
Line: 70
Column: 1
        return self.qconv2d(input)
op_bench.generate_pt_test(configs.remove_cuda(configs.conv_1d_configs_short + configs.conv_1d_configs_long), QConv1dBenchmark)
op_bench.generate_pt_test(configs.remove_cuda(configs.conv_2d_configs_short + configs.conv_2d_configs_long), QConv2dBenchmark)
if __name__ == "__main__":
    op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 71
Column: 1
op_bench.generate_pt_test(configs.remove_cuda(configs.conv_1d_configs_short + configs.conv_1d_configs_long), QConv1dBenchmark)
op_bench.generate_pt_test(configs.remove_cuda(configs.conv_2d_configs_short + configs.conv_2d_configs_long), QConv2dBenchmark)
if __name__ == "__main__":
    op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 8
Column: 1
from pt import configs
"""
Microbenchmarks for qConv operators.
"""
class QConv1dBenchmark(op_bench.TorchBenchmarkBase):
    # def init(self, N, IC, OC, L, G, kernel, stride, pad):
Reported by Pylint.
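A string literal that sits after the imports is an expression statement rather than a module docstring, so Pylint typically reports it as pointless-string-statement (W0105); the line 8, column 1 position is consistent with that. Assuming that is the entry, moving the text above the imports turns it into a real docstring:
# Sketch assuming the entry is pointless-string-statement (W0105).
"""
Microbenchmarks for qConv operators.
"""

import operator_benchmark as op_bench
import torch
import torch.nn.quantized as nnq
from pt import configs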
Line: 14
Column: 50
class QConv1dBenchmark(op_bench.TorchBenchmarkBase):
    # def init(self, N, IC, OC, L, G, kernel, stride, pad):
    def init(self, IC, OC, kernel, stride, N, L, device):
        G = 1
        pad = 0
        self.scale = 1.0 / 255
        self.zero_point = 0
        X = torch.randn(N, IC, L, dtype=torch.float32)
Reported by Pylint.
Line: 17
Column: 9
    def init(self, IC, OC, kernel, stride, N, L, device):
        G = 1
        pad = 0
        self.scale = 1.0 / 255
        self.zero_point = 0
        X = torch.randn(N, IC, L, dtype=torch.float32)
        qX = torch.quantize_per_tensor(
            X, scale=self.scale, zero_point=self.zero_point, dtype=torch.quint8
        )
Reported by Pylint.
test/test_metal.py
51 issues
Line: 1
Column: 1
import torch
from torch.nn import functional as F
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing import FileCheck
import io
class TestMetalRewritePass(TestCase):
@staticmethod
Reported by Pylint.
Line: 2
Column: 1
import torch
from torch.nn import functional as F
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing import FileCheck
import io
class TestMetalRewritePass(TestCase):
@staticmethod
Reported by Pylint.
Line: 4
Column: 1
import torch
from torch.nn import functional as F
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing import FileCheck
import io
class TestMetalRewritePass(TestCase):
@staticmethod
Reported by Pylint.
Line: 5
Column: 1
from torch.nn import functional as F
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing import FileCheck
import io
class TestMetalRewritePass(TestCase):
@staticmethod
def validate_transformed_module(
Reported by Pylint.
Line: 10
Column: 5
class TestMetalRewritePass(TestCase):
    @staticmethod
    def validate_transformed_module(
            # To please flake
            self,
            pattern_count_map,
            data_shape,
            prepack_removal=False,
Reported by Pylint.
Line: 21
Column: 9
        scripted_model = torch.jit.script(module_instance)
        scripted_model.eval()
        input_data = torch.normal(1, 20, size=data_shape)
        ref_result = scripted_model(input_data)
        torch._C._jit_pass_metal_insert_prepacked_ops(scripted_model._c)
        if fuse_clamping_ops or prepack_removal:
            scripted_model._c = torch._C._freeze_module(scripted_model._c)
        if fuse_clamping_ops:
            torch._C._jit_pass_metal_fuse_clamp_w_prepacked_conv(scripted_model._c)
Reported by Pylint.
Line: 22
Column: 9
        scripted_model.eval()
        input_data = torch.normal(1, 20, size=data_shape)
        ref_result = scripted_model(input_data)
        torch._C._jit_pass_metal_insert_prepacked_ops(scripted_model._c)
        if fuse_clamping_ops or prepack_removal:
            scripted_model._c = torch._C._freeze_module(scripted_model._c)
        if fuse_clamping_ops:
            torch._C._jit_pass_metal_fuse_clamp_w_prepacked_conv(scripted_model._c)
        if prepack_removal:
Reported by Pylint.
Line: 22
Column: 9
        scripted_model.eval()
        input_data = torch.normal(1, 20, size=data_shape)
        ref_result = scripted_model(input_data)
        torch._C._jit_pass_metal_insert_prepacked_ops(scripted_model._c)
        if fuse_clamping_ops or prepack_removal:
            scripted_model._c = torch._C._freeze_module(scripted_model._c)
        if fuse_clamping_ops:
            torch._C._jit_pass_metal_fuse_clamp_w_prepacked_conv(scripted_model._c)
        if prepack_removal:
Reported by Pylint.
Line: 22
Column: 55
        scripted_model.eval()
        input_data = torch.normal(1, 20, size=data_shape)
        ref_result = scripted_model(input_data)
        torch._C._jit_pass_metal_insert_prepacked_ops(scripted_model._c)
        if fuse_clamping_ops or prepack_removal:
            scripted_model._c = torch._C._freeze_module(scripted_model._c)
        if fuse_clamping_ops:
            torch._C._jit_pass_metal_fuse_clamp_w_prepacked_conv(scripted_model._c)
        if prepack_removal:
Reported by Pylint.
Line: 24
Column: 33
        ref_result = scripted_model(input_data)
        torch._C._jit_pass_metal_insert_prepacked_ops(scripted_model._c)
        if fuse_clamping_ops or prepack_removal:
            scripted_model._c = torch._C._freeze_module(scripted_model._c)
        if fuse_clamping_ops:
            torch._C._jit_pass_metal_fuse_clamp_w_prepacked_conv(scripted_model._c)
        if prepack_removal:
            torch._C._jit_pass_metal_fold_prepacking_ops(scripted_model._c)
Reported by Pylint.
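The columns in this file consistently point at torch._C._jit_pass_* calls and the scripted_model._c attribute, which is where Pylint raises protected-access (W0212). The test reaches into those internals on purpose, so assuming W0212 is the message, the usual resolution is a scoped disable rather than a refactor; the freeze helper below is a hypothetical wrapper used only to illustrate where the pragma goes.
import torch

# Sketch assuming the entries are protected-access (W0212).
def freeze(scripted_model):
    # pylint: disable=protected-access
    scripted_model._c = torch._C._freeze_module(scripted_model._c)
    return scripted_model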
caffe2/python/operator_test/fused_nbit_rowwise_conversion_ops_test.py
51 issues
Line: 7
Column: 1
import struct
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
from caffe2.python.operator_test.fused_nbit_rowwise_test_helper import (
    _compress_uniform_simplified,
    param_search_greedy,
Reported by Pylint.
Line: 14
Column: 1
    _compress_uniform_simplified,
    param_search_greedy,
)
from hypothesis import assume, given, settings
# Eigen/Python round 0.5 away from 0, Numpy rounds to even
round_to_nearest = np.vectorize(round)
Reported by Pylint.
Line: 318
Column: 9
        minmax_dequantized_data = workspace.FetchBlob("minmax_dequantized_data")
        greedy_dequantized_data = workspace.FetchBlob("greedy_dequantized_data")
        diff_minmax = np.abs(input_data - minmax_dequantized_data)
        l2_minmax = np.linalg.norm(input_data - minmax_dequantized_data, axis=1)
        diff_greedy = np.abs(input_data - greedy_dequantized_data)
        l2_greedy = np.linalg.norm(input_data - greedy_dequantized_data, axis=1)
        for i in range(input_data.shape[0]):
Reported by Pylint.
Line: 320
Column: 9
        diff_minmax = np.abs(input_data - minmax_dequantized_data)
        l2_minmax = np.linalg.norm(input_data - minmax_dequantized_data, axis=1)
        diff_greedy = np.abs(input_data - greedy_dequantized_data)
        l2_greedy = np.linalg.norm(input_data - greedy_dequantized_data, axis=1)
        for i in range(input_data.shape[0]):
            # Compare with Python reference greedy search implementation
            xmin, xmax = param_search_greedy(
Reported by Pylint.
Line: 1
Column: 1
import math
import struct
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
Reported by Pylint.
Line: 21
Column: 1
round_to_nearest = np.vectorize(round)
def bytes_to_half_floats(byte_matrix):
    floats = np.empty([np.shape(byte_matrix)[0], 1], dtype=np.float16)
    for i, byte_values in enumerate(byte_matrix):
        (floats[i],) = np.frombuffer(
            memoryview(byte_values).tobytes(), dtype=np.float16
        )
Reported by Pylint.
Line: 30
Column: 1
    return floats
def half_floats_to_bytes(floats):
    byte_matrix = np.empty([np.shape(floats)[0], 2], dtype=np.uint8)
    for i, value in enumerate(floats):
        assert isinstance(value, np.float16), (value, floats)
        byte_matrix[i] = np.frombuffer(
            memoryview(np.array([value])).tobytes(), dtype=np.uint8
Reported by Pylint.
Line: 33
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
def half_floats_to_bytes(floats):
    byte_matrix = np.empty([np.shape(floats)[0], 2], dtype=np.uint8)
    for i, value in enumerate(floats):
        assert isinstance(value, np.float16), (value, floats)
        byte_matrix[i] = np.frombuffer(
            memoryview(np.array([value])).tobytes(), dtype=np.uint8
        )
    return byte_matrix
Reported by Bandit.
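The linked Bandit rule (B101) flags the bare assert because assertions are stripped when Python runs with -O, so the isinstance check silently disappears in optimized runs. If the validation should always execute, an explicit exception preserves it; in a test-only helper, many teams instead leave the assert in place and skip B101 in the Bandit configuration.
import numpy as np

# Sketch that keeps the validation under `python -O`, addressing Bandit B101.
def half_floats_to_bytes(floats):
    byte_matrix = np.empty([np.shape(floats)[0], 2], dtype=np.uint8)
    for i, value in enumerate(floats):
        if not isinstance(value, np.float16):
            raise TypeError("expected np.float16, got %r (from %r)" % (value, floats))
        byte_matrix[i] = np.frombuffer(
            memoryview(np.array([value])).tobytes(), dtype=np.uint8
        )
    return byte_matrix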
Line: 40
Column: 1
    return byte_matrix
def int8_to_bytes(int8s):
    byte_matrix = np.empty([np.shape(int8s)[0], 1], dtype=np.uint8)
    for i, value in enumerate(int8s):
        assert isinstance(value, np.int8), (value, int8s)
        as_bytes = struct.pack("b", value)
        # In Python3 bytes will be a list of int, in Python2 a list of string
Reported by Pylint.
Line: 43
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
def int8_to_bytes(int8s):
    byte_matrix = np.empty([np.shape(int8s)[0], 1], dtype=np.uint8)
    for i, value in enumerate(int8s):
        assert isinstance(value, np.int8), (value, int8s)
        as_bytes = struct.pack("b", value)
        # In Python3 bytes will be a list of int, in Python2 a list of string
        if isinstance(as_bytes[0], int):
            byte_matrix[i] = list(as_bytes)
        else:
Reported by Bandit.