The following issues were found:
torch/quantization/utils.py
52 issues
Line: 7
Column: 1
import warnings
import functools
import torch
from .quant_type import QuantType, quant_type_to_str
from typing import Tuple, Any
def get_combined_dict(default_dict, additional_dict):
d = default_dict.copy()
d.update(additional_dict)
Reported by Pylint.
Line: 16
Column: 23
return d
def is_per_tensor(qscheme):
return qscheme == torch.per_tensor_affine or \
qscheme == torch.per_tensor_symmetric
def is_per_channel(qscheme):
return qscheme in [torch.per_channel_affine,
torch.per_channel_affine_float_qparams,
Reported by Pylint.
Line: 17
Column: 20
def is_per_tensor(qscheme):
return qscheme == torch.per_tensor_affine or \
qscheme == torch.per_tensor_symmetric
def is_per_channel(qscheme):
return qscheme in [torch.per_channel_affine,
torch.per_channel_affine_float_qparams,
torch.per_channel_symmetric]
Reported by Pylint.
Line: 20
Column: 24
qscheme == torch.per_tensor_symmetric
def is_per_channel(qscheme):
return qscheme in [torch.per_channel_affine,
torch.per_channel_affine_float_qparams,
torch.per_channel_symmetric]
def getattr_from_fqn(obj: Any, fqn: str) -> Any:
"""
Reported by Pylint.
Line: 21
Column: 24
def is_per_channel(qscheme):
return qscheme in [torch.per_channel_affine,
torch.per_channel_affine_float_qparams,
torch.per_channel_symmetric]
def getattr_from_fqn(obj: Any, fqn: str) -> Any:
"""
Given an obj and a fqn such as "foo.bar.baz", returns gm.foo.bar.baz.
Reported by Pylint.
Line: 22
Column: 24
def is_per_channel(qscheme):
return qscheme in [torch.per_channel_affine,
torch.per_channel_affine_float_qparams,
torch.per_channel_symmetric]
def getattr_from_fqn(obj: Any, fqn: str) -> Any:
"""
Given an obj and a fqn such as "foo.bar.baz", returns gm.foo.bar.baz.
"""
Reported by Pylint.
Line: 39
Column: 19
return qparams
if is_per_tensor(qscheme):
qscheme = torch.per_tensor_affine
elif is_per_channel(qscheme):
# change symmetric to affine since we do not have symmetric
# quantized Tensor
if qscheme == torch.per_channel_symmetric:
qscheme = torch.per_channel_affine
Reported by Pylint.
Line: 43
Column: 23
elif is_per_channel(qscheme):
# change symmetric to affine since we do not have symmetric
# quantized Tensor
if qscheme == torch.per_channel_symmetric:
qscheme = torch.per_channel_affine
qparams["axis"] = observer_or_fake_quant.ch_axis
else:
raise RuntimeError(f"Unrecognized qscheme: {qscheme}")
# update qscheme, since we don't have symmetric quant qscheme
Reported by Pylint.
Line: 44
Column: 23
# change symmetric to affine since we do not have symmetric
# quantized Tensor
if qscheme == torch.per_channel_symmetric:
qscheme = torch.per_channel_affine
qparams["axis"] = observer_or_fake_quant.ch_axis
else:
raise RuntimeError(f"Unrecognized qscheme: {qscheme}")
# update qscheme, since we don't have symmetric quant qscheme
# in quantized Tensor
Reported by Pylint.
Line: 91
Column: 42
""" Given a qconfig, decide if the activation needs to be
quantized or not, this includes quantizing to quint8, qint8 and float16
"""
return activation_dtype(qconfig) in [torch.quint8, torch.qint8, torch.float16]
def activation_is_int8_quantized(qconfig):
""" Given a qconfig, decide if the activation needs to be
quantized to int8 or not, this includes quantizing to quint8, qint8
"""
Reported by Pylint.
caffe2/python/operator_test/detectron_keypoints.py
52 issues
Line: 1
Column: 1
try:
import cv2
except ImportError:
pass # skip if opencv is not available
Reported by Pylint.
Line: 18
Column: 1
_INFERENCE_MIN_SIZE = 0 # cfg.KRCNN.INFERENCE_MIN_SIZE
def heatmaps_to_keypoints(maps, rois):
"""Extracts predicted keypoint locations from heatmaps. Output has shape
(#rois, 4, #keypoints) with the 4 rows corresponding to (x, y, logit, prob)
for each keypoint.
"""
# This function converts a discrete image coordinate in a HEATMAP_SIZE x
Reported by Pylint.
Line: 62
Column: 9
# Bring back to CHW
roi_map = np.transpose(roi_map, [2, 0, 1])
roi_map_probs = scores_to_probs(roi_map.copy())
w = roi_map.shape[2]
for k in range(num_keypoints):
pos = roi_map[k, :, :].argmax()
x_int = pos % w
y_int = (pos - x_int) // w
assert (roi_map_probs[k, y_int, x_int] ==
Reported by Pylint.
Line: 67
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
pos = roi_map[k, :, :].argmax()
x_int = pos % w
y_int = (pos - x_int) // w
assert (roi_map_probs[k, y_int, x_int] ==
roi_map_probs[k, :, :].max())
x = (x_int + 0.5) * width_correction
y = (y_int + 0.5) * height_correction
xy_preds[i, 0, k] = x + offset_x[i]
xy_preds[i, 1, k] = y + offset_y[i]
Reported by Bandit.
Line: 69
Column: 13
y_int = (pos - x_int) // w
assert (roi_map_probs[k, y_int, x_int] ==
roi_map_probs[k, :, :].max())
x = (x_int + 0.5) * width_correction
y = (y_int + 0.5) * height_correction
xy_preds[i, 0, k] = x + offset_x[i]
xy_preds[i, 1, k] = y + offset_y[i]
xy_preds[i, 2, k] = roi_map[k, y_int, x_int]
xy_preds[i, 3, k] = roi_map_probs[k, y_int, x_int]
Reported by Pylint.
Line: 70
Column: 13
assert (roi_map_probs[k, y_int, x_int] ==
roi_map_probs[k, :, :].max())
x = (x_int + 0.5) * width_correction
y = (y_int + 0.5) * height_correction
xy_preds[i, 0, k] = x + offset_x[i]
xy_preds[i, 1, k] = y + offset_y[i]
xy_preds[i, 2, k] = roi_map[k, y_int, x_int]
xy_preds[i, 3, k] = roi_map_probs[k, y_int, x_int]
Reported by Pylint.
Line: 82
Column: 9
def scores_to_probs(scores):
"""Transforms CxHxW of scores to probabilities spatially."""
channels = scores.shape[0]
for c in range(channels):
temp = scores[c, :, :]
max_score = temp.max()
temp = np.exp(temp - max_score) / np.sum(np.exp(temp - max_score))
scores[c, :, :] = temp
return scores
Reported by Pylint.
Line: 90
Column: 1
return scores
def approx_heatmap_keypoint(heatmaps_in, bboxes_in):
'''
Mask R-CNN uses bicubic upscaling before taking the maximum of the heat map
for keypoints. We are using bilinear upscaling, which means we can approximate
the maximum coordinate with the low dimension maximum coordinates. We would like
to avoid bicubic upscaling, because it is computationally expensive. Brown and
Reported by Pylint.
Line: 90
Column: 1
return scores
def approx_heatmap_keypoint(heatmaps_in, bboxes_in):
'''
Mask R-CNN uses bicubic upscaling before taking the maximum of the heat map
for keypoints. We are using bilinear upscaling, which means we can approximate
the maximum coordinate with the low dimension maximum coordinates. We would like
to avoid bicubic upscaling, because it is computationally expensive. Brown and
Reported by Pylint.
Line: 109
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
and super simple, though we need a linear solver.
'''
assert len(bboxes_in.shape) == 2
N = bboxes_in.shape[0]
assert bboxes_in.shape[1] == 4
assert len(heatmaps_in.shape) == 4
assert heatmaps_in.shape[0] == N
keypoint_count = heatmaps_in.shape[1]
Reported by Bandit.
benchmarks/tensorexpr/concat.py
52 issues
Line: 1
Column: 1
from . import benchmark
import numpy as np
import torch
class Concat2D2InputBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, I1_D1, I1_D2, I2_D1, I2_D2, concat_dim):
super().__init__(mode, device, dtype)
self.I1_D1 = I1_D1
self.I1_D2 = I1_D2
Reported by Pylint.
Line: 3
Column: 1
from . import benchmark
import numpy as np
import torch
class Concat2D2InputBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, I1_D1, I1_D2, I2_D1, I2_D2, concat_dim):
super().__init__(mode, device, dtype)
self.I1_D1 = I1_D1
self.I1_D2 = I1_D2
Reported by Pylint.
Line: 24
Column: 88
return y
def reference(self):
return np.concatenate((self.numpy(self.input1), self.numpy(self.input2)), axis=concat_dim)
def config(self):
return [self.I1_D1, self.I1_D2, self.I2_D1, self.I2_D2, self.concat_dim]
@staticmethod
Reported by Pylint.
Line: 85
Column: 88
return z
def reference(self):
return np.concatenate((self.numpy(self.input1), self.numpy(self.input2)), axis=concat_dim)
def config(self):
return [self.I1_D1, self.I1_D2, self.I2_D1, self.I2_D2, self.concat_dim]
@staticmethod
Reported by Pylint.
Line: 1
Column: 1
from . import benchmark
import numpy as np
import torch
class Concat2D2InputBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, I1_D1, I1_D2, I2_D1, I2_D2, concat_dim):
super().__init__(mode, device, dtype)
self.I1_D1 = I1_D1
self.I1_D2 = I1_D2
Reported by Pylint.
Line: 2
Column: 1
from . import benchmark
import numpy as np
import torch
class Concat2D2InputBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, I1_D1, I1_D2, I2_D1, I2_D2, concat_dim):
super().__init__(mode, device, dtype)
self.I1_D1 = I1_D1
self.I1_D2 = I1_D2
Reported by Pylint.
Line: 3
Column: 1
from . import benchmark
import numpy as np
import torch
class Concat2D2InputBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, I1_D1, I1_D2, I2_D1, I2_D2, concat_dim):
super().__init__(mode, device, dtype)
self.I1_D1 = I1_D1
self.I1_D2 = I1_D2
Reported by Pylint.
Line: 5
Column: 1
import numpy as np
import torch
class Concat2D2InputBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, I1_D1, I1_D2, I2_D1, I2_D2, concat_dim):
super().__init__(mode, device, dtype)
self.I1_D1 = I1_D1
self.I1_D2 = I1_D2
self.I2_D1 = I2_D1
Reported by Pylint.
Line: 5
Column: 1
import numpy as np
import torch
class Concat2D2InputBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, I1_D1, I1_D2, I2_D1, I2_D2, concat_dim):
super().__init__(mode, device, dtype)
self.I1_D1 = I1_D1
self.I1_D2 = I1_D2
self.I2_D1 = I2_D1
Reported by Pylint.
Line: 6
Column: 5
import torch
class Concat2D2InputBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, I1_D1, I1_D2, I2_D1, I2_D2, concat_dim):
super().__init__(mode, device, dtype)
self.I1_D1 = I1_D1
self.I1_D2 = I1_D2
self.I2_D1 = I2_D1
self.I2_D2 = I2_D2
Reported by Pylint.
torch/nn/utils/rnn.py
52 issues
Line: 6
Column: 1
import torch
from torch import Tensor
from ... import _VF
from ..._jit_internal import Optional
from typing import List, Tuple
Reported by Pylint.
Line: 7
Column: 1
import torch
from torch import Tensor
from ... import _VF
from ..._jit_internal import Optional
from typing import List, Tuple
Reported by Pylint.
Line: 82
Column: 14
def cuda(self, *args, **kwargs):
# Tests to see if 'cuda' should be added to kwargs
ex = torch.tensor((), dtype=self.data.dtype, device=self.data.device).to(*args, **kwargs)
if ex.is_cuda:
return self.to(*args, **kwargs)
return self.to(*args, device='cuda', **kwargs)
def cpu(self, *args, **kwargs):
Reported by Pylint.
Line: 89
Column: 14
def cpu(self, *args, **kwargs):
ex = torch.tensor((), dtype=self.data.dtype, device=self.data.device).to(*args, **kwargs)
if ex.device.type == 'cpu':
return self.to(*args, **kwargs)
return self.to(*args, device='cpu', **kwargs)
def double(self):
Reported by Pylint.
Line: 95
Column: 30
return self.to(*args, device='cpu', **kwargs)
def double(self):
return self.to(dtype=torch.double)
def float(self):
return self.to(dtype=torch.float)
def half(self):
Reported by Pylint.
Line: 98
Column: 30
return self.to(dtype=torch.double)
def float(self):
return self.to(dtype=torch.float)
def half(self):
return self.to(dtype=torch.half)
def long(self):
Reported by Pylint.
Line: 101
Column: 30
return self.to(dtype=torch.float)
def half(self):
return self.to(dtype=torch.half)
def long(self):
return self.to(dtype=torch.long)
def int(self):
Reported by Pylint.
Line: 104
Column: 30
return self.to(dtype=torch.half)
def long(self):
return self.to(dtype=torch.long)
def int(self):
return self.to(dtype=torch.int)
def short(self):
Reported by Pylint.
Line: 107
Column: 30
return self.to(dtype=torch.long)
def int(self):
return self.to(dtype=torch.int)
def short(self):
return self.to(dtype=torch.short)
def char(self):
Reported by Pylint.
Line: 110
Column: 30
return self.to(dtype=torch.int)
def short(self):
return self.to(dtype=torch.short)
def char(self):
return self.to(dtype=torch.int8)
def byte(self):
Reported by Pylint.
test/test_multiprocessing_spawn.py
52 issues
Line: 9
Column: 1
import time
import unittest
from torch.testing._internal.common_utils import (TestCase, run_tests, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN)
import torch.multiprocessing as mp
def test_success_func(i):
pass
Reported by Pylint.
Line: 10
Column: 1
import unittest
from torch.testing._internal.common_utils import (TestCase, run_tests, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN)
import torch.multiprocessing as mp
def test_success_func(i):
pass
Reported by Pylint.
Line: 100
Column: 9
# After all processes (nproc=2) have joined it must return True
mp_context.join(timeout=None)
mp_context.join(timeout=None)
self.assertTrue(mp_context.join(timeout=None))
def test_first_argument_index(self):
context = mp.get_context(self.start_method)
queue = context.SimpleQueue()
mp.start_processes(test_success_single_arg_func, args=(queue,), nprocs=2, start_method=self.start_method)
Reported by Pylint.
Line: 106
Column: 9
context = mp.get_context(self.start_method)
queue = context.SimpleQueue()
mp.start_processes(test_success_single_arg_func, args=(queue,), nprocs=2, start_method=self.start_method)
self.assertEqual([0, 1], sorted([queue.get(), queue.get()]))
def test_exception_single(self):
nprocs = 2
for i in range(nprocs):
with self.assertRaisesRegex(
Reported by Pylint.
Line: 111
Column: 18
def test_exception_single(self):
nprocs = 2
for i in range(nprocs):
with self.assertRaisesRegex(
Exception,
"\nValueError: legitimate exception from process %d$" % i,
):
mp.start_processes(test_exception_single_func, args=(i,), nprocs=nprocs, start_method=self.start_method)
Reported by Pylint.
Line: 118
Column: 14
mp.start_processes(test_exception_single_func, args=(i,), nprocs=nprocs, start_method=self.start_method)
def test_exception_all(self):
with self.assertRaisesRegex(
Exception,
"\nValueError: legitimate exception from process (0|1)$",
):
mp.start_processes(test_exception_all_func, nprocs=2, start_method=self.start_method)
Reported by Pylint.
Line: 136
Column: 14
if IS_WINDOWS:
message = "process 0 terminated with exit code 22"
with self.assertRaisesRegex(Exception, message):
mp.start_processes(test_terminate_signal_func, nprocs=2, start_method=self.start_method)
def test_terminate_exit(self):
exitcode = 123
with self.assertRaisesRegex(
Reported by Pylint.
Line: 141
Column: 14
def test_terminate_exit(self):
exitcode = 123
with self.assertRaisesRegex(
Exception,
"process 0 terminated with exit code %d" % exitcode,
):
mp.start_processes(test_terminate_exit_func, args=(exitcode,), nprocs=2, start_method=self.start_method)
Reported by Pylint.
Line: 149
Column: 14
def test_success_first_then_exception(self):
exitcode = 123
with self.assertRaisesRegex(
Exception,
"ValueError: legitimate exception",
):
mp.start_processes(test_success_first_then_exception_func, args=(exitcode,), nprocs=2, start_method=self.start_method)
Reported by Pylint.
Line: 187
Column: 13
# alive after (nested_child_sleep / 2) seconds. By
# extension, this test times out with an assertion error
# after (nested_child_sleep / 2) seconds.
self.assertLess(time.time() - start, nested_child_sleep / 2)
time.sleep(0.1)
@unittest.skipIf(
NO_MULTIPROCESSING_SPAWN,
"Disabled for environments that don't support the spawn start method")
Reported by Pylint.
benchmarks/operator_benchmark/pt/qpool_test.py
52 issues
Line: 1
Column: 1
import torch
import operator_benchmark as op_bench
# 2D pooling will have input matrix of rank 3 or 4
qpool2d_long_configs = op_bench.config_list(
attrs=(
# C H W k s p
( 1, 3, 3, (3, 3), (1, 1), (0, 0)), # dummy # noqa: E201,E241
Reported by Pylint.
Line: 6
Column: 24
import operator_benchmark as op_bench
# 2D pooling will have input matrix of rank 3 or 4
qpool2d_long_configs = op_bench.config_list(
attrs=(
# C H W k s p
( 1, 3, 3, (3, 3), (1, 1), (0, 0)), # dummy # noqa: E201,E241
( 3, 64, 64, (3, 3), (2, 2), (1, 1)), # dummy # noqa: E201,E241
# VGG16 pools with original input shape: (-1, 3, 224, 224)
Reported by Pylint.
Line: 25
Column: 25
tags=('long',)
)
qpool2d_short_configs = op_bench.config_list(
attrs=((1, 3, 3, (3, 3), (1, 1), (0, 0)),), # dummy
attr_names=('C', 'H', 'W', # Input layout
'k', 's', 'p'), # Pooling parameters
cross_product_configs={
'N': (2,),
Reported by Pylint.
Line: 37
Column: 36
tags=('short',)
)
qadaptive_avgpool2d_long_configs = op_bench.cross_product_configs(
input_size=(
# VGG16 pools with original input shape: (-1, 3, 224, 224)
(112, 112), # MaxPool2d-9
),
output_size=(
Reported by Pylint.
Line: 57
Column: 37
tags=('long',)
)
qadaptive_avgpool2d_short_configs = op_bench.config_list(
attrs=((4, 3, (224, 224), (112, 112), True),),
attr_names=('N', 'C', 'input_size', 'output_size', 'contig'),
cross_product_configs={
'dtype': (torch.qint32, torch.qint8, torch.quint8),
},
Reported by Pylint.
Line: 67
Column: 29
)
class _QPool2dBenchmarkBase(op_bench.TorchBenchmarkBase):
def setup(self, N, C, H, W, dtype, contig):
# Input
if N == 0:
f_input = (torch.rand(C, H, W) - 0.5) * 256
else:
Reported by Pylint.
Line: 122
Column: 1
contig=contig)
op_bench.generate_pt_test(qadaptive_avgpool2d_short_configs + qadaptive_avgpool2d_long_configs,
QAdaptiveAvgPool2dBenchmark)
op_bench.generate_pt_test(qpool2d_short_configs + qpool2d_long_configs,
QAvgPool2dBenchmark)
op_bench.generate_pt_test(qpool2d_short_configs + qpool2d_long_configs,
QMaxPool2dBenchmark)
Reported by Pylint.
Line: 124
Column: 1
op_bench.generate_pt_test(qadaptive_avgpool2d_short_configs + qadaptive_avgpool2d_long_configs,
QAdaptiveAvgPool2dBenchmark)
op_bench.generate_pt_test(qpool2d_short_configs + qpool2d_long_configs,
QAvgPool2dBenchmark)
op_bench.generate_pt_test(qpool2d_short_configs + qpool2d_long_configs,
QMaxPool2dBenchmark)
Reported by Pylint.
Line: 126
Column: 1
QAdaptiveAvgPool2dBenchmark)
op_bench.generate_pt_test(qpool2d_short_configs + qpool2d_long_configs,
QAvgPool2dBenchmark)
op_bench.generate_pt_test(qpool2d_short_configs + qpool2d_long_configs,
QMaxPool2dBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 79
Column: 9
zero_point = 0
# Quantize the tensor
self.q_input = torch.quantize_per_tensor(f_input, scale=scale,
zero_point=zero_point,
dtype=dtype)
if not contig:
# Permute into NHWC and back to make it non-contiguous
if N == 0:
Reported by Pylint.
caffe2/python/operator_test/roi_align_rotated_op_test.py
52 issues
Line: 7
Column: 1
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
import copy
Reported by Pylint.
Line: 9
Column: 1
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
import copy
class RoIAlignRotatedOp(hu.HypothesisTestCase):
Reported by Pylint.
Line: 125
Column: 48
if axes[0] == axes[1] or np.absolute(axes[0] - axes[1]) == m.ndim:
raise ValueError("Axes must be different.")
if (axes[0] >= m.ndim or axes[0] < -m.ndim or
axes[1] >= m.ndim or axes[1] < -m.ndim):
raise ValueError(
"Axes={} out of range for array of ndim={}.".format(axes, m.ndim))
k %= 4
Reported by Pylint.
Line: 126
Column: 52
raise ValueError("Axes must be different.")
if (axes[0] >= m.ndim or axes[0] < -m.ndim or
axes[1] >= m.ndim or axes[1] < -m.ndim):
raise ValueError(
"Axes={} out of range for array of ndim={}.".format(axes, m.ndim))
k %= 4
Reported by Pylint.
Line: 34
Column: 72
pooled_size=st.sampled_from([7, 14]),
**hu.gcs
)
def test_horizontal_rois(self, H, W, C, num_rois, pooled_size, gc, dc):
"""
Test that results match with RoIAlign when angle=0.
"""
X = np.random.randn(1, C, H, W).astype(np.float32)
R = np.zeros((num_rois, 6)).astype(np.float32)
Reported by Pylint.
Line: 92
Column: 58
**hu.gcs
)
def test_simple_rotations(
self, H, W, C, num_rois, pooled_size, angle, gc, dc
):
"""
Test with right-angled rotations that don't need interpolation.
"""
X = np.random.randn(1, C, H, W).astype(np.float32)
Reported by Pylint.
Line: 154
Column: 17
try:
indexer[axis] = slice(None, None, -1)
except IndexError:
raise ValueError("axis=%i is invalid for the %i-dimensional input array"
% (axis, m.ndim))
return m[tuple(indexer)]
def roialign_ref(X, R):
# `angle` denotes counter-clockwise rotation. Rotate the input
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
Reported by Pylint.
Line: 11
Column: 1
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
import copy
class RoIAlignRotatedOp(hu.HypothesisTestCase):
def bbox_xywh_to_xyxy(self, boxes):
"""
Reported by Pylint.
Line: 14
Column: 1
import copy
class RoIAlignRotatedOp(hu.HypothesisTestCase):
def bbox_xywh_to_xyxy(self, boxes):
"""
Convert from [center_x center_y w h] format to [x1 y1 x2 y2].
"""
w, h = boxes[:, 2], boxes[:, 3]
Reported by Pylint.
benchmarks/operator_benchmark/benchmark_core.py
52 issues
Line: 5
Column: 1
import numpy as np
import timeit
import json
import torch
import copy
import ast
# needs to be imported after torch
import torch.utils.cpp_extension as cpp_extension # noqa: F401
Reported by Pylint.
Line: 10
Column: 1
import ast
# needs to be imported after torch
import torch.utils.cpp_extension as cpp_extension # noqa: F401
import benchmark_utils
from collections import namedtuple
"""Performance microbenchmarks.
Reported by Pylint.
Line: 12
Column: 1
# needs to be imported after torch
import torch.utils.cpp_extension as cpp_extension # noqa: F401
import benchmark_utils
from collections import namedtuple
"""Performance microbenchmarks.
This module contains core functionalities for performance microbenchmark tests.
Reported by Pylint.
Line: 10
Column: 1
import ast
# needs to be imported after torch
import torch.utils.cpp_extension as cpp_extension # noqa: F401
import benchmark_utils
from collections import namedtuple
"""Performance microbenchmarks.
Reported by Pylint.
Line: 15
Column: 1
import benchmark_utils
from collections import namedtuple
"""Performance microbenchmarks.
This module contains core functionalities for performance microbenchmark tests.
"""
"""
Reported by Pylint.
Line: 20
Column: 1
This module contains core functionalities for performance microbenchmark tests.
"""
"""
This is used to store configs of tests
An example input is:
TestConfig(test_name='add_M8_N2_K1', input_config='M: 8, N: 2, K: 1',
tag='long', run_backward=False)
"""
Reported by Pylint.
Line: 104
Column: 9
if tags is None:
raise ValueError("Missing tags in configs")
input_config = str(test_attrs)[1:-1].replace('\'', '')
op = bench_op()
assert op is not None, "Can't create test"
tensor_error_info = None
# op_name_function is a dictionary which has op_name and op_function.
# an example of op_name_function is:
Reported by Pylint.
Line: 107
Column: 9
input_config = str(test_attrs)[1:-1].replace('\'', '')
op = bench_op()
assert op is not None, "Can't create test"
tensor_error_info = None
# op_name_function is a dictionary which has op_name and op_function.
# an example of op_name_function is:
# {'op_name' : 'abs', 'op_function' : torch.abs}
# op_function is concatenated with the input dict then passed to the init function
# op_name is passed to the set_module_name function
Reported by Pylint.
Line: 119
Column: 9
init_dict.update({'op_func' : op_name_function['op_func']})
op.set_module_name(op_name)
op._set_backward_test(run_backward)
op.init(**init_dict)
op.extract_inputs_tuple()
if not run_backward:
for _, attr in vars(op).items():
Reported by Pylint.
Line: 133
Column: 12
# _num_inputs_require_grads is used to track the number of tensors
# which use auto_set().
if op._num_inputs_require_grads > 0:
input_name = 'all'
yield _create_test(op, test_attrs, tags, OperatorTestCase, run_backward, input_name)
# This for loop is only used when auto_set is used.
# _pass_count counts how many times init has been called.
Reported by Pylint.
torch/distributions/multivariate_normal.py
52 issues
Line: 20
Column: 12
to a batch shape. They are not necessarily assumed to have the same batch shape,
just ones which can be broadcasted.
"""
return torch.matmul(bmat, bvec.unsqueeze(-1)).squeeze(-1)
def _batch_mahalanobis(bL, bx):
r"""
Computes the squared Mahalanobis distance :math:`\mathbf{x}^\top\mathbf{M}^{-1}\mathbf{x}`
Reported by Pylint.
Line: 57
Column: 14
flat_L = bL.reshape(-1, n, n) # shape = b x n x n
flat_x = bx.reshape(-1, flat_L.size(0), n) # shape = c x b x n
flat_x_swap = flat_x.permute(1, 2, 0) # shape = b x n x c
M_swap = torch.triangular_solve(flat_x_swap, flat_L, upper=False)[0].pow(2).sum(-2) # shape = b x c
M = M_swap.t() # shape = c x b
# Now we revert the above reshape and permute operators.
permuted_M = M.reshape(bx.shape[:-1]) # shape = (..., 1, j, i, 1)
permute_inv_dims = list(range(outer_batch_dims))
Reported by Pylint.
Line: 71
Column: 32
def _precision_to_scale_tril(P):
# Ref: https://nbviewer.jupyter.org/gist/fehiepsi/5ef8e09e61604f10607380467eb82006#Precision-to-scale_tril
Lf = torch.linalg.cholesky(torch.flip(P, (-2, -1)))
L_inv = torch.transpose(torch.flip(Lf, (-2, -1)), -2, -1)
L = torch.triangular_solve(torch.eye(P.shape[-1], dtype=P.dtype, device=P.device),
L_inv, upper=False)[0]
return L
Reported by Pylint.
Line: 72
Column: 13
def _precision_to_scale_tril(P):
# Ref: https://nbviewer.jupyter.org/gist/fehiepsi/5ef8e09e61604f10607380467eb82006#Precision-to-scale_tril
Lf = torch.linalg.cholesky(torch.flip(P, (-2, -1)))
L_inv = torch.transpose(torch.flip(Lf, (-2, -1)), -2, -1)
L = torch.triangular_solve(torch.eye(P.shape[-1], dtype=P.dtype, device=P.device),
L_inv, upper=False)[0]
return L
Reported by Pylint.
Line: 72
Column: 29
def _precision_to_scale_tril(P):
# Ref: https://nbviewer.jupyter.org/gist/fehiepsi/5ef8e09e61604f10607380467eb82006#Precision-to-scale_tril
Lf = torch.linalg.cholesky(torch.flip(P, (-2, -1)))
L_inv = torch.transpose(torch.flip(Lf, (-2, -1)), -2, -1)
L = torch.triangular_solve(torch.eye(P.shape[-1], dtype=P.dtype, device=P.device),
L_inv, upper=False)[0]
return L
Reported by Pylint.
Line: 73
Column: 9
# Ref: https://nbviewer.jupyter.org/gist/fehiepsi/5ef8e09e61604f10607380467eb82006#Precision-to-scale_tril
Lf = torch.linalg.cholesky(torch.flip(P, (-2, -1)))
L_inv = torch.transpose(torch.flip(Lf, (-2, -1)), -2, -1)
L = torch.triangular_solve(torch.eye(P.shape[-1], dtype=P.dtype, device=P.device),
L_inv, upper=False)[0]
return L
class MultivariateNormal(Distribution):
Reported by Pylint.
Line: 73
Column: 32
# Ref: https://nbviewer.jupyter.org/gist/fehiepsi/5ef8e09e61604f10607380467eb82006#Precision-to-scale_tril
Lf = torch.linalg.cholesky(torch.flip(P, (-2, -1)))
L_inv = torch.transpose(torch.flip(Lf, (-2, -1)), -2, -1)
L = torch.triangular_solve(torch.eye(P.shape[-1], dtype=P.dtype, device=P.device),
L_inv, upper=False)[0]
return L
class MultivariateNormal(Distribution):
Reported by Pylint.
Line: 157
Column: 23
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(MultivariateNormal, _instance)
batch_shape = torch.Size(batch_shape)
loc_shape = batch_shape + self.event_shape
cov_shape = batch_shape + self.event_shape + self.event_shape
new.loc = self.loc.expand(loc_shape)
new._unbroadcasted_scale_tril = self._unbroadcasted_scale_tril
if 'covariance_matrix' in self.__dict__:
Reported by Pylint.
Line: 175
Column: 5
return new
@lazy_property
def scale_tril(self):
return self._unbroadcasted_scale_tril.expand(
self._batch_shape + self._event_shape + self._event_shape)
@lazy_property
def covariance_matrix(self):
Reported by Pylint.
Line: 180
Column: 5
self._batch_shape + self._event_shape + self._event_shape)
@lazy_property
def covariance_matrix(self):
return (torch.matmul(self._unbroadcasted_scale_tril,
self._unbroadcasted_scale_tril.transpose(-1, -2))
.expand(self._batch_shape + self._event_shape + self._event_shape))
@lazy_property
Reported by Pylint.
caffe2/contrib/fakelowp/test/test_int8_ops_nnpi.py
52 issues
Line: 1
Column: 1
import caffe2.python.fakelowp.init_shared_libs # noqa
import numpy as np
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from hypothesis import given, strategies as st, settings
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial
import datetime
Reported by Pylint.
Line: 3
Column: 1
import caffe2.python.fakelowp.init_shared_libs # noqa
import numpy as np
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from hypothesis import given, strategies as st, settings
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial
import datetime
Reported by Pylint.
Line: 4
Column: 1
import caffe2.python.fakelowp.init_shared_libs # noqa
import numpy as np
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from hypothesis import given, strategies as st, settings
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial
import datetime
Reported by Pylint.
Line: 5
Column: 1
import numpy as np
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from hypothesis import given, strategies as st, settings
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial
import datetime
core.GlobalInit(["caffe2",
Reported by Pylint.
Line: 6
Column: 1
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from hypothesis import given, strategies as st, settings
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial
import datetime
core.GlobalInit(["caffe2",
"--caffe2_log_level=-3",
Reported by Pylint.
Line: 7
Column: 1
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from hypothesis import given, strategies as st, settings
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial
import datetime
core.GlobalInit(["caffe2",
"--caffe2_log_level=-3",
"--glow_global_fp16=1",
Reported by Pylint.
Line: 1
Column: 1
import caffe2.python.fakelowp.init_shared_libs # noqa
import numpy as np
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from hypothesis import given, strategies as st, settings
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial
import datetime
Reported by Pylint.
Line: 131
Column: 3
n=st.integers(1, 1024),
m=st.integers(1, 1024),
k=st.integers(1, 1024),
f=st.integers(1, 1), # TODO: figure a safe number to increase
rand_seed=st.integers(0, 65534),
quantize_bias=st.sampled_from([False]),
)
@settings(deadline=datetime.timedelta(seconds=50))
def test_int8_fc(
Reported by Pylint.
Line: 1
Column: 1
import caffe2.python.fakelowp.init_shared_libs # noqa
import numpy as np
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from hypothesis import given, strategies as st, settings
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial
import datetime
Reported by Pylint.
Line: 6
Column: 1
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from hypothesis import given, strategies as st, settings
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial
import datetime
core.GlobalInit(["caffe2",
"--caffe2_log_level=-3",
Reported by Pylint.