The following issues were found:
caffe2/quantization/server/elementwise_sum_dnnlowp_op_test.py
45 issues
Line: 6
Column: 1
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given
Reported by Pylint.
Line: 10
Column: 1
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
Reported by Pylint.
Line: 25
Column: 68
is_empty=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_elementwise_sum_int(self, N, M, is_empty, gc, dc):
if is_empty:
N = 0
# All inputs have scale 1, so exactly represented after quantization
inputs = M * [None]
X_names = M * [None]
Reported by Pylint.
Line: 88
Column: 66
# correctness test with no quantization error in inputs
@given(N=st.integers(32, 256), M=st.integers(1, 3), **hu.gcs_cpu_only)
def test_dnnlowp_elementwise_sum_int_inplace(self, N, M, gc, dc):
# All inputs have scale 1, so exactly represented after quantization
inputs = M * [None]
X_names = M * [None]
X_q_names = M * [None]
Reported by Pylint.
Line: 154
Column: 63
# correctness test with no quantization error in inputs
@given(N=st.integers(32, 256), M=st.integers(1, 3), **hu.gcs_cpu_only)
def test_dnnlowp_elementwise_sum_relu_int(self, N, M, gc, dc):
# All inputs have scale 1, so exactly represented after quantization
inputs = M * [None]
X_names = M * [None]
X_q_names = M * [None]
Reported by Pylint.
Line: 218
Column: 71
# correctness test with no quantization error in inputs
@given(N=st.integers(32, 256), M=st.integers(1, 3), **hu.gcs_cpu_only)
def test_dnnlowp_elementwise_sum_relu_int_inplace(self, N, M, gc, dc):
# All inputs have scale 1, so exactly represented after quantization
inputs = M * [None]
X_names = M * [None]
X_q_names = M * [None]
Reported by Pylint.
Line: 1
Column: 1
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
Reported by Pylint.
Line: 17
Column: 1
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPOpSumOpTest(hu.HypothesisTestCase):
# correctness test with no quantization error in inputs
@given(
N=st.integers(32, 256),
M=st.integers(1, 3),
is_empty=st.booleans(),
Reported by Pylint.
Line: 24
Column: 5
M=st.integers(1, 3),
is_empty=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_elementwise_sum_int(self, N, M, is_empty, gc, dc):
if is_empty:
N = 0
# All inputs have scale 1, so exactly represented after quantization
inputs = M * [None]
Reported by Pylint.
Line: 24
Column: 5
M=st.integers(1, 3),
is_empty=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_elementwise_sum_int(self, N, M, is_empty, gc, dc):
if is_empty:
N = 0
# All inputs have scale 1, so exactly represented after quantization
inputs = M * [None]
Reported by Pylint.
caffe2/python/optimizer_test_util.py
45 issues
Line: 47
Column: 9
sq = model.SquaredL2Distance([out, 'label'])
loss = model.AveragedLoss(sq, "avg_loss")
grad_map = model.AddGradientOperators([loss])
self.assertIsInstance(grad_map['fc_w'], core.BlobReference)
return (model, perfect_model, data, label)
def testDense(self):
model, perfect_model, data, label = self._createDense()
optimizer = self.build_optimizer(model)
Reported by Pylint.
Line: 52
Column: 21
def testDense(self):
model, perfect_model, data, label = self._createDense()
optimizer = self.build_optimizer(model)
workspace.FeedBlob('data', data[0])
workspace.FeedBlob('label', label[0])
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net, True)
for _ in range(2000):
Reported by Pylint.
Line: 68
Column: 9
workspace.FetchBlob('fc_w'),
atol=1e-2
)
self.check_optimizer(optimizer)
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
def testGPUDense(self, dtype=core.DataType.FLOAT):
device_opt = core.DeviceOption(workspace.GpuDeviceType, 0)
with core.DeviceScope(device_opt):
Reported by Pylint.
Line: 87
Column: 9
brew.fc(model, 'fc_cpu', 'fc2', dim_in=1, dim_out=10, axis=0)
# Create optimizer in default device scope
self.build_optimizer(model)
if self._skip_gpu:
return
# Run net to see it does not crash
Reported by Pylint.
Line: 89
Column: 12
# Create optimizer in default device scope
self.build_optimizer(model)
if self._skip_gpu:
return
# Run net to see it does not crash
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net, True)
Reported by Pylint.
Line: 119
Column: 9
sq = model.SquaredL2Distance([out, 'label'])
loss = model.AveragedLoss(sq, "avg_loss")
grad_map = model.AddGradientOperators([loss])
self.assertIsInstance(grad_map['w'], core.GradientSlice)
optimizer = self.build_optimizer(model)
workspace.CreateBlob('indices')
workspace.CreateBlob('label')
Reported by Pylint.
Line: 120
Column: 21
loss = model.AveragedLoss(sq, "avg_loss")
grad_map = model.AddGradientOperators([loss])
self.assertIsInstance(grad_map['w'], core.GradientSlice)
optimizer = self.build_optimizer(model)
workspace.CreateBlob('indices')
workspace.CreateBlob('label')
for indices_type in [np.int32, np.int64]:
Reported by Pylint.
Line: 148
Column: 9
workspace.FetchBlob('w'),
atol=1e-2
)
self.check_optimizer(optimizer)
class LRModificationTestBase(object):
"""
This is an abstract base class.
Reported by Pylint.
Line: 178
Column: 45
def test_global_norm_based_gradient_clipping(self):
max_gradient_norm = 1.0
model, perfect_model, data, label = self._createDense()
opt = self.build_optimizer(model, max_gradient_norm=max_gradient_norm)
params = []
for param in model.GetParams(top_scope=True):
if param in model.param_to_grad:
Reported by Pylint.
Line: 179
Column: 15
def test_global_norm_based_gradient_clipping(self):
max_gradient_norm = 1.0
model, perfect_model, data, label = self._createDense()
opt = self.build_optimizer(model, max_gradient_norm=max_gradient_norm)
params = []
for param in model.GetParams(top_scope=True):
if param in model.param_to_grad:
if not isinstance(
Reported by Pylint.
caffe2/python/operator_test/adagrad_test.py
45 issues
Line: 5
Column: 1
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core
from caffe2.python.operator_test.adagrad_test_helper import (
adagrad_sparse_test_helper,
ref_adagrad,
Reported by Pylint.
Line: 12
Column: 1
adagrad_sparse_test_helper,
ref_adagrad,
)
from hypothesis import HealthCheck, given, settings
class TestAdagrad(serial.SerializedTestCase):
@given(
inputs=hu.tensors(n=3),
Reported by Pylint.
Line: 28
Column: 67
**hu.gcs
)
@settings(deadline=10000)
def test_adagrad(self, inputs, lr, epsilon, weight_decay, gc, dc):
param, momentum, grad = inputs
momentum = np.abs(momentum)
lr = np.array([lr], dtype=np.float32)
op = core.CreateOperator(
Reported by Pylint.
Line: 62
Column: 54
)
@settings(deadline=10000)
def test_adagrad_output_effective_lr(
self, inputs, lr, epsilon, weight_decay, gc, dc
):
param, momentum, grad = inputs
momentum = np.abs(momentum)
lr = np.array([lr], dtype=np.float32)
Reported by Pylint.
Line: 100
Column: 84
**hu.gcs_cpu_only
)
@settings(deadline=10000)
def test_adagrad_output_effective_lr_and_update(self, inputs, lr, epsilon, gc, dc):
param, momentum, grad = inputs
momentum = np.abs(momentum)
lr = np.array([lr], dtype=np.float32)
op = core.CreateOperator(
Reported by Pylint.
Line: 1
Column: 1
import functools
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core
from caffe2.python.operator_test.adagrad_test_helper import (
adagrad_sparse_test_helper,
Reported by Pylint.
Line: 15
Column: 1
from hypothesis import HealthCheck, given, settings
class TestAdagrad(serial.SerializedTestCase):
@given(
inputs=hu.tensors(n=3),
lr=st.floats(
min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False
),
Reported by Pylint.
Line: 27
Column: 5
weight_decay=st.sampled_from([0.0, 0.1]),
**hu.gcs
)
@settings(deadline=10000)
def test_adagrad(self, inputs, lr, epsilon, weight_decay, gc, dc):
param, momentum, grad = inputs
momentum = np.abs(momentum)
lr = np.array([lr], dtype=np.float32)
Reported by Pylint.
Line: 27
Column: 5
weight_decay=st.sampled_from([0.0, 0.1]),
**hu.gcs
)
@settings(deadline=10000)
def test_adagrad(self, inputs, lr, epsilon, weight_decay, gc, dc):
param, momentum, grad = inputs
momentum = np.abs(momentum)
lr = np.array([lr], dtype=np.float32)
Reported by Pylint.
Line: 27
Column: 5
weight_decay=st.sampled_from([0.0, 0.1]),
**hu.gcs
)
@settings(deadline=10000)
def test_adagrad(self, inputs, lr, epsilon, weight_decay, gc, dc):
param, momentum, grad = inputs
momentum = np.abs(momentum)
lr = np.array([lr], dtype=np.float32)
Reported by Pylint.
caffe2/python/operator_test/affine_channel_op_test.py
45 issues
Line: 8
Column: 1
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
class TestAffineChannelOp(serial.SerializedTestCase):
Reported by Pylint.
Line: 9
Column: 1
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
class TestAffineChannelOp(serial.SerializedTestCase):
def affine_channel_nchw_ref(self, X, scale, bias):
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
Reported by Pylint.
Line: 13
Column: 1
import numpy as np
class TestAffineChannelOp(serial.SerializedTestCase):
def affine_channel_nchw_ref(self, X, scale, bias):
dims = X.shape
N = dims[0]
C = dims[1]
X = X.reshape(N, C, -1)
Reported by Pylint.
Line: 14
Column: 5
class TestAffineChannelOp(serial.SerializedTestCase):
def affine_channel_nchw_ref(self, X, scale, bias):
dims = X.shape
N = dims[0]
C = dims[1]
X = X.reshape(N, C, -1)
scale = scale.reshape(C, 1)
Reported by Pylint.
Line: 14
Column: 5
class TestAffineChannelOp(serial.SerializedTestCase):
def affine_channel_nchw_ref(self, X, scale, bias):
dims = X.shape
N = dims[0]
C = dims[1]
X = X.reshape(N, C, -1)
scale = scale.reshape(C, 1)
Reported by Pylint.
Line: 14
Column: 5
class TestAffineChannelOp(serial.SerializedTestCase):
def affine_channel_nchw_ref(self, X, scale, bias):
dims = X.shape
N = dims[0]
C = dims[1]
X = X.reshape(N, C, -1)
scale = scale.reshape(C, 1)
Reported by Pylint.
Line: 16
Column: 9
class TestAffineChannelOp(serial.SerializedTestCase):
def affine_channel_nchw_ref(self, X, scale, bias):
dims = X.shape
N = dims[0]
C = dims[1]
X = X.reshape(N, C, -1)
scale = scale.reshape(C, 1)
bias = bias.reshape(C, 1)
Y = X * scale + bias
Reported by Pylint.
Line: 17
Column: 9
def affine_channel_nchw_ref(self, X, scale, bias):
dims = X.shape
N = dims[0]
C = dims[1]
X = X.reshape(N, C, -1)
scale = scale.reshape(C, 1)
bias = bias.reshape(C, 1)
Y = X * scale + bias
return [Y.reshape(dims)]
Reported by Pylint.
Line: 21
Column: 9
X = X.reshape(N, C, -1)
scale = scale.reshape(C, 1)
bias = bias.reshape(C, 1)
Y = X * scale + bias
return [Y.reshape(dims)]
def affine_channel_nhwc_ref(self, X, scale, bias):
dims = X.shape
N = dims[0]
Reported by Pylint.
caffe2/python/lstm_benchmark.py
44 issues
Line: 27
Column: 14
'''
Fill a queue with input data
'''
log.info("Generating T={} sequence batches".format(T))
generate_input_init_net = core.Net('generate_input_init')
queue = generate_input_init_net.CreateBlobsQueue(
[], "inputqueue", num_blobs=1, capacity=T,
)
Reported by Pylint.
Line: 67
Column: 18
return queue, label_queue, entry_counts
def create_model(args, queue, label_queue, input_shape):
model = model_helper.ModelHelper(name="LSTM_bench")
seq_lengths, target = \
model.net.AddExternalInputs(
'seq_lengths',
'target',
Reported by Pylint.
Line: 69
Column: 18
def create_model(args, queue, label_queue, input_shape):
model = model_helper.ModelHelper(name="LSTM_bench")
seq_lengths, target = \
model.net.AddExternalInputs(
'seq_lengths',
'target',
)
Reported by Pylint.
Line: 94
Column: 33
)
init_blobs.extend([hidden_init, cell_init])
output, last_hidden, _, last_state = rnn_cell.LSTM(
model=model,
input_blob=input_blob,
seq_lengths=seq_lengths,
initial_states=init_blobs,
dim_in=args.input_dim,
Reported by Pylint.
Line: 119
Column: 9
# can infer the dimensions.
init_blobs = model.net.AddExternalInputs("hidden_init", "cell_init")
model.param_init_net.ConstantFill([], input_blob, shape=input_shape)
output, last_hidden, _ = rnn_cell.cudnn_LSTM(
model=model,
input_blob=input_blob,
initial_states=init_blobs,
dim_in=args.input_dim,
dim_out=args.hidden_dim,
Reported by Pylint.
Line: 133
Column: 5
assert False, "Unknown implementation"
weights = model.net.UniformFill(labels, "weights")
softmax, loss = model.net.SoftmaxWithLoss(
[model.Flatten(output), labels, weights],
['softmax', 'loss'],
)
if not args.forward_only:
Reported by Pylint.
Line: 163
Column: 16
return model, output
def Caffe2LSTM(args):
T = args.data_size // args.batch_size
input_blob_shape = [args.seq_length, args.batch_size, args.input_dim]
queue, label_queue, entry_counts = generate_data(T // args.seq_length,
input_blob_shape,
Reported by Pylint.
Line: 177
Column: 12
np.array([args.seq_length] * args.batch_size, dtype=np.int32)
)
model, output = create_model(args, queue, label_queue, input_blob_shape)
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net)
start_time = time.time()
Reported by Pylint.
Line: 193
Column: 18
if (args.gpu):
log.info("Memory stats:")
stats = utils.GetGPUMemoryUsageStats()
log.info("GPU memory:\t{} MB".format(stats['max_total'] / 1024 / 1024))
log.info("------ Starting benchmark ------")
start_time = time.time()
last_time = time.time()
for iteration in range(1, num_iters, args.iters_to_report):
Reported by Pylint.
Line: 205
Column: 13
new_time = time.time()
log.info(
"Iter: {} / {}. Entries Per Second: {}k.".format(
iteration,
num_iters,
np.sum(entry_counts[iteration:iteration + iters_once]) /
(new_time - last_time) // 100 / 10,
)
Reported by Pylint.
torch/utils/data/dataset.py
44 issues
Line: 18
Column: 1
)
# No 'default_generator' in torch/__init__.pyi
from torch import default_generator, randperm
from torch._utils import _accumulate
from torch.utils.data._typing import _DataPipeMeta
from ... import Generator, Tensor
Reported by Pylint.
Line: 18
Column: 1
)
# No 'default_generator' in torch/__init__.pyi
from torch import default_generator, randperm
from torch._utils import _accumulate
from torch.utils.data._typing import _DataPipeMeta
from ... import Generator, Tensor
Reported by Pylint.
Line: 22
Column: 1
from torch._utils import _accumulate
from torch.utils.data._typing import _DataPipeMeta
from ... import Generator, Tensor
T_co = TypeVar('T_co', covariant=True)
T = TypeVar('T')
Reported by Pylint.
Line: 95
Column: 1
cls.functions[function_name] = function
class IterableDataset(Dataset[T_co], metaclass=_DataPipeMeta):
r"""An iterable Dataset.
All datasets that represent an iterable of data samples should subclass it.
Such form of datasets is particularly useful when data come from a stream.
Reported by Pylint.
Line: 219
Column: 24
def __reduce_ex__(self, *args, **kwargs):
if IterableDataset.reduce_ex_hook is not None:
try:
return IterableDataset.reduce_ex_hook(self)
except NotImplementedError:
pass
return super().__reduce_ex__(*args, **kwargs)
@classmethod
Reported by Pylint.
Line: 303
Column: 1
return self.cumulative_sizes
class ChainDataset(IterableDataset):
r"""Dataset for chaining multiple :class:`IterableDataset` s.
This class is useful to assemble different existing dataset streams. The
chaining operation is done on-the-fly, so concatenating large-scale
datasets with this class will be efficient.
Reported by Pylint.
Line: 1
Column: 1
import bisect
import functools
import warnings
from typing import (
Callable,
Dict,
Generic,
Iterable,
Iterator,
Reported by Pylint.
Line: 24
Column: 1
from ... import Generator, Tensor
T_co = TypeVar('T_co', covariant=True)
T = TypeVar('T')
class DataChunk(list, Generic[T]):
def __init__(self, items):
Reported by Pylint.
Line: 25
Column: 1
from ... import Generator, Tensor
T_co = TypeVar('T_co', covariant=True)
T = TypeVar('T')
class DataChunk(list, Generic[T]):
def __init__(self, items):
super().__init__(items)
Reported by Pylint.
Line: 28
Column: 1
T = TypeVar('T')
class DataChunk(list, Generic[T]):
def __init__(self, items):
super().__init__(items)
self.items = items
def as_str(self, indent=''):
Reported by Pylint.
test/package/test_misc.py
44 issues
Line: 6
Column: 1
from io import BytesIO
from textwrap import dedent
from torch.package import PackageExporter, PackageImporter, is_from_package
from torch.package.package_exporter import PackagingError
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
Reported by Pylint.
Line: 7
Column: 1
from textwrap import dedent
from torch.package import PackageExporter, PackageImporter, is_from_package
from torch.package.package_exporter import PackagingError
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:
Reported by Pylint.
Line: 8
Column: 1
from torch.package import PackageExporter, PackageImporter, is_from_package
from torch.package.package_exporter import PackagingError
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
Reported by Pylint.
Line: 68
Column: 13
)
with PackageExporter(buffer) as he:
import module_a
import package_a
import package_a.subpackage
obj = package_a.subpackage.PackageASubpackageObject()
he.intern("**")
Reported by Pylint.
Line: 69
Column: 13
with PackageExporter(buffer) as he:
import module_a
import package_a
import package_a.subpackage
obj = package_a.subpackage.PackageASubpackageObject()
he.intern("**")
he.save_module(module_a.__name__)
Reported by Pylint.
Line: 70
Column: 13
with PackageExporter(buffer) as he:
import module_a
import package_a
import package_a.subpackage
obj = package_a.subpackage.PackageASubpackageObject()
he.intern("**")
he.save_module(module_a.__name__)
he.save_module(package_a.__name__)
Reported by Pylint.
Line: 106
Column: 13
"""
buffer = BytesIO()
with PackageExporter(buffer) as he:
import package_a.subpackage
he.intern("**")
obj = package_a.subpackage.PackageASubpackageObject()
he.save_pickle("obj", "obj.pkl", obj)
Reported by Pylint.
Line: 125
Column: 13
"""
with PackageExporter(BytesIO()) as he:
import package_b
he.extern("package_b.subpackage_1")
he.mock("package_b.subpackage_2")
he.intern("**")
he.save_pickle("obj", "obj.pkl", package_b.PackageBObject(["a"]))
Reported by Pylint.
Line: 141
Column: 17
with self.assertRaises(PackagingError) as e:
with PackageExporter(BytesIO()) as he:
import package_b
he.deny("package_b")
he.save_pickle("obj", "obj.pkl", package_b.PackageBObject(["a"]))
self.assertEqual(he.denied_modules(), ["package_b"])
Reported by Pylint.
Line: 149
Column: 9
def test_is_from_package(self):
"""is_from_package should work for objects and modules"""
import package_a.subpackage
buffer = BytesIO()
obj = package_a.subpackage.PackageASubpackageObject()
with PackageExporter(buffer) as pe:
Reported by Pylint.
caffe2/python/operator_test/partition_ops_test.py
44 issues
Line: 56
Column: 34
def sharding(x):
# numpy has proper modulo op that yields non-negative results
shards = (x[0] % parts).reshape([-1])
out = []
for i in range(parts):
for ind, v in enumerate(x):
suffix_shape = v.shape[len(x[0].shape):]
accum = []
Reported by Pylint.
Line: 58
Column: 32
# numpy has proper modulo op that yields non-negative results
shards = (x[0] % parts).reshape([-1])
out = []
for i in range(parts):
for ind, v in enumerate(x):
suffix_shape = v.shape[len(x[0].shape):]
accum = []
data = v.reshape((-1, ) + suffix_shape)
Reported by Pylint.
Line: 64
Column: 28
accum = []
data = v.reshape((-1, ) + suffix_shape)
if pack and ind == 0:
data = data // parts
for j, s in enumerate(shards):
if s == i:
accum.append(data[j])
Reported by Pylint.
Line: 65
Column: 44
data = v.reshape((-1, ) + suffix_shape)
if pack and ind == 0:
data = data // parts
for j, s in enumerate(shards):
if s == i:
accum.append(data[j])
Reported by Pylint.
Line: 73
Column: 63
def join(a):
if not a:
return np.empty(shape=(0, ) + suffix_shape)
return np.stack(a)
out.append(join(accum))
return out
Reported by Pylint.
Line: 140
Column: 34
def sharding(x):
# numpy has proper modulo op that yields non-negative results
shards = (x[0] % parts).reshape([-1])
out = []
for i in range(parts):
idx = 0
sharded_lengths = np.zeros(elements)
for ind, length in enumerate(lengths):
Reported by Pylint.
Line: 142
Column: 32
# numpy has proper modulo op that yields non-negative results
shards = (x[0] % parts).reshape([-1])
out = []
for i in range(parts):
idx = 0
sharded_lengths = np.zeros(elements)
for ind, length in enumerate(lengths):
for _ in range(length):
if shards[idx] == i:
Reported by Pylint.
Line: 144
Column: 48
out = []
for i in range(parts):
idx = 0
sharded_lengths = np.zeros(elements)
for ind, length in enumerate(lengths):
for _ in range(length):
if shards[idx] == i:
sharded_lengths[ind] += 1
idx += 1
Reported by Pylint.
Line: 145
Column: 50
for i in range(parts):
idx = 0
sharded_lengths = np.zeros(elements)
for ind, length in enumerate(lengths):
for _ in range(length):
if shards[idx] == i:
sharded_lengths[ind] += 1
idx += 1
out.append(sharded_lengths)
Reported by Pylint.
Line: 157
Column: 28
accum = []
data = v.reshape((-1, ) + suffix_shape)
if pack and ind == 0:
data = data // parts
for j, s in enumerate(shards):
if s == i:
accum.append(data[j])
Reported by Pylint.
caffe2/python/control.py
44 issues
Line: 30
Column: 5
def _get_next_step_name(control_name, base_name):
global _current_idx, _used_step_names
concat_name = '%s/%s' % (base_name, control_name)
next_name = concat_name
while next_name in _used_step_names:
next_name = '%s_%d' % (concat_name, _current_idx)
_current_idx += 1
Reported by Pylint.
Line: 40
Column: 15
return next_name
def _MakeList(input):
""" input is a tuple.
Example:
(a, b, c) --> [a, b, c]
(a) --> [a]
([a, b, c]) --> [a, b, c]
Reported by Pylint.
Line: 204
Column: 31
else:
last_cond = merged_net.__getattr__(relation)([last_cond, curr_cond])
# merge attributes
for k, v in viewitems(condition_nets[i]._attr_dict):
merged_net._attr_dict[k] += v
merged_net.AddExternalOutput(last_cond)
return merged_net
Reported by Pylint.
Line: 205
Column: 13
last_cond = merged_net.__getattr__(relation)([last_cond, curr_cond])
# merge attributes
for k, v in viewitems(condition_nets[i]._attr_dict):
merged_net._attr_dict[k] += v
merged_net.AddExternalOutput(last_cond)
return merged_net
Reported by Pylint.
Line: 25
Column: 1
# Used to generate names of the steps created by the control functions.
# It is actually the internal index of these steps.
_current_idx = 1
_used_step_names = set()
def _get_next_step_name(control_name, base_name):
global _current_idx, _used_step_names
Reported by Pylint.
Line: 30
Column: 5
def _get_next_step_name(control_name, base_name):
global _current_idx, _used_step_names
concat_name = '%s/%s' % (base_name, control_name)
next_name = concat_name
while next_name in _used_step_names:
next_name = '%s_%d' % (concat_name, _current_idx)
_current_idx += 1
Reported by Pylint.
Line: 30
Column: 5
def _get_next_step_name(control_name, base_name):
global _current_idx, _used_step_names
concat_name = '%s/%s' % (base_name, control_name)
next_name = concat_name
while next_name in _used_step_names:
next_name = '%s_%d' % (concat_name, _current_idx)
_current_idx += 1
Reported by Pylint.
Line: 40
Column: 1
return next_name
def _MakeList(input):
""" input is a tuple.
Example:
(a, b, c) --> [a, b, c]
(a) --> [a]
([a, b, c]) --> [a, b, c]
Reported by Pylint.
Line: 47
Column: 5
(a) --> [a]
([a, b, c]) --> [a, b, c]
"""
if len(input) == 0:
raise ValueError(
'input cannot be empty.')
elif len(input) == 1:
output = input[0]
if not isinstance(output, list):
Reported by Pylint.
Line: 59
Column: 1
return output
def _IsNets(nets_or_steps):
if isinstance(nets_or_steps, list):
return all(isinstance(n, core.Net) for n in nets_or_steps)
else:
return isinstance(nets_or_steps, core.Net)
Reported by Pylint.
torch/quantization/ns/graph_matcher.py
44 issues
Line: 11
Column: 1
from torch.fx.graph import Graph, Node
from torch.quantization.utils import getattr_from_fqn
from .ns_types import NSSubgraph, NSNodeTargetType
from .mappings import (
get_base_name_to_sets_of_related_ops,
get_unmatchable_types_map,
)
from .pattern_utils import (
Reported by Pylint.
Line: 12
Column: 1
from torch.quantization.utils import getattr_from_fqn
from .ns_types import NSSubgraph, NSNodeTargetType
from .mappings import (
get_base_name_to_sets_of_related_ops,
get_unmatchable_types_map,
)
from .pattern_utils import (
get_type_a_related_to_b,
Reported by Pylint.
Line: 16
Column: 1
get_base_name_to_sets_of_related_ops,
get_unmatchable_types_map,
)
from .pattern_utils import (
get_type_a_related_to_b,
get_reversed_fusions,
end_node_matches_reversed_fusion,
)
from torch.quantization import (
Reported by Pylint.
Line: 137
Column: 17
for inner_arg in arg:
self._recursively_add_node_arg_to_stack(inner_arg)
elif isinstance(arg, torch.fx.immutable_collections.immutable_dict):
for key, value in arg.items():
self._recursively_add_node_arg_to_stack(value)
def _is_matchable(self, node: Node) -> bool:
if node.op == 'call_function':
return not (node.target in self.non_matchable_functions)
Reported by Pylint.
Line: 158
Column: 5
"""
Exception raised when two graphs cannot be matched.
"""
pass
class SubgraphTypeRelationship(enum.Enum):
# same type, known
# example: F.linear and F.linear, or nn.Conv2d and nn.Conv2d
EQUAL = enum.auto()
Reported by Pylint.
Line: 183
Column: 3
node_a = subgraph_a.base_op_node
node_b = subgraph_b.base_op_node
# TODO(next): make this code handle matching by what is before the base op
if node_a.op != node_b.op:
if not (
node_a.op in ('call_function', 'call_method') and
node_b.op in ('call_function', 'call_method')
):
Reported by Pylint.
Line: 211
Column: 3
elif (not node_a_has_prev) and (not node_b_has_prev):
return SubgraphTypeRelationship.EQUAL
else:
# TODO(future PR): check for matches start_op_node and base_op_node
return SubgraphTypeRelationship.EQUAL
if key in type_a_related_to_b:
return SubgraphTypeRelationship.RELATED_BUT_NOT_EQUAL
else:
Reported by Pylint.
Line: 1
Column: 1
import collections
import enum
import torch
toq = torch.ops.quantized
from torch.fx import GraphModule
from torch.fx.graph import Graph, Node
Reported by Pylint.
Line: 7
Column: 1
import torch
toq = torch.ops.quantized
from torch.fx import GraphModule
from torch.fx.graph import Graph, Node
from torch.quantization.utils import getattr_from_fqn
from .ns_types import NSSubgraph, NSNodeTargetType
from .mappings import (
Reported by Pylint.
Line: 8
Column: 1
toq = torch.ops.quantized
from torch.fx import GraphModule
from torch.fx.graph import Graph, Node
from torch.quantization.utils import getattr_from_fqn
from .ns_types import NSSubgraph, NSNodeTargetType
from .mappings import (
get_base_name_to_sets_of_related_ops,
Reported by Pylint.