The following issues were found:
caffe2/quantization/server/resize_nearest_dnnlowp_op_test.py
28 issues
Line: 4
Column: 1
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from hypothesis import given, settings
Reported by Pylint.
Line: 7
Column: 1
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from hypothesis import given, settings
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
Reported by Pylint.
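The "Line 1 / Line 4 / Line 7, Column 1" findings all land on the import block. Assuming they are the usual Pylint checks at that location, missing-module-docstring (C0114) and wrong-import-order (C0411), a minimal sketch of the fix is to add a docstring and group third-party imports ahead of first-party ones (the docstring text here is an assumption):

"""Hypothesis tests for the DNNLOWP Int8ResizeNearest operator."""

import hypothesis.strategies as st
import numpy as np
from hypothesis import given, settings

import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, dyndep, workspace

dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])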
Line: 25
Column: 65
**hu.gcs_cpu_only
)
@settings(deadline=None, max_examples=50)
def test_resize_nearest(self, N, H, W, C, scale_w, scale_h, gc, dc):
X = np.round(np.random.rand(N, H, W, C) * 255).astype(np.float32)
quantize = core.CreateOperator("Quantize", ["X"], ["X_q"], engine="DNNLOWP")
resize_nearest = core.CreateOperator(
"Int8ResizeNearest",
Reported by Pylint.
Line: 25
Column: 69
**hu.gcs_cpu_only
)
@settings(deadline=None, max_examples=50)
def test_resize_nearest(self, N, H, W, C, scale_w, scale_h, gc, dc):
X = np.round(np.random.rand(N, H, W, C) * 255).astype(np.float32)
quantize = core.CreateOperator("Quantize", ["X"], ["X_q"], engine="DNNLOWP")
resize_nearest = core.CreateOperator(
"Int8ResizeNearest",
Reported by Pylint.
Line: 1
Column: 1
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from hypothesis import given, settings
Reported by Pylint.
Line: 14
Column: 1
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPResizeNearestOpTest(hu.HypothesisTestCase):
@given(
N=st.integers(0, 3),
H=st.integers(10, 300),
W=st.integers(10, 300),
C=st.integers(1, 32),
Reported by Pylint.
Line: 24
Column: 5
scale_h=st.floats(0.25, 4.0) | st.just(2.0),
**hu.gcs_cpu_only
)
@settings(deadline=None, max_examples=50)
def test_resize_nearest(self, N, H, W, C, scale_w, scale_h, gc, dc):
X = np.round(np.random.rand(N, H, W, C) * 255).astype(np.float32)
quantize = core.CreateOperator("Quantize", ["X"], ["X_q"], engine="DNNLOWP")
resize_nearest = core.CreateOperator(
Reported by Pylint.
benchmarks/operator_benchmark/pt/binary_test.py
28 issues
Line: 2
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for binary operators."""
# Benchmark ops performance with broadcast
binary_ops_bcast_list = op_bench.op_list(
Reported by Pylint.
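In the quoted context the module docstring sits below the imports, where Python treats it as an ordinary string expression, so the module is still undocumented as far as Pylint is concerned. Assuming the finding is missing-module-docstring (C0114) or pointless-string-statement (W0105), moving the string to the very first statement resolves it:

"""Microbenchmarks for binary operators."""

import operator_benchmark as op_bench
import torch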
Line: 9
Column: 25
# Benchmark ops performance with broadcast
binary_ops_bcast_list = op_bench.op_list(
attr_names=['op_name', 'op_func'],
attrs=[
['add', torch.add],
],
)
Reported by Pylint.
Line: 17
Column: 28
)
# Configs with broadcast
binary_configs_broadcast = op_bench.config_list(
attr_names=['in_one', 'in_two'],
attrs=[
[[64, 1, 64], [1, 64, 1]],
],
cross_product_configs={
Reported by Pylint.
Line: 30
Column: 30
)
class BinaryOpBcastBenchmark(op_bench.TorchBenchmarkBase):
def init(self, in_one, in_two, dtype, device, op_func):
self.inputs = {
"in_one": torch.randn(in_one, device=device).to(dtype=dtype),
"in_two": torch.randn(in_two, device=device).to(dtype=dtype)
}
Reported by Pylint.
Line: 42
Column: 1
return self.op_func(in_one, in_two)
op_bench.generate_pt_tests_from_op_list(binary_ops_bcast_list,
binary_configs_broadcast,
BinaryOpBcastBenchmark)
def copy(in1, in2):
Reported by Pylint.
Line: 51
Column: 19
return in1.copy_(in2)
# Benchmark ops performance without broadcast
binary_ops_list = op_bench.op_list(
attr_names=['op_name', 'op_func'],
attrs=[
['add', torch.add],
['copy_', copy],
],
Reported by Pylint.
Line: 59
Column: 24
],
)
binary_short_configs = op_bench.config_list(
attr_names=['M', 'N', 'K'],
attrs=[
[1, 1, 1],
[64, 64, 64],
[64, 64, 128],
Reported by Pylint.
Line: 74
Column: 23
tags=['short'],
)
binary_long_configs = op_bench.cross_product_configs(
M=[8, 128],
N=[32, 64],
K=[256, 512],
device=['cpu', 'cuda'],
dtype_one=[torch.int8, torch.int32],
Reported by Pylint.
Line: 85
Column: 25
)
class BinaryOpBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device, dtype_one, dtype_two, op_func):
self.inputs = {
"input_one": torch.randn(M, N, K, device=device).to(dtype=dtype_one),
"input_two": torch.randn(M, N, K, device=device).to(dtype=dtype_two)
}
Reported by Pylint.
Line: 97
Column: 1
return self.op_func(input_one, input_two)
op_bench.generate_pt_tests_from_op_list(binary_ops_list,
binary_short_configs + binary_long_configs,
BinaryOpBenchmark)
if __name__ == "__main__":
Reported by Pylint.
tools/shared/cwrap_common.py
28 issues
Line: 48
Column: 3
declaration['name'], declaration['overload_name'])
else:
declaration['type_wrapper_name'] = declaration['name']
# TODO: Uggggh, parsing the schema string here, really???
declaration['operator_name_with_overload'] = declaration['schema_string'].split('(')[0]
if declaration['schema_string']:
declaration['unqual_schema_string'] = declaration['schema_string'].split('::')[1]
declaration['unqual_operator_name_with_overload'] = declaration['operator_name_with_overload'].split('::')[1]
else:
Reported by Pylint.
Line: 71
Column: 3
# Propagate defaults from declaration to options
for option in declaration['options']:
for k, v in declaration.items():
# TODO(zach): why does cwrap not propagate 'name'? I need it
# propagated for ATen
if k != 'options':
option.setdefault(k, v)
# TODO(zach): added option to remove keyword handling for C++ which cannot
Reported by Pylint.
Line: 76
Column: 3
if k != 'options':
option.setdefault(k, v)
# TODO(zach): added option to remove keyword handling for C++ which cannot
# support it.
Option = Dict[str, Any]
Reported by Pylint.
Line: 1
Column: 1
# this code should be common among cwrap and ATen preprocessing
# for now, I have put it in one place but right now is copied out of cwrap
import copy
from typing import Any, Dict, Iterable, List, Union
Arg = Dict[str, Any]
def parse_arguments(args: List[Union[str, Arg]]) -> List[Arg]:
Reported by Pylint.
Line: 9
Column: 1
Arg = Dict[str, Any]
def parse_arguments(args: List[Union[str, Arg]]) -> List[Arg]:
new_args = []
for arg in args:
# Simple arg declaration of form "<type> <name>"
if isinstance(arg, str):
t, _, name = arg.partition(' ')
Reported by Pylint.
Line: 14
Column: 13
for arg in args:
# Simple arg declaration of form "<type> <name>"
if isinstance(arg, str):
t, _, name = arg.partition(' ')
new_args.append({'type': t, 'name': name})
elif isinstance(arg, dict):
if 'arg' in arg:
arg['type'], _, arg['name'] = arg['arg'].partition(' ')
del arg['arg']
Reported by Pylint.
Line: 29
Column: 1
Declaration = Dict[str, Any]
def set_declaration_defaults(declaration: Declaration) -> None:
if 'schema_string' not in declaration:
# This happens for legacy TH bindings like
# _thnn_conv_depthwise2d_backward
declaration['schema_string'] = ''
declaration.setdefault('arguments', [])
Reported by Pylint.
Line: 40
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
declaration['cname'] = declaration['name']
if 'backends' not in declaration:
declaration['backends'] = ['CPU', 'CUDA']
assert 'api_name' not in declaration
declaration['api_name'] = declaration['name']
# NB: keep this in sync with gen_autograd.py
if declaration.get('overload_name'):
declaration['type_wrapper_name'] = "{}_{}".format(
declaration['name'], declaration['overload_name'])
Reported by Bandit.
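The linked b101_assert_used page explains the finding: assert statements are stripped when Python runs with -O, so they must not carry runtime validation. A minimal sketch of the standard remediation for the quoted line, wrapped in a hypothetical helper so it runs standalone (the exception type and message are assumptions):

def set_api_name(declaration):
    # was: assert 'api_name' not in declaration
    if 'api_name' in declaration:
        raise ValueError("declaration unexpectedly already has an 'api_name'")
    declaration['api_name'] = declaration['name']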
Line: 52
Column: 1
declaration['operator_name_with_overload'] = declaration['schema_string'].split('(')[0]
if declaration['schema_string']:
declaration['unqual_schema_string'] = declaration['schema_string'].split('::')[1]
declaration['unqual_operator_name_with_overload'] = declaration['operator_name_with_overload'].split('::')[1]
else:
declaration['unqual_schema_string'] = ''
declaration['unqual_operator_name_with_overload'] = ''
# Simulate multiple dispatch, even if it's not necessary
if 'options' not in declaration:
Reported by Pylint.
Line: 70
Column: 16
option['schema_order_arguments'] = parse_arguments(option['schema_order_arguments'])
# Propagate defaults from declaration to options
for option in declaration['options']:
for k, v in declaration.items():
# TODO(zach): why does cwrap not propagate 'name'? I need it
# propagated for ATen
if k != 'options':
option.setdefault(k, v)
Reported by Pylint.
caffe2/python/context_test.py
28 issues
Line: 27
Column: 16
with MyContext() as a:
for _ in range(100):
self.assertTrue(MyContext.current() == a)
except Exception as e:
self._exceptions.append(e)
def testMultiThreaded(self):
threads = []
self._exceptions = []
Reported by Pylint.
Line: 32
Column: 9
def testMultiThreaded(self):
threads = []
self._exceptions = []
for _ in range(8):
thread = Thread(target=self.use_my_context)
thread.start()
threads.append(thread)
for t in threads:
Reported by Pylint.
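Here self._exceptions is first assigned inside testMultiThreaded (and appended to from use_my_context). Assuming the line-32 finding is attribute-defined-outside-init (W0201), the usual fix is to create the attribute in setUp so every test method sees it; a sketch:

from caffe2.python import test_util

class TestContext(test_util.TestCase):
    def setUp(self):
        super().setUp()
        self._exceptions = []  # created up front, so W0201 no longer fires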
Line: 1
Column: 1
from caffe2.python import context, test_util
from threading import Thread
Reported by Pylint.
Line: 7
Column: 1
from caffe2.python import context, test_util
from threading import Thread
class MyContext(context.Managed):
pass
Reported by Pylint.
Line: 10
Column: 1
from threading import Thread
class MyContext(context.Managed):
pass
class DefaultMyContext(context.DefaultManaged):
pass
Reported by Pylint.
Line: 13
Column: 1
class MyContext(context.Managed):
pass
class DefaultMyContext(context.DefaultManaged):
pass
class ChildMyContext(MyContext):
pass
Reported by Pylint.
Line: 16
Column: 1
class DefaultMyContext(context.DefaultManaged):
pass
class ChildMyContext(MyContext):
pass
class TestContext(test_util.TestCase):
def use_my_context(self):
Reported by Pylint.
torch/distributions/dirichlet.py
28 issues
Line: 11
Column: 12
# This helper is exposed for testing.
def _Dirichlet_backward(x, concentration, grad_output):
total = concentration.sum(-1, True).expand_as(concentration)
grad = torch._dirichlet_grad(x, concentration, total)
return grad * (grad_output - (x * grad_output).sum(-1, True))
class _Dirichlet(Function):
@staticmethod
Reported by Pylint.
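The flagged columns point at torch._dirichlet_grad and torch._sample_dirichlet: underscore-prefixed functions implemented in the C extension. Assuming the findings are protected-access (W0212) and/or no-member (E1101, which Pylint raises when it cannot introspect an extension module), the common remediations are an inline pragma or letting Pylint load the extension:

import torch

concentration = torch.ones(3)
# Per call site, silence the protected-access warning:
x = torch._sample_dirichlet(concentration)  # pylint: disable=protected-access

# Or project-wide in .pylintrc, so C-extension members resolve
# (the option name varies by Pylint version):
# [MASTER]
# extension-pkg-allow-list=torch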
Line: 18
Column: 13
class _Dirichlet(Function):
@staticmethod
def forward(ctx, concentration):
x = torch._sample_dirichlet(concentration)
ctx.save_for_backward(x, concentration)
return x
@staticmethod
@once_differentiable
Reported by Pylint.
Line: 56
Column: 23
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Dirichlet, _instance)
batch_shape = torch.Size(batch_shape)
new.concentration = self.concentration.expand(batch_shape + self.event_shape)
super(Dirichlet, new).__init__(batch_shape, self.event_shape, validate_args=False)
new._validate_args = self._validate_args
return new
Reported by Pylint.
Line: 70
Column: 18
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
return ((torch.log(value) * (self.concentration - 1.0)).sum(-1) +
torch.lgamma(self.concentration.sum(-1)) -
torch.lgamma(self.concentration).sum(-1))
@property
def mean(self):
Reported by Pylint.
Line: 71
Column: 17
if self._validate_args:
self._validate_sample(value)
return ((torch.log(value) * (self.concentration - 1.0)).sum(-1) +
torch.lgamma(self.concentration.sum(-1)) -
torch.lgamma(self.concentration).sum(-1))
@property
def mean(self):
return self.concentration / self.concentration.sum(-1, True)
Reported by Pylint.
Line: 72
Column: 17
self._validate_sample(value)
return ((torch.log(value) * (self.concentration - 1.0)).sum(-1) +
torch.lgamma(self.concentration.sum(-1)) -
torch.lgamma(self.concentration).sum(-1))
@property
def mean(self):
return self.concentration / self.concentration.sum(-1, True)
Reported by Pylint.
Line: 86
Column: 17
def entropy(self):
k = self.concentration.size(-1)
a0 = self.concentration.sum(-1)
return (torch.lgamma(self.concentration).sum(-1) - torch.lgamma(a0) -
(k - a0) * torch.digamma(a0) -
((self.concentration - 1.0) * torch.digamma(self.concentration)).sum(-1))
@property
def _natural_params(self):
Reported by Pylint.
Line: 86
Column: 60
def entropy(self):
k = self.concentration.size(-1)
a0 = self.concentration.sum(-1)
return (torch.lgamma(self.concentration).sum(-1) - torch.lgamma(a0) -
(k - a0) * torch.digamma(a0) -
((self.concentration - 1.0) * torch.digamma(self.concentration)).sum(-1))
@property
def _natural_params(self):
Reported by Pylint.
Line: 87
Column: 28
k = self.concentration.size(-1)
a0 = self.concentration.sum(-1)
return (torch.lgamma(self.concentration).sum(-1) - torch.lgamma(a0) -
(k - a0) * torch.digamma(a0) -
((self.concentration - 1.0) * torch.digamma(self.concentration)).sum(-1))
@property
def _natural_params(self):
return (self.concentration, )
Reported by Pylint.
Line: 88
Column: 47
a0 = self.concentration.sum(-1)
return (torch.lgamma(self.concentration).sum(-1) - torch.lgamma(a0) -
(k - a0) * torch.digamma(a0) -
((self.concentration - 1.0) * torch.digamma(self.concentration)).sum(-1))
@property
def _natural_params(self):
return (self.concentration, )
Reported by Pylint.
torch/optim/swa_utils.py
28 issues
Line: 93
Column: 30
if device is not None:
self.module = self.module.to(device)
self.register_buffer('n_averaged',
torch.tensor(0, dtype=torch.long, device=device))
if avg_fn is None:
def avg_fn(averaged_model_parameter, model_parameter, num_averaged):
return averaged_model_parameter + \
(model_parameter - averaged_model_parameter) / (num_averaged + 1)
self.avg_fn = avg_fn
Reported by Pylint.
Line: 93
Column: 52
if device is not None:
self.module = self.module.to(device)
self.register_buffer('n_averaged',
torch.tensor(0, dtype=torch.long, device=device))
if avg_fn is None:
def avg_fn(averaged_model_parameter, model_parameter, num_averaged):
return averaged_model_parameter + \
(model_parameter - averaged_model_parameter) / (num_averaged + 1)
self.avg_fn = avg_fn
Reported by Pylint.
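Whatever the exact messages at line 93, the default avg_fn shown in this context is the textbook incremental-mean update: after n calls the averaged parameter equals the arithmetic mean of the n values seen so far. A small self-check of that recurrence:

def avg_fn(averaged, value, num_averaged):
    # same formula as in the snippet above
    return averaged + (value - averaged) / (num_averaged + 1)

values = [2.0, 4.0, 9.0]
running = 0.0
for n, v in enumerate(values):
    running = avg_fn(running, v, n)
assert running == sum(values) / len(values)  # mean is 5.0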
Line: 107
Column: 16
for p_swa, p_model in zip(self.parameters(), model.parameters()):
device = p_swa.device
p_model_ = p_model.detach().to(device)
if self.n_averaged == 0:
p_swa.detach().copy_(p_model_)
else:
p_swa.detach().copy_(self.avg_fn(p_swa.detach(), p_model_,
self.n_averaged.to(device)))
self.n_averaged += 1
Reported by Pylint.
Line: 111
Column: 50
p_swa.detach().copy_(p_model_)
else:
p_swa.detach().copy_(self.avg_fn(p_swa.detach(), p_model_,
self.n_averaged.to(device)))
self.n_averaged += 1
@torch.no_grad()
def update_bn(loader, model, device=None):
Reported by Pylint.
Line: 112
Column: 9
else:
p_swa.detach().copy_(self.avg_fn(p_swa.detach(), p_model_,
self.n_averaged.to(device)))
self.n_averaged += 1
@torch.no_grad()
def update_bn(loader, model, device=None):
r"""Updates BatchNorm running_mean, running_var buffers in the model.
Reported by Pylint.
Line: 144
Column: 35
momenta = {}
for module in model.modules():
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
module.running_mean = torch.zeros_like(module.running_mean)
module.running_var = torch.ones_like(module.running_var)
momenta[module] = module.momentum
if not momenta:
return
Reported by Pylint.
Line: 145
Column: 34
for module in model.modules():
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
module.running_mean = torch.zeros_like(module.running_mean)
module.running_var = torch.ones_like(module.running_var)
momenta[module] = module.momentum
if not momenta:
return
Reported by Pylint.
Line: 257
Column: 16
return (lr - alpha * swa_lr) / (1 - alpha)
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", UserWarning)
step = self._step_count - 1
if self.anneal_epochs == 0:
step = max(1, step)
Reported by Pylint.
Line: 258
Column: 13
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", UserWarning)
step = self._step_count - 1
if self.anneal_epochs == 0:
step = max(1, step)
prev_t = max(0, min(1, (step - 1) / max(1, self.anneal_epochs)))
Reported by Pylint.
Line: 143
Column: 31
"""
momenta = {}
for module in model.modules():
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
module.running_mean = torch.zeros_like(module.running_mean)
module.running_var = torch.ones_like(module.running_var)
momenta[module] = module.momentum
if not momenta:
Reported by Pylint.
caffe2/python/onnx/helper.py
28 issues
Line: 9
Column: 1
from caffe2.proto import caffe2_pb2
from onnx.backend.base import namedtupledict
from caffe2.python.onnx.workspace import Workspace
import logging
import time
Reported by Pylint.
Line: 103
Column: 44
return results[0]
def benchmark_pytorch_model(model, inputs, training=False, warmup_iters=3,
main_iters=10, verbose=False):
'''
Run the model several times, and measure the execution time.
Return the execution time per iteration (millisecond).
'''
Reported by Pylint.
Line: 104
Column: 44
def benchmark_pytorch_model(model, inputs, training=False, warmup_iters=3,
main_iters=10, verbose=False):
'''
Run the model several times, and measure the execution time.
Return the execution time per iteration (millisecond).
'''
for _i in range(warmup_iters):
Reported by Pylint.
Line: 117
Column: 14
model(*inputs)
te = time.time()
total_pytorch_time += te - ts
log.info("The PyTorch model execution time per iter is {} milliseconds, "
"{} iters per second.".format(total_pytorch_time / main_iters * 1000,
main_iters / total_pytorch_time))
return total_pytorch_time * 1000 / main_iters
Reported by Pylint.
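The line-117 finding sits in a log.info call whose message is pre-built with str.format. Assuming the rule is logging-format-interpolation (W1202), the fix is lazy %-style arguments, so the string is only rendered when the record is actually emitted. A drop-in rewrite of the quoted call (not standalone; log, total_pytorch_time, and main_iters come from the surrounding function):

log.info(
    "The PyTorch model execution time per iter is %s milliseconds, "
    "%s iters per second.",
    total_pytorch_time / main_iters * 1000,
    main_iters / total_pytorch_time,
)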
Line: 1
Column: 1
## @package onnx
# Module caffe2.python.onnx.helper
from caffe2.proto import caffe2_pb2
from onnx.backend.base import namedtupledict
Reported by Pylint.
Line: 11
Column: 1
from caffe2.proto import caffe2_pb2
from onnx.backend.base import namedtupledict
from caffe2.python.onnx.workspace import Workspace
import logging
import time
log = logging.getLogger(__name__)
Reported by Pylint.
Line: 12
Column: 1
from onnx.backend.base import namedtupledict
from caffe2.python.onnx.workspace import Workspace
import logging
import time
log = logging.getLogger(__name__)
Reported by Pylint.
Line: 13
Column: 1
from caffe2.python.onnx.workspace import Workspace
import logging
import time
log = logging.getLogger(__name__)
Reported by Pylint.
Line: 19
Column: 1
log = logging.getLogger(__name__)
def c2_native_run_op(op_def, inputs):
ws = Workspace()
if isinstance(inputs, dict):
for key, value in inputs.items():
ws.FeedBlob(key, value, op_def.device_option)
else:
Reported by Pylint.
torch/sparse/__init__.py
28 issues
Line: 42
Column: 12
beta (Number, optional): multiplier for :attr:`mat` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
"""
return torch._sparse_addmm(mat, mat1, mat2, beta=beta, alpha=alpha)
def mm(mat1: Tensor, mat2: Tensor) -> Tensor:
r"""
Performs a matrix multiplication of the sparse matrix :attr:`mat1`
Reported by Pylint.
Line: 90
Column: 16
size=(2, 3), nnz=6, layout=torch.sparse_coo)
"""
if mat1.is_sparse and mat2.is_sparse:
return torch._sparse_sparse_matmul(mat1, mat2)
return torch._sparse_mm(mat1, mat2)
def sum(input: Tensor, dim: DimOrDims = None,
dtype: Optional[DType] = None) -> Tensor:
Reported by Pylint.
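The quoted context defines def sum(input: Tensor, ...), which shadows two Python built-ins at once. If any of this file's findings are redefined-builtin (W0622), an established public API usually keeps the names and suppresses the check rather than breaking the signature; a sketch with simplified parameter types (an assumption):

from torch import Tensor

# Keeping the public names, silencing the shadowing check instead:
def sum(input: Tensor, dim=None,  # pylint: disable=redefined-builtin
        dtype=None) -> Tensor:
    ...  # the real body dispatches to torch._sparse_sum, as in the snippet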
Line: 91
Column: 12
"""
if mat1.is_sparse and mat2.is_sparse:
return torch._sparse_sparse_matmul(mat1, mat2)
return torch._sparse_mm(mat1, mat2)
def sum(input: Tensor, dim: DimOrDims = None,
dtype: Optional[DType] = None) -> Tensor:
r"""
Reported by Pylint.
Line: 152
Column: 20
"""
if dtype is None:
if dim is not None:
return torch._sparse_sum(input, dim)
else:
return torch._sparse_sum(input)
else:
if dim is not None:
return torch._sparse_sum(input, dim, dtype=dtype)
Reported by Pylint.
Line: 154
Column: 20
if dim is not None:
return torch._sparse_sum(input, dim)
else:
return torch._sparse_sum(input)
else:
if dim is not None:
return torch._sparse_sum(input, dim, dtype=dtype)
else:
return torch._sparse_sum(input, dtype=dtype)
Reported by Pylint.
Line: 157
Column: 20
return torch._sparse_sum(input)
else:
if dim is not None:
return torch._sparse_sum(input, dim, dtype=dtype)
else:
return torch._sparse_sum(input, dtype=dtype)
def softmax(input: Tensor, dim: int, dtype: Optional[DType] = None) -> Tensor:
Reported by Pylint.
Line: 159
Column: 20
if dim is not None:
return torch._sparse_sum(input, dim, dtype=dtype)
else:
return torch._sparse_sum(input, dtype=dtype)
def softmax(input: Tensor, dim: int, dtype: Optional[DType] = None) -> Tensor:
r"""Applies a softmax function.
Reported by Pylint.
Line: 186
Column: 12
performed. This is useful for preventing data type
overflows. Default: None
"""
return torch._sparse_softmax(input, dim, dtype=dtype)
def log_softmax(input: Tensor, dim: int, dtype: Optional[DType] = None) -> Tensor:
r"""Applies a softmax function followed by logarithm.
Reported by Pylint.
Line: 203
Column: 12
performed. This is useful for preventing data type
overflows. Default: None
"""
return torch._sparse_log_softmax(input, dim, dtype=dtype)
Reported by Pylint.
caffe2/python/mkl/rewrite_graph_test.py
28 issues
Line: 9
Column: 1
import unittest
import numpy as np
import copy
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python.model_helper import ModelHelper
from caffe2.python.models import resnet
from caffe2.python import workspace, brew
Reported by Pylint.
Line: 10
Column: 1
import numpy as np
import copy
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python.model_helper import ModelHelper
from caffe2.python.models import resnet
from caffe2.python import workspace, brew
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 183
Column: 22
return model, [(1, 1, 224, 224)]
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class MKLRewriteTest(hu.HypothesisTestCase):
@given(gen=st.sampled_from([simple_relu, simple_fc,
simple_mlp, simple_cnn]))
def test_mkl_simple_rewrite(self, gen):
cpu_model, (shape,) = gen()
Reported by Pylint.
Line: 254
Column: 5
atol=1e-4, rtol=1e-4)
if __name__ == "__main__":
import unittest
unittest.main()
Reported by Pylint.
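The quoted context shows import unittest both at the top of the file (the "Line: 1" block below) and again inside the __main__ guard at the flagged line 254. Assuming this is Pylint's reimported (W0404) check, the duplicate import can simply be dropped:

if __name__ == "__main__":
    unittest.main()  # the module-level `import unittest` already covers this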
Line: 1
Column: 1
import unittest
import numpy as np
import copy
from hypothesis import given
Reported by Pylint.
Line: 8
Column: 1
import unittest
import numpy as np
import copy
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python.model_helper import ModelHelper
from caffe2.python.models import resnet
Reported by Pylint.
Line: 19
Column: 1
import caffe2.python.mkl.rewrite_graph as rewrite_graph
def deterministic_io(model):
model = copy.deepcopy(model)
for i, op in enumerate(model.InitProto().op):
op.device_option.random_seed = i + 1
if not model.Proto().external_output:
model.Proto().external_output.extend([model.Proto().op[-1].output[0]])
Reported by Pylint.
Line: 21
Column: 12
def deterministic_io(model):
model = copy.deepcopy(model)
for i, op in enumerate(model.InitProto().op):
op.device_option.random_seed = i + 1
if not model.Proto().external_output:
model.Proto().external_output.extend([model.Proto().op[-1].output[0]])
return model
Reported by Pylint.
Line: 27
Column: 1
model.Proto().external_output.extend([model.Proto().op[-1].output[0]])
return model
def simple_fc():
model = ModelHelper(name="r")
brew.fc(model, "data", "fc", 10, 10)
return model, [(1, 10)]
def double_matmul():
Reported by Pylint.
Line: 32
Column: 1
brew.fc(model, "data", "fc", 10, 10)
return model, [(1, 10)]
def double_matmul():
model = ModelHelper(name="r")
fc0 = brew.fc(model, "data", "fc0", 10, 10)
fc1 = brew.fc(model, fc0, "fc1", 10, 10)
model.Proto().external_output[:] = [str(fc0), str(fc1)]
return model, [(1, 10)]
Reported by Pylint.
torch/distributed/rpc/backend_registry.py
28 issues
Line: 9
Column: 1
import torch
import torch.distributed as dist
from . import api
from . import constants as rpc_constants
BackendValue = collections.namedtuple(
"BackendValue", ["construct_rpc_backend_options_handler", "init_backend_handler"]
Reported by Pylint.
Line: 10
Column: 1
import torch.distributed as dist
from . import api
from . import constants as rpc_constants
BackendValue = collections.namedtuple(
"BackendValue", ["construct_rpc_backend_options_handler", "init_backend_handler"]
)
Reported by Pylint.
Line: 138
Column: 5
_channels=None,
**kwargs
):
from . import TensorPipeRpcBackendOptions
return TensorPipeRpcBackendOptions(
rpc_timeout=rpc_timeout,
init_method=init_method,
num_worker_threads=num_worker_threads,
Reported by Pylint.
Line: 162
Column: 69
my_name, my_device_count, my_device_maps, my_devices, group
):
gathered: List[Tuple[
str, int, Dict[str, Dict[torch.device, torch.device]], List[torch.device]
]] = [("", 0, {}, []) for _ in range(group.size())]
dist.all_gather_object(
gathered, (my_name, my_device_count, my_device_maps, my_devices), group
)
all_names = [name for name, _, _, _ in gathered]
Reported by Pylint.
Line: 162
Column: 34
my_name, my_device_count, my_device_maps, my_devices, group
):
gathered: List[Tuple[
str, int, Dict[str, Dict[torch.device, torch.device]], List[torch.device]
]] = [("", 0, {}, []) for _ in range(group.size())]
dist.all_gather_object(
gathered, (my_name, my_device_count, my_device_maps, my_devices), group
)
all_names = [name for name, _, _, _ in gathered]
Reported by Pylint.
Line: 162
Column: 48
my_name, my_device_count, my_device_maps, my_devices, group
):
gathered: List[Tuple[
str, int, Dict[str, Dict[torch.device, torch.device]], List[torch.device]
]] = [("", 0, {}, []) for _ in range(group.size())]
dist.all_gather_object(
gathered, (my_name, my_device_count, my_device_maps, my_devices), group
)
all_names = [name for name, _, _, _ in gathered]
Reported by Pylint.
Line: 236
Column: 41
)
# passed all checked, construct reverse mapping for return values
reverse_device_maps: Dict[str, Dict[torch.device, torch.device]] = {}
for node in all_names:
if my_name in all_device_maps[node]:
reverse_device_maps[node] = {
v: k for k, v in all_device_maps[node][my_name].items()
}
Reported by Pylint.
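Lint findings aside, the quoted loop builds reverse_device_maps by inverting each peer's forward map with a dict comprehension. A framework-free illustration of the same inversion (the device names are made up); note it assumes the forward map is one-to-one, otherwise inverted keys would collide:

forward = {"cuda:0": "cuda:1", "cuda:2": "cuda:3"}
reverse = {v: k for k, v in forward.items()}  # same comprehension as above
assert reverse == {"cuda:1": "cuda:0", "cuda:3": "cuda:2"}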
Line: 236
Column: 55
)
# passed all checked, construct reverse mapping for return values
reverse_device_maps: Dict[str, Dict[torch.device, torch.device]] = {}
for node in all_names:
if my_name in all_device_maps[node]:
reverse_device_maps[node] = {
v: k for k, v in all_device_maps[node][my_name].items()
}
Reported by Pylint.
Line: 244
Column: 26
}
if not my_devices:
devices_set: Set[torch.device] = set()
for _, map_ in my_device_maps.items():
devices_set.update(map_.keys())
for _, map_ in reverse_device_maps.items():
devices_set.update(map_.keys())
devices_set.discard(torch.device("cpu"))
Reported by Pylint.
Line: 249
Column: 29
devices_set.update(map_.keys())
for _, map_ in reverse_device_maps.items():
devices_set.update(map_.keys())
devices_set.discard(torch.device("cpu"))
my_devices = list(devices_set)
my_devices = sorted(my_devices, key=lambda d: d.index)
return reverse_device_maps, my_devices
Reported by Pylint.