The following issues were found:
benchmarks/tensorexpr/elementwise.py
81 issues
Line: 1
Column: 1
from . import benchmark
import itertools
import numpy as np
import torch
import scipy.special
# A template class for elementwise operations.
# A derived class will override the class instance to customize its behavior.
class ElementBench(benchmark.Benchmark):
Reported by Pylint.
Line: 4
Column: 1
from . import benchmark
import itertools
import numpy as np
import torch
import scipy.special
# A template class for elementwise operations.
# A derived class will override the class instance to customize its behavior.
class ElementBench(benchmark.Benchmark):
Reported by Pylint.
Line: 5
Column: 1
import itertools
import numpy as np
import torch
import scipy.special
# A template class for elementwise operations.
# A derived class will override the class instance to customize its behavior.
class ElementBench(benchmark.Benchmark):
# List of customization class variables.
Reported by Pylint.
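The three findings above (lines 1, 4, and 5, column 1) sit on the import block, where a relative import precedes the standard-library and third-party imports; the report omits the message codes, but missing-module-docstring (C0114) and wrong-import-order (C0411) are the likely candidates. A minimal sketch of a conforming header, assuming those messages:

    """Benchmarks for elementwise operators."""  # module docstring first (C0114)
    import itertools  # standard library

    import numpy as np  # third-party
    import scipy.special
    import torch

    from . import benchmark  # local imports come last (C0411)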
Line: 26
Column: 44
self.d3 = self.rand([N], device=device, dtype=dtype, requires_grad=self.requires_grad)
self.d4 = self.rand([N], device=device, dtype=dtype, requires_grad=self.requires_grad)
self.inputs = [self.d1, self.d2, self.d3, self.d4]
self.deterministic = "rand" not in self.op_str
def _eval(self, d1, d2, d3, d4, binary_op, unary_op):
if not binary_op:
def binary_op(x, y):
return x + y
Reported by Pylint.
Line: 78
Column: 26
else:
sol_count = 1 + 1
algorithmic_count = 1 + 1
if "rand" in self.op_str:
sol_count = 1
algorithmic_count = 1
else:
if self.split_input:
sol_count = (input_count + 1) + (1 + input_count)
Reported by Pylint.
Line: 88
Column: 26
else:
sol_count = 1 + 1
algorithmic_count = 1 + 1
if "rand" in self.op_str:
sol_count = 1
algorithmic_count = 1
buffer_size = self.N
return {
Reported by Pylint.
Line: 111
Column: 13
["div", lambda a, b: a / (b + 1e-4)],
[
"pow",
lambda a, b: torch.pow(a, b),
lambda a, b: np.power(a, b),
], # no fusion triggered
["max", lambda a, b: torch.max(a, b), lambda a, b: np.maximum(a, b)],
["min", lambda a, b: torch.min(a, b), lambda a, b: np.minimum(a, b)],
]
Reported by Pylint.
Line: 112
Column: 13
[
"pow",
lambda a, b: torch.pow(a, b),
lambda a, b: np.power(a, b),
], # no fusion triggered
["max", lambda a, b: torch.max(a, b), lambda a, b: np.maximum(a, b)],
["min", lambda a, b: torch.min(a, b), lambda a, b: np.minimum(a, b)],
]
Reported by Pylint.
Line: 114
Column: 17
lambda a, b: torch.pow(a, b),
lambda a, b: np.power(a, b),
], # no fusion triggered
["max", lambda a, b: torch.max(a, b), lambda a, b: np.maximum(a, b)],
["min", lambda a, b: torch.min(a, b), lambda a, b: np.minimum(a, b)],
]
unary_op_list = [
["erf", lambda x: torch.erf(x), lambda x: scipy.special.erf(x)],
Reported by Pylint.
Line: 114
Column: 47
lambda a, b: torch.pow(a, b),
lambda a, b: np.power(a, b),
], # no fusion triggered
["max", lambda a, b: torch.max(a, b), lambda a, b: np.maximum(a, b)],
["min", lambda a, b: torch.min(a, b), lambda a, b: np.minimum(a, b)],
]
unary_op_list = [
["erf", lambda x: torch.erf(x), lambda x: scipy.special.erf(x)],
Reported by Pylint.
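The four findings above (lines 111-114) point inside lambdas that merely forward their arguments, which is what unnecessary-lambda (W0108) flags; the report omits the code, so this is an inference. Assuming that message, the wrapped callables can be passed directly:

    ["pow", torch.pow, np.power],  # no fusion triggered
    ["max", torch.max, np.maximum],
    ["min", torch.min, np.minimum],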
caffe2/python/net_builder_test.py
81 issues
Line: 91
Column: 14
q = _test_if(ops.Const(25))
plan = Plan('name')
plan.AddStep(to_execution_step(nb))
ws = workspace.C.Workspace()
ws.run(plan)
expected = [
(y, 5),
(z, False),
(w, True),
Reported by Pylint.
Line: 297
Column: 14
plan = Plan('if_net_test')
plan.AddStep(to_execution_step(nb))
ws = workspace.C.Workspace()
ws.run(plan)
first_res_value = ws.blobs[str(first_res)].fetch()
second_res_value = ws.blobs[str(second_res)].fetch()
y0_value = ws.blobs[str(y0)].fetch()
Reported by Pylint.
Line: 325
Column: 14
plan = Plan('while_net_test')
plan.AddStep(to_execution_step(nb))
ws = workspace.C.Workspace()
ws.run(plan)
x_value = ws.blobs[str(x)].fetch()
y_value = ws.blobs[str(y)].fetch()
Reported by Pylint.
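The findings above at lines 91, 297, and 325 (column 14) each land on workspace.C.Workspace(). Members of compiled extension modules are invisible to static analysis, so these are likely no-member (E1101) or c-extension-no-member (I1101) false positives. The usual fix is configuration rather than code; a sketch, assuming that diagnosis:

    # .pylintrc
    [MAIN]
    # Let Pylint import the C extension to discover its members
    # (older Pylint versions call this option extension-pkg-whitelist).
    extension-pkg-allow-list=caffe2.python.workspace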
Line: 26
Column: 23
PythonOpStats.num_instances += 1
PythonOpStats.lock.release()
def my_op(inputs, outputs):
PythonOpStats.lock.acquire()
PythonOpStats.num_calls += 1
PythonOpStats.lock.release()
return my_op
Reported by Pylint.
Line: 26
Column: 15
PythonOpStats.num_instances += 1
PythonOpStats.lock.release()
def my_op(inputs, outputs):
PythonOpStats.lock.acquire()
PythonOpStats.num_calls += 1
PythonOpStats.lock.release()
return my_op
Reported by Pylint.
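Both findings at line 26 (columns 15 and 23) point at the parameters of my_op, which the body never uses, suggesting unused-argument (W0613). The signature is dictated by the Caffe2 Python-op callback API, so the conventional fix is an underscore prefix; a sketch, assuming that message:

    def my_op(_inputs, _outputs):  # names required by the callback API, values unused
        PythonOpStats.lock.acquire()
        PythonOpStats.num_calls += 1
        PythonOpStats.lock.release()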
Line: 104
Column: 13
]
for b, expected in expected:
actual = ws.blobs[str(b)].fetch()
self.assertEquals(actual, expected)
def _expected_loop(self):
total = 0
total_large = 0
total_small = 0
Reported by Pylint.
Line: 155
Column: 13
result = final_output(total)
with LocalSession() as session:
session.run(task)
self.assertEquals(2, result.fetch())
def test_loops(self):
with Task() as task:
out_actual = self._actual_loop()
with LocalSession() as session:
Reported by Pylint.
Line: 165
Column: 17
expected = self._expected_loop()
actual = [o.fetch() for o in out_actual]
for e, a in zip(expected, actual):
self.assertEquals(e, a)
def test_setup(self):
with Task() as task:
with ops.task_init():
one = ops.Const(1)
Reported by Pylint.
Line: 187
Column: 13
o7_2 = final_output(seven_2)
with LocalSession() as session:
session.run(task)
self.assertEquals(o6.fetch(), 6)
self.assertEquals(o7_1.fetch(), 7)
self.assertEquals(o7_2.fetch(), 7)
def test_multi_instance_python_op(self):
"""
Reported by Pylint.
Line: 188
Column: 13
with LocalSession() as session:
session.run(task)
self.assertEquals(o6.fetch(), 6)
self.assertEquals(o7_1.fetch(), 7)
self.assertEquals(o7_2.fetch(), 7)
def test_multi_instance_python_op(self):
"""
When task instances are created at runtime, C++ concurrently creates
Reported by Pylint.
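The findings at lines 104, 155, 165, 187, and 188 all land on self.assertEquals, the long-deprecated unittest alias that deprecated-method (W1505) flags. Assuming that message, the fix is the current name:

    self.assertEqual(actual, expected)  # assertEquals is a deprecated alias of assertEqual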
torch/fx/experimental/accelerator_partitioner.py
80 issues
Line: 71
Column: 1
module_with_submodules: GraphModule
"""Followings are some helper functions for partition manipulation"""
def reset_partition_device(partitions):
for partition in partitions:
partition.logical_device_ids = []
Reported by Pylint.
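The finding at line 71, column 1 points at a bare string sitting between statements; outside docstring position such a string does nothing, which is what pointless-string-statement (W0105) reports. Assuming that message, turning it into a comment is the minimal fix:

    # The following are helper functions for partition manipulation.
    def reset_partition_device(partitions):
        for partition in partitions:
            partition.logical_device_ids = []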
Line: 316
Column: 9
find the partitions, do the partitions,
and then return a DAG and a new fx module with submodule nodes (partitions)
"""
self.graph_module = fx_module
self.torch_module = torch_module
self.devices = partitioner_config.devices
if len(self.devices) == 0:
raise RuntimeError("No devices")
# Tag the size in bytes to all nodes in the graph_module.
Reported by Pylint.
Line: 317
Column: 9
and then return a DAG and a new fx module with submodule nodes (partitions)
"""
self.graph_module = fx_module
self.torch_module = torch_module
self.devices = partitioner_config.devices
if len(self.devices) == 0:
raise RuntimeError("No devices")
# Tag the size in bytes to all nodes in the graph_module.
get_size_of_all_nodes(self.graph_module)
Reported by Pylint.
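The findings at lines 316-317 (column 9) point at assignments to self inside a regular method, which suggests attribute-defined-outside-init (W0201). A common remedy is to pre-declare the attributes in __init__ so the method only rebinds them; a minimal sketch, assuming that message, with names taken from the snippet:

    def __init__(self) -> None:
        # Declared here so every instance attribute is visible in one place;
        # partition_graph() assigns the real values later.
        self.graph_module = None
        self.torch_module = None
        self.devices = []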
Line: 358
Column: 3
):
raise RuntimeError("All devices must have same memory size!")
# sparse_nn_partition only support same memory size
# TODO: add different size support for sparse_nn_partition
self.sparse_nn_partition(available_mem_bytes)
# Cost aware partition
elif partitioner_config.mode == PartitionMode.cost_aware:
self.cost_aware_partition(
partitioner_config.transfer_rate_bytes_per_sec,
Reported by Pylint.
Line: 470
Column: 33
if len(self.partitions) == len(self.devices):
# No device is left
# Put the previous partitions into a list (non_single_node_partitions)
non_single_node_partitions = self.partitions[:]
# Create the first single node partition for the current node
self.create_single_node_partition(node)
continue
# Some devices are still left
# Create a new partition with a mem size that is enough for the current node
Reported by Pylint.
Line: 587
Column: 16
break
if node.op in {"placeholder", "get_attr"}:
continue
if node.target == operator.__getitem__:
continue
input_nodes: Dict[Node, None] = {}
map_arg(node.args, lambda n: input_nodes.setdefault(n))
map_arg(node.kwargs, lambda n: input_nodes.setdefault(n))
# When a node has two or more output nodes,
Reported by Pylint.
Line: 590
Column: 32
if node.target == operator.__getitem__:
continue
input_nodes: Dict[Node, None] = {}
map_arg(node.args, lambda n: input_nodes.setdefault(n))
map_arg(node.kwargs, lambda n: input_nodes.setdefault(n))
# When a node has two or more output nodes,
# it outputs its result to 'getitem' nodes.
# Those 'getitem' nodes are the output node for this node.
# Otherwise, the output node is this node itself.
Reported by Pylint.
Line: 590
Column: 42
if node.target == operator.__getitem__:
continue
input_nodes: Dict[Node, None] = {}
map_arg(node.args, lambda n: input_nodes.setdefault(n))
map_arg(node.kwargs, lambda n: input_nodes.setdefault(n))
# When a node has two or more output nodes,
# it outputs its result to 'getitem' nodes.
# Those 'getitem' nodes are the output node for this node.
# Otherwise, the output node is this node itself.
Reported by Pylint.
Line: 591
Column: 34
continue
input_nodes: Dict[Node, None] = {}
map_arg(node.args, lambda n: input_nodes.setdefault(n))
map_arg(node.kwargs, lambda n: input_nodes.setdefault(n))
# When a node has two or more output nodes,
# it outputs its result to 'getitem' nodes.
# Those 'getitem' nodes are the output node for this node.
# Otherwise, the output node is this node itself.
if len(node.users) > 1:
Reported by Pylint.
Line: 591
Column: 44
continue
input_nodes: Dict[Node, None] = {}
map_arg(node.args, lambda n: input_nodes.setdefault(n))
map_arg(node.kwargs, lambda n: input_nodes.setdefault(n))
# When a node has two or more output nodes,
# it outputs its result to 'getitem' nodes.
# Those 'getitem' nodes are the output node for this node.
# Otherwise, the output node is this node itself.
if len(node.users) > 1:
Reported by Pylint.
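The cluster of findings at lines 587-591 points into lambdas defined inside a loop that close over a loop-local name (input_nodes), which is what cell-var-from-loop (W0640) warns about. Here the lambdas are invoked immediately by map_arg, so the late-binding hazard never materializes, but binding through a default argument both silences the warning and makes the snapshot explicit; a sketch, assuming that message:

    input_nodes: Dict[Node, None] = {}
    map_arg(node.args, lambda n, d=input_nodes: d.setdefault(n))
    map_arg(node.kwargs, lambda n, d=input_nodes: d.setdefault(n))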
test/fx/quantization.py
80 issues
Line: 5
Column: 1
**This file is EXPERIMENTAL and is mostly used for testing purposes! Do not
rely on it for anything!**
'''
from torch.fx import Graph, GraphModule
from torch.fx.graph import map_arg
from torch.fx.proxy import Proxy
import sys
import torch
from torch.nn.utils import fuse_conv_bn_weights
Reported by Pylint.
Line: 6
Column: 1
rely on it for anything!**
'''
from torch.fx import Graph, GraphModule
from torch.fx.graph import map_arg
from torch.fx.proxy import Proxy
import sys
import torch
from torch.nn.utils import fuse_conv_bn_weights
import operator
Reported by Pylint.
Line: 7
Column: 1
'''
from torch.fx import Graph, GraphModule
from torch.fx.graph import map_arg
from torch.fx.proxy import Proxy
import sys
import torch
from torch.nn.utils import fuse_conv_bn_weights
import operator
Reported by Pylint.
Line: 9
Column: 1
from torch.fx.graph import map_arg
from torch.fx.proxy import Proxy
import sys
import torch
from torch.nn.utils import fuse_conv_bn_weights
import operator
# can be a
# module type, a builtin function, or a string to match target
Reported by Pylint.
Line: 10
Column: 1
from torch.fx.proxy import Proxy
import sys
import torch
from torch.nn.utils import fuse_conv_bn_weights
import operator
# can be a
# module type, a builtin function, or a string to match target
Reported by Pylint.
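The findings at lines 5-10 (column 1) cover an import block in which the standard-library modules sys and operator trail the third-party torch imports, so wrong-import-order (C0411) is the likely message. A sketch of the conforming order, assuming that message:

    import operator  # standard library first
    import sys

    import torch  # then third-party
    from torch.fx import Graph, GraphModule
    from torch.fx.graph import map_arg
    from torch.fx.proxy import Proxy
    from torch.nn.utils import fuse_conv_bn_weights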
Line: 222
Column: 23
def load_arg(a):
return map_arg(a, lambda node: env[node.name])
output_node : Optional[Node] = None
for node in self.graph.nodes:
if node.op == 'placeholder':
result = next(args_iter)
elif node.op == 'get_attr':
result = self.state_dict[node.target]
Reported by Pylint.
Line: 222
Column: 32
def load_arg(a):
return map_arg(a, lambda node: env[node.name])
output_node : Optional[Node] = None
for node in self.graph.nodes:
if node.op == 'placeholder':
result = next(args_iter)
elif node.op == 'get_attr':
result = self.state_dict[node.target]
Reported by Pylint.
Line: 266
Column: 37
def copy_recursive(node):
def load_or_emit(n):
if n.name in env or e.name in quant_env:
return load_arg(n, quantized=False)
else:
return copy_recusive(n)
r = env[node.name] = self.quantized_graph.node_copy(node, lambda n: load_arg(n, quantized=False))
return r
Reported by Pylint.
Line: 269
Column: 28
if n.name in env or e.name in quant_env:
return load_arg(n, quantized=False)
else:
return copy_recusive(n)
r = env[node.name] = self.quantized_graph.node_copy(node, lambda n: load_arg(n, quantized=False))
return r
for node in self.graph.nodes:
root_node, obj = self.matches.get(node.name, (None, None))
Reported by Pylint.
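The findings at lines 266 and 269 point at e.name and copy_recusive, neither of which is defined in the enclosing scope, so these are almost certainly undefined-variable (E0602) errors, and reading the surrounding code they look like typos for n.name and copy_recursive. A sketch of the apparently intended code, under that reading:

    def copy_recursive(node):
        def load_or_emit(n):
            if n.name in env or n.name in quant_env:  # was: e.name
                return load_arg(n, quantized=False)
            return copy_recursive(n)                  # was: copy_recusive
        r = env[node.name] = self.quantized_graph.node_copy(
            node, lambda n: load_arg(n, quantized=False))
        return r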
Line: 31
Column: 35
return scale, zero_point
class MinMaxObserver:
def __init__(self, quantizer, node):
self.min, self.max = float('inf'), float('-inf')
self.all_tensors = True
def observe(self, node, env):
v = env[node.name]
Reported by Pylint.
torch/quantization/_numeric_suite_fx.py
80 issues
Line: 16
Column: 1
get_type_a_related_to_b,
)
from .ns.weight_utils import (
extract_weight_from_node,
)
from .ns.graph_passes import (
add_loggers_to_model,
Reported by Pylint.
Line: 20
Column: 1
extract_weight_from_node,
)
from .ns.graph_passes import (
add_loggers_to_model,
create_a_shadows_b,
)
from .ns.utils import (
Reported by Pylint.
Line: 25
Column: 1
create_a_shadows_b,
)
from .ns.utils import (
rekey_logger_info_on_node_name_of_model,
maybe_add_missing_fqns,
get_target_type_str,
)
Reported by Pylint.
Line: 31
Column: 1
get_target_type_str,
)
from .ns.ns_types import (
NSSingleResultValuesType,
NSResultsType,
NSNodeTargetType,
)
Reported by Pylint.
Line: 139
Column: 5
results: NSResultsType,
op_to_type_to_weight_extraction_fn: Optional[Dict[str, Dict[Callable, Callable]]] = None,
) -> None:
torch._C._log_api_usage_once("quantization_api._numeric_suite_fx._extract_weights_one_model")
for node, ref_name in nodes_and_names_to_instrument:
res_type = NSSingleResultValuesType.WEIGHT.value
extracted_weight = extract_weight_from_node(
node, model, op_to_type_to_weight_extraction_fn)
if extracted_weight:
Reported by Pylint.
Line: 139
Column: 5
results: NSResultsType,
op_to_type_to_weight_extraction_fn: Optional[Dict[str, Dict[Callable, Callable]]] = None,
) -> None:
torch._C._log_api_usage_once("quantization_api._numeric_suite_fx._extract_weights_one_model")
for node, ref_name in nodes_and_names_to_instrument:
res_type = NSSingleResultValuesType.WEIGHT.value
extracted_weight = extract_weight_from_node(
node, model, op_to_type_to_weight_extraction_fn)
if extracted_weight:
Reported by Pylint.
Line: 159
Column: 5
unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
op_to_type_to_weight_extraction_fn: Optional[Dict[str, Dict[Callable, Callable]]] = None,
) -> NSResultsType:
torch._C._log_api_usage_once("quantization_api._numeric_suite_fx._extract_weights_impl")
matched_subgraph_pairs = get_matching_subgraph_pairs(
gm_a, gm_b, base_name_to_sets_of_related_ops,
unmatchable_types_map)
# split the subgraph pairs into one data structure for each model
Reported by Pylint.
Line: 159
Column: 5
unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
op_to_type_to_weight_extraction_fn: Optional[Dict[str, Dict[Callable, Callable]]] = None,
) -> NSResultsType:
torch._C._log_api_usage_once("quantization_api._numeric_suite_fx._extract_weights_impl")
matched_subgraph_pairs = get_matching_subgraph_pairs(
gm_a, gm_b, base_name_to_sets_of_related_ops,
unmatchable_types_map)
# split the subgraph pairs into one data structure for each model
Reported by Pylint.
Line: 199
Column: 5
unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
op_to_type_to_weight_extraction_fn: Optional[Dict[str, Dict[Callable, Callable]]] = None,
) -> NSResultsType:
torch._C._log_api_usage_once("quantization_api._numeric_suite_fx.extract_weights")
if base_name_to_sets_of_related_ops is None:
base_name_to_sets_of_related_ops = \
get_base_name_to_sets_of_related_ops()
type_a_related_to_b = \
get_type_a_related_to_b(base_name_to_sets_of_related_ops)
Reported by Pylint.
Line: 199
Column: 5
unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
op_to_type_to_weight_extraction_fn: Optional[Dict[str, Dict[Callable, Callable]]] = None,
) -> NSResultsType:
torch._C._log_api_usage_once("quantization_api._numeric_suite_fx.extract_weights")
if base_name_to_sets_of_related_ops is None:
base_name_to_sets_of_related_ops = \
get_base_name_to_sets_of_related_ops()
type_a_related_to_b = \
get_type_a_related_to_b(base_name_to_sets_of_related_ops)
Reported by Pylint.
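The findings at lines 139, 159, and 199 (column 5) each land on torch._C._log_api_usage_once, so protected-access (W0212) is the likely message. Calling this internal logging hook is deliberate in PyTorch's own code, so a targeted suppression is the usual resolution; a sketch, assuming that message:

    torch._C._log_api_usage_once(  # pylint: disable=protected-access
        "quantization_api._numeric_suite_fx.extract_weights")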
torch/nn/intrinsic/qat/modules/conv_fused.py
78 issues
Line: 22
Column: 42
MOD = TypeVar('MOD', bound=nn.modules.conv._ConvNd)
class _ConvBnNd(nn.modules.conv._ConvNd, nni._FusedModule):
_version = 2
_FLOAT_MODULE = MOD
def __init__(self,
Reported by Pylint.
Line: 52
Column: 35
self.bn = _BN_CLASS_MAP[dim](out_channels, eps, momentum, True, True)
self.weight_fake_quant = self.qconfig.weight()
if bias:
self.bias = Parameter(torch.empty(out_channels))
else:
self.register_parameter('bias', None)
self.reset_bn_parameters()
# this needs to be called after reset_bn_parameters,
Reported by Pylint.
Line: 95
Column: 23
def _forward(self, input):
assert self.bn.running_var is not None
running_std = torch.sqrt(self.bn.running_var + self.bn.eps)
scale_factor = self.bn.weight / running_std
weight_shape = [1] * len(self.weight.shape)
weight_shape[0] = -1
bias_shape = [1] * len(self.weight.shape)
bias_shape[1] = -1
Reported by Pylint.
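The finding at line 95 points at the parameter named input, which shadows the builtin and triggers redefined-builtin (W0622). Where the signature is internal, renaming is the cleanest fix; a sketch, assuming that message:

    def _forward(self, x):  # renamed from `input` to avoid shadowing the builtin
        assert self.bn.running_var is not None
        running_std = torch.sqrt(self.bn.running_var + self.bn.eps)
        # ... rest of the method unchanged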
Line: 105
Column: 25
# using zero bias here since the bias for original conv
# will be added later
if self.bias is not None:
zero_bias = torch.zeros_like(self.bias)
else:
zero_bias = torch.zeros(self.out_channels, device=scaled_weight.device)
conv = self._conv_forward(input, scaled_weight, zero_bias)
conv_orig = conv / scale_factor.reshape(bias_shape)
if self.bias is not None:
Reported by Pylint.
Line: 107
Column: 25
if self.bias is not None:
zero_bias = torch.zeros_like(self.bias)
else:
zero_bias = torch.zeros(self.out_channels, device=scaled_weight.device)
conv = self._conv_forward(input, scaled_weight, zero_bias)
conv_orig = conv / scale_factor.reshape(bias_shape)
if self.bias is not None:
conv_orig = conv_orig + self.bias.reshape(bias_shape)
conv = self.bn(conv_orig)
Reported by Pylint.
Line: 108
Column: 9
zero_bias = torch.zeros_like(self.bias)
else:
zero_bias = torch.zeros(self.out_channels, device=scaled_weight.device)
conv = self._conv_forward(input, scaled_weight, zero_bias)
conv_orig = conv / scale_factor.reshape(bias_shape)
if self.bias is not None:
conv_orig = conv_orig + self.bias.reshape(bias_shape)
conv = self.bn(conv_orig)
return conv
Reported by Pylint.
Line: 254
Column: 9
modules.append(relu)
result = cls._FLOAT_MODULE(*modules) # type: ignore[operator]
result.train(self.training)
return result
class ConvBn1d(_ConvBnNd, nn.Conv1d):
r"""
A ConvBn1d module is a module fused from Conv1d and BatchNorm1d,
Reported by Pylint.
Line: 443
Column: 32
def from_float(cls, mod):
return super(ConvBnReLU2d, cls).from_float(mod)
class ConvReLU2d(nnqat.Conv2d, nni._FusedModule):
r"""A ConvReLU2d module is a fused module of Conv2d and ReLU, attached with
FakeQuantize modules for weight for
quantization aware training.
We combined the interface of :class:`~torch.nn.Conv2d` and
Reported by Pylint.
Line: 614
Column: 32
def from_float(cls, mod):
return super(ConvBnReLU3d, cls).from_float(mod)
class ConvReLU3d(nnqat.Conv3d, nni._FusedModule):
r"""A ConvReLU3d module is a fused module of Conv3d and ReLU, attached with
FakeQuantize modules for weight for
quantization aware training.
We combined the interface of :class:`~torch.nn.Conv3d` and
Reported by Pylint.
Line: 19
Column: 28
}
MOD = TypeVar('MOD', bound=nn.modules.conv._ConvNd)
class _ConvBnNd(nn.modules.conv._ConvNd, nni._FusedModule):
_version = 2
Reported by Pylint.
torch/nn/quantized/functional.py
78 issues
Line: 115
Column: 18
stride=1, padding=0, dilation=1, groups=1,
padding_mode='zeros',
scale=1.0, zero_point=0,
dtype=torch.quint8):
r"""
Applies a 1D convolution over a quantized 1D input composed of several input
planes.
See :class:`~torch.nn.quantized.Conv1d` for details and output shape.
Reported by Pylint.
Line: 156
Column: 23
""" # noqa: E501
if padding_mode != 'zeros':
raise NotImplementedError("Only zero-padding is supported!")
if input.dtype != torch.quint8:
raise NotImplementedError("Only torch.quint8 is supported for activation tensor!")
if weight.dtype != torch.qint8:
raise NotImplementedError("Only torch.qint8 is supported for weight tensor!")
if input.ndim != 3:
raise ValueError("Input shape must be `(N, C, L)`!")
Reported by Pylint.
Line: 158
Column: 24
raise NotImplementedError("Only zero-padding is supported!")
if input.dtype != torch.quint8:
raise NotImplementedError("Only torch.quint8 is supported for activation tensor!")
if weight.dtype != torch.qint8:
raise NotImplementedError("Only torch.qint8 is supported for weight tensor!")
if input.ndim != 3:
raise ValueError("Input shape must be `(N, C, L)`!")
stride = _pair_from_first(stride)
padding = _pair_from_first(padding)
Reported by Pylint.
Line: 174
Column: 18
stride=1, padding=0, dilation=1, groups=1,
padding_mode='zeros',
scale=1.0, zero_point=0,
dtype=torch.quint8):
r"""
Applies a 2D convolution over a quantized 2D input composed of several input
planes.
See :class:`~torch.nn.quantized.Conv2d` for details and output shape.
Reported by Pylint.
Line: 215
Column: 23
""" # noqa: E501
if padding_mode != 'zeros':
raise NotImplementedError("Only zero-padding is supported!")
if input.dtype != torch.quint8:
raise NotImplementedError("Only torch.quint8 is supported for activation tensor!")
if weight.dtype != torch.qint8:
raise NotImplementedError("Only torch.qint8 is supported for weight tensor!")
if input.ndim != 4:
raise ValueError("Input shape must be `(N, C, H, W)`!")
Reported by Pylint.
Line: 217
Column: 24
raise NotImplementedError("Only zero-padding is supported!")
if input.dtype != torch.quint8:
raise NotImplementedError("Only torch.quint8 is supported for activation tensor!")
if weight.dtype != torch.qint8:
raise NotImplementedError("Only torch.qint8 is supported for weight tensor!")
if input.ndim != 4:
raise ValueError("Input shape must be `(N, C, H, W)`!")
stride = _pair(stride)
padding = _pair(padding)
Reported by Pylint.
Line: 230
Column: 65
return torch.ops.quantized.conv2d(input, packed_params, scale, zero_point)
def conv3d(input, weight, bias, stride=1, padding=0, dilation=1, groups=1,
padding_mode='zeros', scale=1.0, zero_point=0, dtype=torch.quint8):
r"""
Applies a 3D convolution over a quantized 3D input composed of several input
planes.
See :class:`~torch.nn.quantized.Conv3d` for details and output shape.
Reported by Pylint.
Line: 275
Column: 23
""" # noqa: E501
if padding_mode != 'zeros':
raise NotImplementedError("Only zero-padding is supported!")
if input.dtype != torch.quint8:
raise NotImplementedError("Only torch.quint8 is supported for activation tensor!")
if weight.dtype != torch.qint8:
raise NotImplementedError("Only torch.qint8 is supported for weight tensor!")
if input.ndim != 5:
raise ValueError("Input shape must be `(N, C, D, H, W)`!")
Reported by Pylint.
Line: 277
Column: 24
raise NotImplementedError("Only zero-padding is supported!")
if input.dtype != torch.quint8:
raise NotImplementedError("Only torch.quint8 is supported for activation tensor!")
if weight.dtype != torch.qint8:
raise NotImplementedError("Only torch.qint8 is supported for weight tensor!")
if input.ndim != 5:
raise ValueError("Input shape must be `(N, C, D, H, W)`!")
stride = _triple(stride)
padding = _triple(padding)
Reported by Pylint.
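The findings throughout this file (lines 115, 156, 158, 174, 215, 217, 230, 275, and 277) line up with references to torch.quint8 and torch.qint8, dtype objects that are registered at runtime by the C++ extension and therefore invisible to static analysis; no-member (E1101) false positives are a plausible diagnosis. A configuration sketch, assuming that reading:

    # .pylintrc
    [TYPECHECK]
    # Members created dynamically at import time; tell Pylint they exist.
    generated-members=torch.quint8,torch.qint8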
Line: 432
Column: 18
"""
if scale is not None and zero_point is not None:
assert not inplace, "Cannot rescale with `inplace`"
output = torch._empty_affine_quantized(
input.shape, scale=scale, zero_point=int(zero_point), dtype=input.dtype)
torch._C._nn.leaky_relu(input, negative_slope, out=output)
return output
if inplace:
result = torch._C._nn.leaky_relu_(input, negative_slope)
Reported by Pylint.
test/distributed/pipeline/sync/test_microbatch.py
78 issues
Line: 7
Column: 1
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
import torch.cuda
from torch.distributed.pipeline.sync.microbatch import Batch, check, gather, scatter
Reported by Pylint.
Line: 8
Column: 1
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
import torch.cuda
from torch.distributed.pipeline.sync.microbatch import Batch, check, gather, scatter
Reported by Pylint.
Line: 9
Column: 1
# LICENSE file in the root directory of this source tree.
import pytest
import torch
import torch.cuda
from torch.distributed.pipeline.sync.microbatch import Batch, check, gather, scatter
def test_batch_atomic():
Reported by Pylint.
Line: 11
Column: 1
import torch
import torch.cuda
from torch.distributed.pipeline.sync.microbatch import Batch, check, gather, scatter
def test_batch_atomic():
x = torch.tensor(42)
b = Batch(x)
Reported by Pylint.
Line: 22
Column: 9
assert b.tensor is x
with pytest.raises(AttributeError):
b.tensors
assert list(b) == [x]
assert len(b) == 1
assert b[0] is x
Reported by Pylint.
Line: 36
Column: 9
assert not b.atomic
with pytest.raises(AttributeError):
b.tensor
assert list(b) == [x, y]
assert len(b) == 2
assert b[0] is x
assert b[1] is y
Reported by Pylint.
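The findings at lines 22 and 36 (column 9) point at the bare attribute accesses b.tensors and b.tensor inside pytest.raises blocks; an expression whose result is discarded is what pointless-statement (W0104) reports. Here the access is the entire point of the test, so making the discard explicit keeps both Pylint and readers satisfied; a sketch, assuming that message:

    with pytest.raises(AttributeError):
        _ = b.tensors  # accessing .tensors on an atomic batch must raise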
Line: 1
Column: 1
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
import torch.cuda
Reported by Pylint.
Line: 14
Column: 1
from torch.distributed.pipeline.sync.microbatch import Batch, check, gather, scatter
def test_batch_atomic():
x = torch.tensor(42)
b = Batch(x)
assert b.atomic
Reported by Pylint.
Line: 15
Column: 5
def test_batch_atomic():
x = torch.tensor(42)
b = Batch(x)
assert b.atomic
assert b.tensor is x
Reported by Pylint.
Line: 16
Column: 5
def test_batch_atomic():
x = torch.tensor(42)
b = Batch(x)
assert b.atomic
assert b.tensor is x
with pytest.raises(AttributeError):
Reported by Pylint.
caffe2/python/model_helper.py
77 issues
Line: 434
Column: 26
raise AttributeError(
'Method ' + op_type + ' is not a registered operator.' +
' Did you mean: [' +
','.join(workspace.C.nearby_opnames(op_type)) + ']'
)
if op_type not in _known_working_ops:
if not self.allow_not_known_ops:
raise AttributeError(
"Operator {} is not known to be safe".format(op_type))
Reported by Pylint.
Line: 278
Column: 52
def _NormalizeNamescope(namescope):
if namescope is None:
return scope.CurrentNameScope()
elif namescope == '' or namescope.endswith(scope._NAMESCOPE_SEPARATOR):
return namescope
else:
return namescope + scope._NAMESCOPE_SEPARATOR
def GetParams(self, namescope=None, top_scope=False):
Reported by Pylint.
Line: 281
Column: 32
elif namescope == '' or namescope.endswith(scope._NAMESCOPE_SEPARATOR):
return namescope
else:
return namescope + scope._NAMESCOPE_SEPARATOR
def GetParams(self, namescope=None, top_scope=False):
'''
Returns the params in current namescope
'''
Reported by Pylint.
Line: 283
Column: 41
else:
return namescope + scope._NAMESCOPE_SEPARATOR
def GetParams(self, namescope=None, top_scope=False):
'''
Returns the params in current namescope
'''
namescope = ModelHelper._NormalizeNamescope(namescope)
Reported by Pylint.
Line: 316
Column: 9
self.Validate()
self.gradient_ops_added = True
self.grad_map = self.net.AddGradientOperators(*args, **kwargs)
self.param_to_grad = self.get_param_to_grad(self.params)
# Populate ParameterInfo for all parameters if missing
# and add gradient blob information. So optimizers can use it
for param, grad in self.param_to_grad.items():
Reported by Pylint.
Line: 441
Column: 29
raise AttributeError(
"Operator {} is not known to be safe".format(op_type))
logging.warning("You are creating an op that the ModelHelper "
"does not recognize: {}.".format(op_type))
return self.net.__getattr__(op_type)
def __dir__(self):
return sorted(set(chain(
Reported by Pylint.
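The finding at line 441 lands inside a logging.warning call whose message is pre-built with str.format, which logging-format-interpolation (W1202) flags: formatting should be deferred to the logging framework so it only happens when the record is actually emitted. A sketch, assuming that message:

    logging.warning("You are creating an op that the ModelHelper "
                    "does not recognize: %s.", op_type)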
Line: 544
Column: 9
]
)
except ValueError:
raise Exception("No ops with input={}".format(input_blobs))
try:
last_op_with_output = max(
[
j for j in range(len(ops))
if output_blobs.intersection(ops[j].output)
Reported by Pylint.
Line: 553
Column: 9
]
)
except ValueError:
raise Exception("No ops with output={}".format(output_blobs))
def validate_op(op):
# Check that the op does not have is_test = 0 set. This is a common
# pitfall with SpatialBN op, at least.
for arg in op.arg:
Reported by Pylint.
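The findings at lines 544 and 553 point at raise statements inside except blocks that drop the original exception, which suggests raise-missing-from (W0707). Chaining with `from` preserves the causal traceback; a sketch, assuming that message:

    except ValueError as e:
        raise Exception("No ops with input={}".format(input_blobs)) from e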
Line: 582
Column: 3
if known_blobs.issuperset(op.input):
# Special handling for recurrent nets
# TODO: when standard argument type for "nets" is introduced,
# this can be more general
if op.type == 'RecurrentNetwork':
for arg in op.arg:
if arg.name == 'backward_step_net':
arg.ClearField(str('n'))
Reported by Pylint.
Line: 621
Column: 17
else:
logging.debug(
"Op {} had unknown inputs: {}".format(
op.type, set(op.input).difference(known_blobs)
)
)
# Predictor net's external inputs and outputs include only those
Reported by Pylint.
scripts/model_zoo/update-models-from-caffe2.py
77 issues
Line: 3
Column: 1
#! /usr/bin/env python3
import onnx.backend
import argparse
import caffe2.python.workspace as c2_workspace
import glob
import json
import numpy as np
Reported by Pylint.
Line: 6
Column: 1
import onnx.backend
import argparse
import caffe2.python.workspace as c2_workspace
import glob
import json
import numpy as np
import onnx
import caffe2.python.onnx.frontend
Reported by Pylint.
Line: 10
Column: 1
import glob
import json
import numpy as np
import onnx
import caffe2.python.onnx.frontend
import caffe2.python.onnx.backend
import os
import shutil
import tarfile
Reported by Pylint.
Line: 11
Column: 1
import json
import numpy as np
import onnx
import caffe2.python.onnx.frontend
import caffe2.python.onnx.backend
import os
import shutil
import tarfile
import tempfile
Reported by Pylint.
Line: 12
Column: 1
import numpy as np
import onnx
import caffe2.python.onnx.frontend
import caffe2.python.onnx.backend
import os
import shutil
import tarfile
import tempfile
Reported by Pylint.
Line: 18
Column: 1
import tarfile
import tempfile
import boto3
from six.moves.urllib.request import urlretrieve
from caffe2.python.models.download import downloadFromURLToFile, getURLFromName, deleteDirectory
from caffe2.proto import caffe2_pb2
Reported by Pylint.
Line: 22
Column: 1
from six.moves.urllib.request import urlretrieve
from caffe2.python.models.download import downloadFromURLToFile, getURLFromName, deleteDirectory
from caffe2.proto import caffe2_pb2
from onnx import numpy_helper
"""A script converting Caffe2 models to ONNX, and updating ONNX model zoos.
Reported by Pylint.
Line: 23
Column: 1
from six.moves.urllib.request import urlretrieve
from caffe2.python.models.download import downloadFromURLToFile, getURLFromName, deleteDirectory
from caffe2.proto import caffe2_pb2
from onnx import numpy_helper
"""A script converting Caffe2 models to ONNX, and updating ONNX model zoos.
Reported by Pylint.
Line: 24
Column: 1
from caffe2.python.models.download import downloadFromURLToFile, getURLFromName, deleteDirectory
from caffe2.proto import caffe2_pb2
from onnx import numpy_helper
"""A script converting Caffe2 models to ONNX, and updating ONNX model zoos.
Arguments:
Reported by Pylint.
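The findings at lines 3-24 (column 1) cover two related problems in this script's header: standard-library imports (argparse, glob, json, os, ...) are interleaved after third-party ones, and the module docstring appears after the import block, where Pylint sees it only as a pointless-string-statement (W0105). Wrong-import-order (C0411) plus W0105 is the likely combination; a sketch of a conforming header, assuming that reading:

    #! /usr/bin/env python3
    """A script converting Caffe2 models to ONNX, and updating ONNX model zoos."""
    import argparse  # standard library first
    import glob
    import json
    import os

    import numpy as np  # then third-party
    import onnx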
Line: 160
Column: 9
elif tensor_type.elem_type == onnx.TensorProto.INT:
type = np.int32
else:
raise
array = np.random.rand(*shape).astype(type)
return array
def generate_test_input_data(onnx_model, scale):
Reported by Pylint.