The following issues were found:
benchmarks/operator_benchmark/pt/qarithmetic_test.py
33 issues
Line: 1
Column: 1
import torch
from torch._ops import ops
import operator_benchmark as op_bench
qarithmetic_binary_configs = op_bench.cross_product_configs(
N=(2, 8, 64, 512),
dtype=(torch.quint8, torch.qint8, torch.qint32),
# contig=(False, True), # TODO: Reenable this after #29435
contig=(True,),
Reported by Pylint.
Line: 2
Column: 1
import torch
from torch._ops import ops
import operator_benchmark as op_bench
qarithmetic_binary_configs = op_bench.cross_product_configs(
N=(2, 8, 64, 512),
dtype=(torch.quint8, torch.qint8, torch.qint32),
# contig=(False, True), # TODO: Reenable this after #29435
contig=(True,),
Reported by Pylint.
Line: 5
Column: 30
from torch._ops import ops
import operator_benchmark as op_bench
qarithmetic_binary_configs = op_bench.cross_product_configs(
N=(2, 8, 64, 512),
dtype=(torch.quint8, torch.qint8, torch.qint32),
# contig=(False, True), # TODO: Reenable this after #29435
contig=(True,),
tags=('short',)
Reported by Pylint.
Line: 14
Column: 26
)
qarithmetic_binary_ops = op_bench.op_list(
attrs=(
('add', ops.quantized.add),
('add_relu', ops.quantized.add_relu),
('mul', ops.quantized.mul),
),
Reported by Pylint.
Line: 23
Column: 33
attr_names=('op_name', 'op_func'),
)
qarithmetic_binary_scalar_ops = op_bench.op_list(
attrs=(
('add_scalar', ops.quantized.add_scalar),
('mul_scalar', ops.quantized.mul_scalar),
),
attr_names=('op_name', 'op_func'),
Reported by Pylint.
Line: 31
Column: 49
attr_names=('op_name', 'op_func'),
)
class _QFunctionalBinaryArithmeticBenchmarkBase(op_bench.TorchBenchmarkBase):
def setup(self, N, dtype, contig):
self.qfunctional = torch.nn.quantized.QFunctional()
# TODO: Consider more diverse shapes
f_input = (torch.rand(N, N) - 0.5) * 256
Reported by Pylint.
Line: 63
Column: 1
return self.op_func(q_input_a, q_input_b, scale=scale, zero_point=zero_point)
op_bench.generate_pt_tests_from_op_list(qarithmetic_binary_ops,
qarithmetic_binary_configs,
QFunctionalBenchmark)
class QFunctionalScalarBenchmark(_QFunctionalBinaryArithmeticBenchmarkBase):
Reported by Pylint.
Line: 81
Column: 1
return self.op_func(q_input, scalar_input)
op_bench.generate_pt_tests_from_op_list(qarithmetic_binary_scalar_ops,
qarithmetic_binary_configs,
QFunctionalScalarBenchmark)
if __name__ == '__main__':
Reported by Pylint.
Line: 8
Column: 28
qarithmetic_binary_configs = op_bench.cross_product_configs(
N=(2, 8, 64, 512),
dtype=(torch.quint8, torch.qint8, torch.qint32),
# contig=(False, True), # TODO: Reenable this after #29435
contig=(True,),
tags=('short',)
)
Reported by Pylint.
Line: 33
Column: 9
class _QFunctionalBinaryArithmeticBenchmarkBase(op_bench.TorchBenchmarkBase):
def setup(self, N, dtype, contig):
self.qfunctional = torch.nn.quantized.QFunctional()
# TODO: Consider more diverse shapes
f_input = (torch.rand(N, N) - 0.5) * 256
self.scale = 1.0
self.zero_point = 0
Reported by Pylint.
torch/quantization/fx/match_utils.py
33 issues
Line: 7
Column: 1
Graph,
Node,
)
from .quantization_types import Pattern
from .quantization_patterns import (
QuantizeHandler,
CustomModuleQuantizeHandler,
StandaloneModuleQuantizeHandler,
BinaryOpQuantizeHandler,
Reported by Pylint.
Line: 8
Column: 1
Node,
)
from .quantization_types import Pattern
from .quantization_patterns import (
QuantizeHandler,
CustomModuleQuantizeHandler,
StandaloneModuleQuantizeHandler,
BinaryOpQuantizeHandler,
binary_op_supported_dtypes,
Reported by Pylint.
Line: 16
Column: 1
binary_op_supported_dtypes,
binary_reference_op_supported_dtypes,
)
from ..qconfig import (
QConfigAny,
)
from .graph_module import (
is_observed_standalone_module,
)
Reported by Pylint.
Line: 19
Column: 1
from ..qconfig import (
QConfigAny,
)
from .graph_module import (
is_observed_standalone_module,
)
from ..utils import get_qconfig_dtypes
from typing import Any, Dict, List, Callable, Optional, Tuple, Set
Reported by Pylint.
Line: 22
Column: 1
from .graph_module import (
is_observed_standalone_module,
)
from ..utils import get_qconfig_dtypes
from typing import Any, Dict, List, Callable, Optional, Tuple, Set
MatchResult = Tuple[Node, List[Node], Optional[Pattern], QuantizeHandler,
QConfigAny]
Reported by Pylint.
Line: 32
Column: 5
class MatchAllNode:
""" A node pattern that matches all nodes
"""
pass
# Note: The order of patterns is important! match function will take whatever is matched first, so we'll
# need to put the fusion patterns before single patterns. For example, add_relu should be registered come before relu.
# decorators are applied in the reverse order we see. Also when we match the nodes in the graph with these patterns,
# we'll start from the last node of the graph and traverse back.
Reported by Pylint.
Line: 133
Column: 5
else:
matched.append(node)
cache_for_no_tensor_check: Dict[Node, bool] = dict()
for node in reversed(graph.nodes):
if node.name not in match_map and node.name not in all_matched:
for pattern, value in patterns.items():
if is_match(modules, node, pattern):
skip_this_match = False
Reported by Pylint.
Line: 157
Column: 3
qconfig_map[base_node.name]
if this_node_qconfig:
dtypes = get_qconfig_dtypes(this_node_qconfig)
# TODO(future PR): update the pattern to quantize
# handler logic to take this into account.
# This needs to handle 3 cases
# 1) op and dtype is in either [is_ref or non-ref] list -> don't skip
Reported by Pylint.
Line: 1
Column: 1
import sys
import torch
from torch.fx.graph import (
Graph,
Node,
)
from .quantization_types import Pattern
from .quantization_patterns import (
QuantizeHandler,
Reported by Pylint.
Line: 24
Column: 1
)
from ..utils import get_qconfig_dtypes
from typing import Any, Dict, List, Callable, Optional, Tuple, Set
MatchResult = Tuple[Node, List[Node], Optional[Pattern], QuantizeHandler,
QConfigAny]
class MatchAllNode:
Reported by Pylint.
torch/fx/experimental/unification/multipledispatch/conflict.py
33 issues
Line: 1
Column: 1
from .utils import _toposort, groupby
from .variadic import isvariadic
class AmbiguityWarning(Warning):
pass
def supercedes(a, b):
Reported by Pylint.
Line: 2
Column: 1
from .utils import _toposort, groupby
from .variadic import isvariadic
class AmbiguityWarning(Warning):
pass
def supercedes(a, b):
Reported by Pylint.
Line: 75
Column: 58
def ambiguous(a, b):
""" A is consistent with B but neither is strictly more specific """
return consistent(a, b) and not (supercedes(a, b) or supercedes(b, a))
def ambiguities(signatures):
""" All signature pairs such that A is ambiguous with B """
signatures = list(map(tuple, signatures))
Reported by Pylint.
Line: 103
Column: 38
"""
# A either supercedes B and B does not supercede A or if B does then call
# tie_breaker
return supercedes(a, b) and (not supercedes(b, a) or tie_breaker(a) > tie_breaker(b))
def ordering(signatures):
""" A sane ordering of signatures to check, first to last
Topoological sort of edges as given by ``edge`` and ``supercedes``
Reported by Pylint.
Line: 1
Column: 1
from .utils import _toposort, groupby
from .variadic import isvariadic
class AmbiguityWarning(Warning):
pass
def supercedes(a, b):
Reported by Pylint.
Line: 5
Column: 1
from .variadic import isvariadic
class AmbiguityWarning(Warning):
pass
def supercedes(a, b):
""" A is consistent and strictly more specific than B """
Reported by Pylint.
Line: 9
Column: 1
pass
def supercedes(a, b):
""" A is consistent and strictly more specific than B """
if len(a) < len(b):
# only case is if a is empty and b is variadic
return not a and len(b) == 1 and isvariadic(b[-1])
elif len(a) == len(b):
Reported by Pylint.
Line: 9
Column: 1
pass
def supercedes(a, b):
""" A is consistent and strictly more specific than B """
if len(a) < len(b):
# only case is if a is empty and b is variadic
return not a and len(b) == 1 and isvariadic(b[-1])
elif len(a) == len(b):
Reported by Pylint.
Line: 11
Column: 5
def supercedes(a, b):
""" A is consistent and strictly more specific than B """
if len(a) < len(b):
# only case is if a is empty and b is variadic
return not a and len(b) == 1 and isvariadic(b[-1])
elif len(a) == len(b):
return all(map(issubclass, a, b))
else:
Reported by Pylint.
Line: 18
Column: 9
return all(map(issubclass, a, b))
else:
# len(a) > len(b)
p1 = 0
p2 = 0
while p1 < len(a) and p2 < len(b):
cur_a = a[p1]
cur_b = b[p2]
if not (isvariadic(cur_a) or isvariadic(cur_b)):
Reported by Pylint.
caffe2/python/brew.py
33 issues
Line: 15
Column: 1
from caffe2.python.model_helper import ModelHelper
# flake8: noqa
from caffe2.python.helpers.algebra import *
from caffe2.python.helpers.arg_scope import *
from caffe2.python.helpers.array_helpers import *
from caffe2.python.helpers.control_ops import *
from caffe2.python.helpers.conv import *
from caffe2.python.helpers.db_input import *
Reported by Pylint.
Line: 15
Column: 1
from caffe2.python.model_helper import ModelHelper
# flake8: noqa
from caffe2.python.helpers.algebra import *
from caffe2.python.helpers.arg_scope import *
from caffe2.python.helpers.array_helpers import *
from caffe2.python.helpers.control_ops import *
from caffe2.python.helpers.conv import *
from caffe2.python.helpers.db_input import *
Reported by Pylint.
Line: 16
Column: 1
# flake8: noqa
from caffe2.python.helpers.algebra import *
from caffe2.python.helpers.arg_scope import *
from caffe2.python.helpers.array_helpers import *
from caffe2.python.helpers.control_ops import *
from caffe2.python.helpers.conv import *
from caffe2.python.helpers.db_input import *
from caffe2.python.helpers.dropout import *
Reported by Pylint.
Line: 16
Column: 1
# flake8: noqa
from caffe2.python.helpers.algebra import *
from caffe2.python.helpers.arg_scope import *
from caffe2.python.helpers.array_helpers import *
from caffe2.python.helpers.control_ops import *
from caffe2.python.helpers.conv import *
from caffe2.python.helpers.db_input import *
from caffe2.python.helpers.dropout import *
Reported by Pylint.
Line: 16
Column: 1
# flake8: noqa
from caffe2.python.helpers.algebra import *
from caffe2.python.helpers.arg_scope import *
from caffe2.python.helpers.array_helpers import *
from caffe2.python.helpers.control_ops import *
from caffe2.python.helpers.conv import *
from caffe2.python.helpers.db_input import *
from caffe2.python.helpers.dropout import *
Reported by Pylint.
Line: 17
Column: 1
# flake8: noqa
from caffe2.python.helpers.algebra import *
from caffe2.python.helpers.arg_scope import *
from caffe2.python.helpers.array_helpers import *
from caffe2.python.helpers.control_ops import *
from caffe2.python.helpers.conv import *
from caffe2.python.helpers.db_input import *
from caffe2.python.helpers.dropout import *
from caffe2.python.helpers.elementwise_linear import *
Reported by Pylint.
Line: 18
Column: 1
from caffe2.python.helpers.algebra import *
from caffe2.python.helpers.arg_scope import *
from caffe2.python.helpers.array_helpers import *
from caffe2.python.helpers.control_ops import *
from caffe2.python.helpers.conv import *
from caffe2.python.helpers.db_input import *
from caffe2.python.helpers.dropout import *
from caffe2.python.helpers.elementwise_linear import *
from caffe2.python.helpers.fc import *
Reported by Pylint.
Line: 18
Column: 1
from caffe2.python.helpers.algebra import *
from caffe2.python.helpers.arg_scope import *
from caffe2.python.helpers.array_helpers import *
from caffe2.python.helpers.control_ops import *
from caffe2.python.helpers.conv import *
from caffe2.python.helpers.db_input import *
from caffe2.python.helpers.dropout import *
from caffe2.python.helpers.elementwise_linear import *
from caffe2.python.helpers.fc import *
Reported by Pylint.
Line: 18
Column: 1
from caffe2.python.helpers.algebra import *
from caffe2.python.helpers.arg_scope import *
from caffe2.python.helpers.array_helpers import *
from caffe2.python.helpers.control_ops import *
from caffe2.python.helpers.conv import *
from caffe2.python.helpers.db_input import *
from caffe2.python.helpers.dropout import *
from caffe2.python.helpers.elementwise_linear import *
from caffe2.python.helpers.fc import *
Reported by Pylint.
Line: 19
Column: 1
from caffe2.python.helpers.arg_scope import *
from caffe2.python.helpers.array_helpers import *
from caffe2.python.helpers.control_ops import *
from caffe2.python.helpers.conv import *
from caffe2.python.helpers.db_input import *
from caffe2.python.helpers.dropout import *
from caffe2.python.helpers.elementwise_linear import *
from caffe2.python.helpers.fc import *
from caffe2.python.helpers.nonlinearity import *
Reported by Pylint.
torch/jit/_monkeytype_config.py
33 issues
Line: 12
Column: 5
_IS_MONKEYTYPE_INSTALLED = True
try:
import monkeytype # type: ignore[import]
from monkeytype import trace as monkeytype_trace
from monkeytype.db.base import CallTraceThunk, CallTraceStore, CallTraceStoreLogger # type: ignore[import]
from monkeytype.config import _startswith, LIB_PATHS # type: ignore[import]
from monkeytype.tracing import CallTrace, CodeFilter # type: ignore[import]
except ImportError:
_IS_MONKEYTYPE_INSTALLED = False
Reported by Pylint.
Line: 47
Column: 9
class JitTypeTraceStoreLogger(CallTraceStoreLogger):
"""A JitTypeCallTraceLogger that stores logged traces in a CallTraceStore."""
def __init__(self, store: CallTraceStore):
super().__init__(store)
def log(self, trace: CallTrace) -> None:
self.traces.append(trace)
Reported by Pylint.
Line: 69
Column: 13
def filter(
self,
qualified_name: str,
qualname_prefix: Optional[str] = None,
limit: int = 2000
) -> List[CallTraceThunk]:
return self.trace_records[qualified_name]
def analyze(self, qualified_name: str) -> Dict:
Reported by Pylint.
Line: 70
Column: 13
self,
qualified_name: str,
qualname_prefix: Optional[str] = None,
limit: int = 2000
) -> List[CallTraceThunk]:
return self.trace_records[qualified_name]
def analyze(self, qualified_name: str) -> Dict:
# Analyze the types for the given module
Reported by Pylint.
Line: 109
Column: 3
_all_type = _all_type.lstrip(" ") # Remove any trailing spaces
if len(types) == 2 and 'NoneType' in _all_type:
# TODO: To remove this check once Union suppport in TorchScript lands.
all_args[arg] = {get_optional_of_element_type(_all_type)}
elif len(types) > 1:
all_args[arg] = {'Any'}
else:
all_args[arg] = {_all_type[:-1]}
Reported by Pylint.
Line: 1
Column: 1
import inspect
import typing
import pathlib
import torch
from typing import Optional, Iterable, List, Dict
from collections import defaultdict
from types import CodeType
_IS_MONKEYTYPE_INSTALLED = True
Reported by Pylint.
Line: 5
Column: 1
import typing
import pathlib
import torch
from typing import Optional, Iterable, List, Dict
from collections import defaultdict
from types import CodeType
_IS_MONKEYTYPE_INSTALLED = True
try:
Reported by Pylint.
Line: 6
Column: 1
import pathlib
import torch
from typing import Optional, Iterable, List, Dict
from collections import defaultdict
from types import CodeType
_IS_MONKEYTYPE_INSTALLED = True
try:
import monkeytype # type: ignore[import]
Reported by Pylint.
Line: 7
Column: 1
import torch
from typing import Optional, Iterable, List, Dict
from collections import defaultdict
from types import CodeType
_IS_MONKEYTYPE_INSTALLED = True
try:
import monkeytype # type: ignore[import]
from monkeytype import trace as monkeytype_trace
Reported by Pylint.
Line: 13
Column: 1
try:
import monkeytype # type: ignore[import]
from monkeytype import trace as monkeytype_trace
from monkeytype.db.base import CallTraceThunk, CallTraceStore, CallTraceStoreLogger # type: ignore[import]
from monkeytype.config import _startswith, LIB_PATHS # type: ignore[import]
from monkeytype.tracing import CallTrace, CodeFilter # type: ignore[import]
except ImportError:
_IS_MONKEYTYPE_INSTALLED = False
Reported by Pylint.
caffe2/python/ideep/expanddims_squeeze_op_test.py
33 issues
Line: 7
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
Reported by Pylint.
Line: 8
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
Reported by Pylint.
Line: 15
Column: 22
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class ExpandDimsSqueezeTest(hu.HypothesisTestCase):
@given(
squeeze_dims=st.lists(st.integers(0, 3), min_size=1, max_size=3),
inplace=st.booleans(),
**mu.gcs
Reported by Pylint.
Line: 22
Column: 51
inplace=st.booleans(),
**mu.gcs
)
def test_squeeze(self, squeeze_dims, inplace, gc, dc):
shape = [
1 if dim in squeeze_dims else np.random.randint(1, 5)
for dim in range(4)
]
X = np.random.rand(*shape).astype(np.float32)
Reported by Pylint.
Line: 38
Column: 60
inplace=st.booleans(),
**mu.gcs_cpu_ideep
)
def test_squeeze_fallback(self, squeeze_dims, inplace, gc, dc):
shape = [
1 if dim in squeeze_dims else np.random.randint(1, 5)
for dim in range(4)
]
X = np.random.rand(*shape).astype(np.float32)
Reported by Pylint.
Line: 70
Column: 13
print(Y1.flatten())
print(Y0.flatten())
print(np.max(np.abs(Y1 - Y0)))
self.assertTrue(False)
@given(
squeeze_dims=st.lists(st.integers(0, 3), min_size=1, max_size=3),
inplace=st.booleans(),
Reported by Pylint.
Line: 78
Column: 55
inplace=st.booleans(),
**mu.gcs
)
def test_expand_dims(self, squeeze_dims, inplace, gc, dc):
oshape = [
1 if dim in squeeze_dims else np.random.randint(2, 5)
for dim in range(4)
]
nshape = [s for s in oshape if s!=1]
Reported by Pylint.
Line: 97
Column: 64
inplace=st.booleans(),
**mu.gcs_cpu_ideep
)
def test_expand_dims_fallback(self, squeeze_dims, inplace, gc, dc):
oshape = [
1 if dim in squeeze_dims else np.random.randint(2, 5)
for dim in range(4)
]
nshape = [s for s in oshape if s!=1]
Reported by Pylint.
Line: 132
Column: 13
print(Y1.flatten())
print(Y0.flatten())
print(np.max(np.abs(Y1 - Y0)))
self.assertTrue(False)
if __name__ == "__main__":
unittest.main()
Reported by Pylint.
Line: 1
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
Reported by Pylint.
benchmarks/fastrnns/profile.py
33 issues
Line: 58
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b602_subprocess_popen_with_shell_equals_true.html
"""Returns (return-code, stdout, stderr)"""
print('[system] {}'.format(command))
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
output, err = p.communicate()
rc = p.returncode
output = output.decode("ascii")
err = err.decode("ascii")
return rc, output, err
Reported by Bandit.
Line: 5
Column: 1
import subprocess
import sys
import time
import torch
import datetime
from .runner import get_nn_runners
Reported by Pylint.
Line: 8
Column: 1
import torch
import datetime
from .runner import get_nn_runners
def run_rnn(name, rnn_creator, nloops=5,
seqLength=100, numLayers=1, inputSize=512, hiddenSize=512,
miniBatch=64, device='cuda', seed=None):
Reported by Pylint.
Line: 11
Column: 13
from .runner import get_nn_runners
def run_rnn(name, rnn_creator, nloops=5,
seqLength=100, numLayers=1, inputSize=512, hiddenSize=512,
miniBatch=64, device='cuda', seed=None):
def run_iter(modeldef):
# Forward
forward_output = modeldef.forward(*modeldef.inputs)
Reported by Pylint.
Line: 38
Column: 5
miniBatch=miniBatch, device=device, seed=seed)
modeldef = rnn_creator(**creator_args)
[run_iter(modeldef) for _ in range(nloops)]
def profile(rnns, sleep_between_seconds=1, nloops=5,
internal_run=True, # Unused, get rid of this TODO
seqLength=100, numLayers=1, inputSize=512, hiddenSize=512,
Reported by Pylint.
Line: 42
Column: 13
def profile(rnns, sleep_between_seconds=1, nloops=5,
internal_run=True, # Unused, get rid of this TODO
seqLength=100, numLayers=1, inputSize=512, hiddenSize=512,
miniBatch=64, device='cuda', seed=None):
params = dict(seqLength=seqLength, numLayers=numLayers,
inputSize=inputSize, hiddenSize=hiddenSize,
miniBatch=miniBatch, device=device, seed=seed)
Reported by Pylint.
Line: 92
Column: 1
return system('nvprof -o {} {}'.format(outpath, cmd))
def full_profile(rnns, **args):
profile_args = []
for k, v in args.items():
profile_args.append('--{}={}'.format(k, v))
profile_args.append('--rnns {}'.format(' '.join(rnns)))
profile_args.append('--internal_run')
Reported by Pylint.
Line: 1
Column: 1
import argparse
import subprocess
import sys
import time
import torch
import datetime
from .runner import get_nn_runners
Reported by Pylint.
Line: 2
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b404-import-subprocess
import argparse
import subprocess
import sys
import time
import torch
import datetime
from .runner import get_nn_runners
Reported by Bandit.
Line: 6
Column: 1
import sys
import time
import torch
import datetime
from .runner import get_nn_runners
def run_rnn(name, rnn_creator, nloops=5,
Reported by Pylint.
test/quantization/eager/test_equalize_eager.py
33 issues
Line: 1
Column: 1
import torch
import torch.nn as nn
from torch.testing._internal.common_quantization import QuantizationTestCase
from torch.quantization.fuse_modules import fuse_modules
import torch.quantization._equalize as _equalize
import copy
Reported by Pylint.
Line: 2
Column: 1
import torch
import torch.nn as nn
from torch.testing._internal.common_quantization import QuantizationTestCase
from torch.quantization.fuse_modules import fuse_modules
import torch.quantization._equalize as _equalize
import copy
Reported by Pylint.
Line: 4
Column: 1
import torch
import torch.nn as nn
from torch.testing._internal.common_quantization import QuantizationTestCase
from torch.quantization.fuse_modules import fuse_modules
import torch.quantization._equalize as _equalize
import copy
Reported by Pylint.
Line: 5
Column: 1
import torch.nn as nn
from torch.testing._internal.common_quantization import QuantizationTestCase
from torch.quantization.fuse_modules import fuse_modules
import torch.quantization._equalize as _equalize
import copy
Reported by Pylint.
Line: 7
Column: 1
from torch.testing._internal.common_quantization import QuantizationTestCase
from torch.quantization.fuse_modules import fuse_modules
import torch.quantization._equalize as _equalize
import copy
class TestEqualizeEager(QuantizationTestCase):
def checkChannelsEqualized(self, tensor1, tensor2, output_axis, input_axis):
Reported by Pylint.
Line: 29
Column: 20
curr = model
name = name.split('.')
for subname in name:
curr = curr._modules[subname]
return curr
def test_cross_layer_equalization(self):
''' applies _equalize.cross_layer_equalization on two modules and checks
to make sure channels ranges are equivalent
Reported by Pylint.
Line: 95
Column: 9
self.checkChannelsEqualized(linear1.weight, linear2.weight, 0, 1)
self.checkChannelsEqualized(linear2.weight, linear3.weight, 0, 1)
input = torch.randn(20, 3)
self.assertEqual(chain1(input), chain2(input))
def test_equalize_fused_convrelu(self):
''' Checks to see if eager mode equalization supports fused
ConvReLU2d models
Reported by Pylint.
Line: 140
Column: 9
self.checkChannelsEqualized(conv1.weight, conv2.weight, 0, 1)
self.checkChannelsEqualized(conv2.weight, conv3.weight, 0, 1)
input = torch.randn(3, 3, 1, 1)
self.assertEqual(fused_model1(input), fused_model2(input))
self.assertEqual(fused_model1(input), model(input))
def test_equalize_fused_linearrelu(self):
''' Checks to see if eager mode equalization supports fused
Reported by Pylint.
Line: 186
Column: 9
self.checkChannelsEqualized(linear1.weight, linear2.weight, 0, 1)
self.checkChannelsEqualized(linear2.weight, linear3.weight, 0, 1)
input = torch.randn(20, 3)
self.assertEqual(fused_model1(input), fused_model2(input))
self.assertEqual(fused_model1(input), model(input))
Reported by Pylint.
Line: 1
Column: 1
import torch
import torch.nn as nn
from torch.testing._internal.common_quantization import QuantizationTestCase
from torch.quantization.fuse_modules import fuse_modules
import torch.quantization._equalize as _equalize
import copy
Reported by Pylint.
caffe2/experiments/python/device_reduce_sum_bench.py
33 issues
Line: 71
Column: 18
["y"]
)
for n in itertools.imap(pow, itertools.cycle([10]), range(10)):
X = np.random.rand(n).astype(np.float32)
logger.info('Running benchmark for n = {}'.format(n))
ret = runOpBenchmark(gpu_do, op, inputs=[X])
self.results.append((n, ret[1]))
Reported by Pylint.
Line: 86
Column: 18
["y"]
)
for n in itertools.imap(pow, itertools.cycle([10]), range(10)):
X = np.random.rand(n).astype(np.float32)
logger.info('Running benchmark for n = {}'.format(n))
ret = runOpBenchmark(gpu_do, op, inputs=[X])
self.results.append((n, ret[1]))
Reported by Pylint.
Line: 101
Column: 18
["probs", "avgloss"],
)
for n in itertools.imap(pow, itertools.cycle([10]), range(8)):
for D in itertools.imap(pow, itertools.cycle([10]), range(3)):
X = np.random.rand(n, D).astype(np.float32)
label = (np.random.rand(n) * D).astype(np.int32)
logger.info('Running benchmark for n = {}, D= {}'.format(n, D))
ret = runOpBenchmark(gpu_do, op, inputs=[X, label])
Reported by Pylint.
Line: 102
Column: 22
)
for n in itertools.imap(pow, itertools.cycle([10]), range(8)):
for D in itertools.imap(pow, itertools.cycle([10]), range(3)):
X = np.random.rand(n, D).astype(np.float32)
label = (np.random.rand(n) * D).astype(np.int32)
logger.info('Running benchmark for n = {}, D= {}'.format(n, D))
ret = runOpBenchmark(gpu_do, op, inputs=[X, label])
self.results.append(((n, D), ret[1]))
Reported by Pylint.
Line: 73
Column: 25
for n in itertools.imap(pow, itertools.cycle([10]), range(10)):
X = np.random.rand(n).astype(np.float32)
logger.info('Running benchmark for n = {}'.format(n))
ret = runOpBenchmark(gpu_do, op, inputs=[X])
self.results.append((n, ret[1]))
class SumSqrElements(Benchmark):
Reported by Pylint.
Line: 88
Column: 25
for n in itertools.imap(pow, itertools.cycle([10]), range(10)):
X = np.random.rand(n).astype(np.float32)
logger.info('Running benchmark for n = {}'.format(n))
ret = runOpBenchmark(gpu_do, op, inputs=[X])
self.results.append((n, ret[1]))
class SoftMaxWithLoss(Benchmark):
Reported by Pylint.
Line: 105
Column: 29
for D in itertools.imap(pow, itertools.cycle([10]), range(3)):
X = np.random.rand(n, D).astype(np.float32)
label = (np.random.rand(n) * D).astype(np.int32)
logger.info('Running benchmark for n = {}, D= {}'.format(n, D))
ret = runOpBenchmark(gpu_do, op, inputs=[X, label])
self.results.append(((n, D), ret[1]))
def parse_args():
Reported by Pylint.
Line: 1
Column: 1
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
Reported by Pylint.
Line: 41
Column: 1
ALL_BENCHMARKS = {}
class BenchmarkMeta(type):
def __new__(metacls, name, bases, class_dict):
cls = type.__new__(metacls, name, bases, class_dict)
if name != 'Benchmark':
ALL_BENCHMARKS[name] = cls
return cls
Reported by Pylint.
Line: 42
Column: 5
class BenchmarkMeta(type):
def __new__(metacls, name, bases, class_dict):
cls = type.__new__(metacls, name, bases, class_dict)
if name != 'Benchmark':
ALL_BENCHMARKS[name] = cls
return cls
Reported by Pylint.
caffe2/python/dataset.py
33 issues
Line: 267
Column: 5
"""Return the list of field names for this dataset."""
return self.fields
def field_types(self):
"""
Return the list of field dtypes for this dataset.
If a list of strings, not a schema.Struct, was passed to the
constructor, this will return a list of dtype(np.void).
Reported by Pylint.
Line: 35
Column: 5
self.enforce_batch_size = enforce_batch_size
self.cursor = None
def setup_ex(self, init_net, exit_net):
if self.cursor is None:
self.cursor = init_net.CreateTreeCursor(
[],
init_net.NextScopedBlob(self.name),
fields=self.dataset.fields)
Reported by Pylint.
Line: 35
Column: 34
self.enforce_batch_size = enforce_batch_size
self.cursor = None
def setup_ex(self, init_net, exit_net):
if self.cursor is None:
self.cursor = init_net.CreateTreeCursor(
[],
init_net.NextScopedBlob(self.name),
fields=self.dataset.fields)
Reported by Pylint.
Line: 71
Column: 5
self.loop_over = loop_over
self.enforce_batch_size = enforce_batch_size
def setup_ex(self, init_net, exit_net):
if self.cursor is None:
self.cursor = init_net.CreateTreeCursor(
[],
init_net.NextScopedBlob(self.name),
fields=self.dataset.fields)
Reported by Pylint.
Line: 71
Column: 34
self.loop_over = loop_over
self.enforce_batch_size = enforce_batch_size
def setup_ex(self, init_net, exit_net):
if self.cursor is None:
self.cursor = init_net.CreateTreeCursor(
[],
init_net.NextScopedBlob(self.name),
fields=self.dataset.fields)
Reported by Pylint.
Line: 86
Column: 9
offsets = net.ComputeOffset(
[self.cursor] + self.dataset.content().field_blobs(),
'offsets')
self.offsets = offsets
def sort_and_shuffle(self, net, sort_by_field=None,
shuffle_size=1, batch_size=1):
# no sorting by default
content = self.dataset.content()
Reported by Pylint.
Line: 130
Column: 34
self._content = content
self.mutex = None
def setup_ex(self, init_net, exit_net):
if self.mutex is None:
self.mutex = init_net.CreateMutex([])
def write(self, writer_net, fields):
"""
Reported by Pylint.
Line: 130
Column: 5
self._content = content
self.mutex = None
def setup_ex(self, init_net, exit_net):
if self.mutex is None:
self.mutex = init_net.CreateMutex([])
def write(self, writer_net, fields):
"""
Reported by Pylint.
Line: 156
Column: 9
def commit(self, finish_net):
"""Commit is a no-op for an in-memory dataset."""
pass
def Const(net, value, dtype=None, name=None):
"""
Create a 'constant' by first creating an external input in the given
Reported by Pylint.
Line: 173
Column: 40
return blob
def execution_step_with_progress(name, init_net, substeps, rows_read):
# progress reporter
report_net = core.Net('report_net')
report_net.Print([rows_read], [])
return core.execution_step(
name,
Reported by Pylint.