The following issues were found
caffe2/python/data_workers_test.py
34 issues
Line: 15
Column: 31
import caffe2.python.data_workers as data_workers
def dummy_fetcher(fetcher_id, batch_size):
# Create random amount of values
n = np.random.randint(64) + 1
data = np.zeros((n, 3))
labels = []
for j in range(n):
Reported by Pylint.
Line: 27
Column: 23
return [np.array(data), np.array(labels)]
def dummy_fetcher_rnn(fetcher_id, batch_size):
# Hardcoding some input blobs
T = 20
N = batch_size
D = 33
data = np.random.rand(T, N, D)
Reported by Pylint.
Line: 44
Column: 22
workspace.ResetWorkspace()
model = model_helper.ModelHelper(name="test")
old_seq_id = data_workers.global_coordinator._fetcher_id_seq
coordinator = data_workers.init_data_input_workers(
model,
["data", "label"],
dummy_fetcher,
32,
Reported by Pylint.
Line: 53
Column: 22
2,
input_source_name="unittest"
)
new_seq_id = data_workers.global_coordinator._fetcher_id_seq
self.assertEqual(new_seq_id, old_seq_id + 2)
coordinator.start()
workspace.RunNetOnce(model.param_init_net)
Reported by Pylint.
Line: 77
Column: 26
self.assertEqual(labels[j], data[j, 2])
coordinator.stop_coordinator("unittest")
self.assertEqual(coordinator._coordinators, [])
def testRNNInput(self):
workspace.ResetWorkspace()
model = model_helper.ModelHelper(name="rnn_test")
old_seq_id = data_workers.global_coordinator._fetcher_id_seq
Reported by Pylint.
Line: 82
Column: 22
def testRNNInput(self):
workspace.ResetWorkspace()
model = model_helper.ModelHelper(name="rnn_test")
old_seq_id = data_workers.global_coordinator._fetcher_id_seq
coordinator = data_workers.init_data_input_workers(
model,
["data1", "label1", "seq_lengths1"],
dummy_fetcher_rnn,
32,
Reported by Pylint.
Line: 92
Column: 22
dont_rebatch=False,
batch_columns=[1, 1, 0],
)
new_seq_id = data_workers.global_coordinator._fetcher_id_seq
self.assertEqual(new_seq_id, old_seq_id + 2)
coordinator.start()
workspace.RunNetOnce(model.param_init_net)
Reported by Pylint.
Line: 100
Column: 15
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net)
while coordinator._coordinators[0]._state._inputs < 100:
time.sleep(0.01)
# Run a couple of rounds
workspace.RunNet(model.net.Proto().name)
workspace.RunNet(model.net.Proto().name)
Reported by Pylint.
Line: 100
Column: 15
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net)
while coordinator._coordinators[0]._state._inputs < 100:
time.sleep(0.01)
# Run a couple of rounds
workspace.RunNet(model.net.Proto().name)
workspace.RunNet(model.net.Proto().name)
Reported by Pylint.
Line: 100
Column: 15
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net)
while coordinator._coordinators[0]._state._inputs < 100:
time.sleep(0.01)
# Run a couple of rounds
workspace.RunNet(model.net.Proto().name)
workspace.RunNet(model.net.Proto().name)
Reported by Pylint.
benchmarks/distributed/ddp/benchmark.py
34 issues
Line: 21
Column: 1
import time
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.optim as optim
import torchvision
Reported by Pylint.
Line: 22
Column: 1
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.optim as optim
import torchvision
Reported by Pylint.
Line: 23
Column: 1
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.optim as optim
import torchvision
def allgather_object(obj):
Reported by Pylint.
Line: 24
Column: 1
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.optim as optim
import torchvision
def allgather_object(obj):
out = [None for _ in range(dist.get_world_size())]
Reported by Pylint.
Line: 25
Column: 1
import torch.distributed as dist
import torch.nn as nn
import torch.optim as optim
import torchvision
def allgather_object(obj):
out = [None for _ in range(dist.get_world_size())]
dist.all_gather_object(out, obj)
Reported by Pylint.
Line: 34
Column: 12
return out
def allgather_run(cmd):
proc = subprocess.run(shlex.split(cmd), capture_output=True)
assert(proc.returncode == 0)
return allgather_object(proc.stdout.decode("utf-8"))
Reported by Pylint.
Line: 115
Column: 13
def print_header():
local_print("\n")
local_print("%22s" % "")
for p in [50, 75, 90, 95]:
local_print("%14s%10s" % ("sec/iter", "ex/sec"))
local_print("\n")
def print_measurements(prefix, nelem, measurements):
measurements = sorted(measurements)
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/env python3
#
# Measure distributed training iteration time.
#
# This program performs a sweep over a) a number of model architectures, and
# b) an increasing number of processes. This produces a 1-GPU baseline,
# an 8-GPU baseline (if applicable), as well as measurements for however
# many processes can participate in training.
#
Reported by Pylint.
Line: 16
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b404-import-subprocess
import json
import os
import shlex
import subprocess
import sys
import time
import numpy as np
import torch
Reported by Bandit.
Line: 28
Column: 1
import torchvision
def allgather_object(obj):
out = [None for _ in range(dist.get_world_size())]
dist.all_gather_object(out, obj)
return out
def allgather_run(cmd):
Reported by Pylint.
torch/nn/modules/padding.py
34 issues
Line: 1
Column: 1
from .module import Module
from .utils import _pair, _quadruple, _ntuple
from .. import functional as F
from torch import Tensor
from ..common_types import _size_2_t, _size_4_t, _size_6_t
from typing import Sequence, Tuple
Reported by Pylint.
Line: 2
Column: 1
from .module import Module
from .utils import _pair, _quadruple, _ntuple
from .. import functional as F
from torch import Tensor
from ..common_types import _size_2_t, _size_4_t, _size_6_t
from typing import Sequence, Tuple
Reported by Pylint.
Line: 3
Column: 1
from .module import Module
from .utils import _pair, _quadruple, _ntuple
from .. import functional as F
from torch import Tensor
from ..common_types import _size_2_t, _size_4_t, _size_6_t
from typing import Sequence, Tuple
Reported by Pylint.
Line: 6
Column: 1
from .. import functional as F
from torch import Tensor
from ..common_types import _size_2_t, _size_4_t, _size_6_t
from typing import Sequence, Tuple
# TODO: grad_output size asserts in THNN
Reported by Pylint.
Line: 10
Column: 3
from typing import Sequence, Tuple
# TODO: grad_output size asserts in THNN
class _ConstantPadNd(Module):
__constants__ = ['padding', 'value']
value: float
Reported by Pylint.
Line: 22
Column: 23
super(_ConstantPadNd, self).__init__()
self.value = value
def forward(self, input: Tensor) -> Tensor:
return F.pad(input, self.padding, 'constant', self.value)
def extra_repr(self) -> str:
return 'padding={}, value={}'.format(self.padding, self.value)
Reported by Pylint.
Line: 173
Column: 23
__constants__ = ['padding']
padding: Sequence[int]
def forward(self, input: Tensor) -> Tensor:
return F.pad(input, self.padding, 'reflect')
def extra_repr(self) -> str:
return '{}'.format(self.padding)
Reported by Pylint.
Line: 327
Column: 23
__constants__ = ['padding']
padding: Sequence[int]
def forward(self, input: Tensor) -> Tensor:
return F.pad(input, self.padding, 'replicate')
def extra_repr(self) -> str:
return '{}'.format(self.padding)
Reported by Pylint.
Line: 1
Column: 1
from .module import Module
from .utils import _pair, _quadruple, _ntuple
from .. import functional as F
from torch import Tensor
from ..common_types import _size_2_t, _size_4_t, _size_6_t
from typing import Sequence, Tuple
Reported by Pylint.
Line: 5
Column: 1
from .utils import _pair, _quadruple, _ntuple
from .. import functional as F
from torch import Tensor
from ..common_types import _size_2_t, _size_4_t, _size_6_t
from typing import Sequence, Tuple
# TODO: grad_output size asserts in THNN
Reported by Pylint.
benchmarks/cpp/tensorexpr/bench_ops.py
34 issues
Line: 2
Column: 1
import timeit
import torch
import torch.nn.functional as F
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._debug_set_fusion_group_inlining(False)
torch.set_num_threads(1)
Reported by Pylint.
Line: 3
Column: 1
import timeit
import torch
import torch.nn.functional as F
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._debug_set_fusion_group_inlining(False)
torch.set_num_threads(1)
Reported by Pylint.
Line: 5
Column: 1
import torch
import torch.nn.functional as F
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._debug_set_fusion_group_inlining(False)
torch.set_num_threads(1)
def hardswish(x):
Reported by Pylint.
Line: 5
Column: 1
import torch
import torch.nn.functional as F
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._debug_set_fusion_group_inlining(False)
torch.set_num_threads(1)
def hardswish(x):
Reported by Pylint.
Line: 6
Column: 1
import torch.nn.functional as F
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._debug_set_fusion_group_inlining(False)
torch.set_num_threads(1)
def hardswish(x):
return x * torch.clamp(x + 3.0, 0.0, 6.0) / 6.0
Reported by Pylint.
Line: 6
Column: 1
import torch.nn.functional as F
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._debug_set_fusion_group_inlining(False)
torch.set_num_threads(1)
def hardswish(x):
return x * torch.clamp(x + 3.0, 0.0, 6.0) / 6.0
Reported by Pylint.
Line: 10
Column: 15
torch.set_num_threads(1)
def hardswish(x):
return x * torch.clamp(x + 3.0, 0.0, 6.0) / 6.0
unary_ops = [
hardswish,
Reported by Pylint.
Line: 16
Column: 5
unary_ops = [
hardswish,
torch._C._nn.hardswish,
torch.sigmoid,
torch.reciprocal,
torch.neg,
torch.relu,
torch.isnan,
Reported by Pylint.
Line: 16
Column: 5
unary_ops = [
hardswish,
torch._C._nn.hardswish,
torch.sigmoid,
torch.reciprocal,
torch.neg,
torch.relu,
torch.isnan,
Reported by Pylint.
Line: 53
Column: 40
for op in unary_ops:
x = torch.rand((1024, 1024))
traced = torch.jit.trace(lambda x: op(x), (x))
# Warmup.
warmup_iters = 8
for _ in range(warmup_iters):
op(x)
Reported by Pylint.
torch/jit/_monkeytype_config.py
33 issues
Line: 12
Column: 5
_IS_MONKEYTYPE_INSTALLED = True
try:
import monkeytype # type: ignore[import]
from monkeytype import trace as monkeytype_trace
from monkeytype.db.base import CallTraceThunk, CallTraceStore, CallTraceStoreLogger # type: ignore[import]
from monkeytype.config import _startswith, LIB_PATHS # type: ignore[import]
from monkeytype.tracing import CallTrace, CodeFilter # type: ignore[import]
except ImportError:
_IS_MONKEYTYPE_INSTALLED = False
Reported by Pylint.
Line: 47
Column: 9
class JitTypeTraceStoreLogger(CallTraceStoreLogger):
"""A JitTypeCallTraceLogger that stores logged traces in a CallTraceStore."""
def __init__(self, store: CallTraceStore):
super().__init__(store)
def log(self, trace: CallTrace) -> None:
self.traces.append(trace)
Reported by Pylint.
Line: 69
Column: 13
def filter(
self,
qualified_name: str,
qualname_prefix: Optional[str] = None,
limit: int = 2000
) -> List[CallTraceThunk]:
return self.trace_records[qualified_name]
def analyze(self, qualified_name: str) -> Dict:
Reported by Pylint.
Line: 70
Column: 13
self,
qualified_name: str,
qualname_prefix: Optional[str] = None,
limit: int = 2000
) -> List[CallTraceThunk]:
return self.trace_records[qualified_name]
def analyze(self, qualified_name: str) -> Dict:
# Analyze the types for the given module
Reported by Pylint.
Line: 109
Column: 3
_all_type = _all_type.lstrip(" ") # Remove any trailing spaces
if len(types) == 2 and 'NoneType' in _all_type:
# TODO: To remove this check once Union support in TorchScript lands.
all_args[arg] = {get_optional_of_element_type(_all_type)}
elif len(types) > 1:
all_args[arg] = {'Any'}
else:
all_args[arg] = {_all_type[:-1]}
Reported by Pylint.
Line: 1
Column: 1
import inspect
import typing
import pathlib
import torch
from typing import Optional, Iterable, List, Dict
from collections import defaultdict
from types import CodeType
_IS_MONKEYTYPE_INSTALLED = True
Reported by Pylint.
Line: 5
Column: 1
import typing
import pathlib
import torch
from typing import Optional, Iterable, List, Dict
from collections import defaultdict
from types import CodeType
_IS_MONKEYTYPE_INSTALLED = True
try:
Reported by Pylint.
Line: 6
Column: 1
import pathlib
import torch
from typing import Optional, Iterable, List, Dict
from collections import defaultdict
from types import CodeType
_IS_MONKEYTYPE_INSTALLED = True
try:
import monkeytype # type: ignore[import]
Reported by Pylint.
Line: 7
Column: 1
import torch
from typing import Optional, Iterable, List, Dict
from collections import defaultdict
from types import CodeType
_IS_MONKEYTYPE_INSTALLED = True
try:
import monkeytype # type: ignore[import]
from monkeytype import trace as monkeytype_trace
Reported by Pylint.
Line: 13
Column: 1
try:
import monkeytype # type: ignore[import]
from monkeytype import trace as monkeytype_trace
from monkeytype.db.base import CallTraceThunk, CallTraceStore, CallTraceStoreLogger # type: ignore[import]
from monkeytype.config import _startswith, LIB_PATHS # type: ignore[import]
from monkeytype.tracing import CallTrace, CodeFilter # type: ignore[import]
except ImportError:
_IS_MONKEYTYPE_INSTALLED = False
Reported by Pylint.
torch/fx/experimental/unification/multipledispatch/conflict.py
33 issues
Line: 1
Column: 1
from .utils import _toposort, groupby
from .variadic import isvariadic
class AmbiguityWarning(Warning):
pass
def supercedes(a, b):
Reported by Pylint.
Line: 2
Column: 1
from .utils import _toposort, groupby
from .variadic import isvariadic
class AmbiguityWarning(Warning):
pass
def supercedes(a, b):
Reported by Pylint.
Line: 75
Column: 58
def ambiguous(a, b):
""" A is consistent with B but neither is strictly more specific """
return consistent(a, b) and not (supercedes(a, b) or supercedes(b, a))
def ambiguities(signatures):
""" All signature pairs such that A is ambiguous with B """
signatures = list(map(tuple, signatures))
Reported by Pylint.
Line: 103
Column: 38
"""
# A either supercedes B and B does not supercede A or if B does then call
# tie_breaker
return supercedes(a, b) and (not supercedes(b, a) or tie_breaker(a) > tie_breaker(b))
def ordering(signatures):
""" A sane ordering of signatures to check, first to last
Topological sort of edges as given by ``edge`` and ``supercedes``
Reported by Pylint.
Line: 1
Column: 1
from .utils import _toposort, groupby
from .variadic import isvariadic
class AmbiguityWarning(Warning):
pass
def supercedes(a, b):
Reported by Pylint.
Line: 5
Column: 1
from .variadic import isvariadic
class AmbiguityWarning(Warning):
pass
def supercedes(a, b):
""" A is consistent and strictly more specific than B """
Reported by Pylint.
Line: 9
Column: 1
pass
def supercedes(a, b):
""" A is consistent and strictly more specific than B """
if len(a) < len(b):
# only case is if a is empty and b is variadic
return not a and len(b) == 1 and isvariadic(b[-1])
elif len(a) == len(b):
Reported by Pylint.
Line: 9
Column: 1
pass
def supercedes(a, b):
""" A is consistent and strictly more specific than B """
if len(a) < len(b):
# only case is if a is empty and b is variadic
return not a and len(b) == 1 and isvariadic(b[-1])
elif len(a) == len(b):
Reported by Pylint.
Line: 11
Column: 5
def supercedes(a, b):
""" A is consistent and strictly more specific than B """
if len(a) < len(b):
# only case is if a is empty and b is variadic
return not a and len(b) == 1 and isvariadic(b[-1])
elif len(a) == len(b):
return all(map(issubclass, a, b))
else:
Reported by Pylint.
Line: 18
Column: 9
return all(map(issubclass, a, b))
else:
# len(a) > len(b)
p1 = 0
p2 = 0
while p1 < len(a) and p2 < len(b):
cur_a = a[p1]
cur_b = b[p2]
if not (isvariadic(cur_a) or isvariadic(cur_b)):
Reported by Pylint.
benchmarks/sparse/spmm.py
33 issues
Line: 3
Column: 1
import argparse
import sys
import torch
from utils import gen_sparse_csr, gen_sparse_coo, Event
def test_sparse_csr(m, n, k, nnz, test_count):
start_timer = Event(enable_timing=True)
stop_timer = Event(enable_timing=True)
Reported by Pylint.
Line: 4
Column: 1
import argparse
import sys
import torch
from utils import gen_sparse_csr, gen_sparse_coo, Event
def test_sparse_csr(m, n, k, nnz, test_count):
start_timer = Event(enable_timing=True)
stop_timer = Event(enable_timing=True)
Reported by Pylint.
Line: 42
Column: 16
start = Event(enable_timing=True)
stop = Event(enable_timing=True)
coo, csr = gen_sparse_coo_and_csr((m, k), nnz)
mat = torch.randn((k, n), dtype=torch.double)
times = []
for _ in range(test_count):
start.record()
Reported by Pylint.
Line: 98
Column: 30
elif args.format == 'coo':
time = test_sparse_coo(m, n, k, nnz, test_count)
elif args.format == 'both':
time_coo, time_csr = test_sparse_coo_and_csr(m, nnz, test_count)
if args.format == 'both':
print("format=coo", " nnz_ratio=", nnz_ratio, " m=", m, " n=", n, " k=", k, " time=", time_coo, file=outfile)
print("format=csr", " nnz_ratio=", nnz_ratio, " m=", m, " n=", n, " k=", k, " time=", time_csr, file=outfile)
else:
Reported by Pylint.
Line: 98
Column: 30
elif args.format == 'coo':
time = test_sparse_coo(m, n, k, nnz, test_count)
elif args.format == 'both':
time_coo, time_csr = test_sparse_coo_and_csr(m, nnz, test_count)
if args.format == 'both':
print("format=coo", " nnz_ratio=", nnz_ratio, " m=", m, " n=", n, " k=", k, " time=", time_coo, file=outfile)
print("format=csr", " nnz_ratio=", nnz_ratio, " m=", m, " n=", n, " k=", k, " time=", time_csr, file=outfile)
else:
Reported by Pylint.
Line: 6
Column: 24
import torch
from utils import gen_sparse_csr, gen_sparse_coo, Event
def test_sparse_csr(m, n, k, nnz, test_count):
start_timer = Event(enable_timing=True)
stop_timer = Event(enable_timing=True)
csr = gen_sparse_csr((m, k), nnz)
mat = torch.randn(k, n, dtype=torch.double)
Reported by Pylint.
Line: 6
Column: 27
import torch
from utils import gen_sparse_csr, gen_sparse_coo, Event
def test_sparse_csr(m, n, k, nnz, test_count):
start_timer = Event(enable_timing=True)
stop_timer = Event(enable_timing=True)
csr = gen_sparse_csr((m, k), nnz)
mat = torch.randn(k, n, dtype=torch.double)
Reported by Pylint.
Line: 6
Column: 30
import torch
from utils import gen_sparse_csr, gen_sparse_coo, Event
def test_sparse_csr(m, n, k, nnz, test_count):
start_timer = Event(enable_timing=True)
stop_timer = Event(enable_timing=True)
csr = gen_sparse_csr((m, k), nnz)
mat = torch.randn(k, n, dtype=torch.double)
Reported by Pylint.
Line: 6
Column: 35
import torch
from utils import gen_sparse_csr, gen_sparse_coo, Event
def test_sparse_csr(m, n, k, nnz, test_count):
start_timer = Event(enable_timing=True)
stop_timer = Event(enable_timing=True)
csr = gen_sparse_csr((m, k), nnz)
mat = torch.randn(k, n, dtype=torch.double)
Reported by Pylint.
Line: 6
Column: 21
import torch
from utils import gen_sparse_csr, gen_sparse_coo, Event
def test_sparse_csr(m, n, k, nnz, test_count):
start_timer = Event(enable_timing=True)
stop_timer = Event(enable_timing=True)
csr = gen_sparse_csr((m, k), nnz)
mat = torch.randn(k, n, dtype=torch.double)
Reported by Pylint.
torch/jit/_freeze.py
33 issues
Line: 111
Column: 33
preserved_attrs = preserved_attrs if preserved_attrs is not None else []
out = RecursiveScriptModule(torch._C._freeze_module(mod._c, preserved_attrs))
RecursiveScriptModule._finalize_scriptmodule(out)
run_frozen_optimizations(out, optimize_numerics)
return out
Reported by Pylint.
Line: 111
Column: 57
preserved_attrs = preserved_attrs if preserved_attrs is not None else []
out = RecursiveScriptModule(torch._C._freeze_module(mod._c, preserved_attrs))
RecursiveScriptModule._finalize_scriptmodule(out)
run_frozen_optimizations(out, optimize_numerics)
return out
Reported by Pylint.
Line: 111
Column: 33
preserved_attrs = preserved_attrs if preserved_attrs is not None else []
out = RecursiveScriptModule(torch._C._freeze_module(mod._c, preserved_attrs))
RecursiveScriptModule._finalize_scriptmodule(out)
run_frozen_optimizations(out, optimize_numerics)
return out
Reported by Pylint.
Line: 112
Column: 5
preserved_attrs = preserved_attrs if preserved_attrs is not None else []
out = RecursiveScriptModule(torch._C._freeze_module(mod._c, preserved_attrs))
RecursiveScriptModule._finalize_scriptmodule(out)
run_frozen_optimizations(out, optimize_numerics)
return out
def run_frozen_optimizations(mod, optimize_numerics: bool = True):
Reported by Pylint.
Line: 156
Column: 3
assert "batch_norm" not in str(frozen_mod.graph)
"""
# xxx: keep in sync with frozen_graph_optimization.cpp
# intentionally duplicated to make it easier to create custom optimization sequence
torch._C._jit_pass_remove_dropout(mod._c)
if optimize_numerics:
# run a couple times to capture Conv -> Mul -> Add etc
for _ in range(2):
Reported by Pylint.
Line: 158
Column: 5
"""
# xxx: keep in sync with frozen_graph_optimization.cpp
# intentionally duplicated to make it easier to create custom optimization sequence
torch._C._jit_pass_remove_dropout(mod._c)
if optimize_numerics:
# run a couple times to capture Conv -> Mul -> Add etc
for _ in range(2):
torch._C._jit_pass_fold_frozen_conv_bn(mod.graph)
torch._C._jit_pass_fold_frozen_conv_add_or_sub(mod.graph)
Reported by Pylint.
Line: 158
Column: 39
"""
# xxx: keep in sync with frozen_graph_optimization.cpp
# intentionally duplicated to make it easier to create custom optimization sequence
torch._C._jit_pass_remove_dropout(mod._c)
if optimize_numerics:
# run a couple times to capture Conv -> Mul -> Add etc
for _ in range(2):
torch._C._jit_pass_fold_frozen_conv_bn(mod.graph)
torch._C._jit_pass_fold_frozen_conv_add_or_sub(mod.graph)
Reported by Pylint.
Line: 158
Column: 5
"""
# xxx: keep in sync with frozen_graph_optimization.cpp
# intentionally duplicated to make it easier to create custom optimization sequence
torch._C._jit_pass_remove_dropout(mod._c)
if optimize_numerics:
# run a couple times to capture Conv -> Mul -> Add etc
for _ in range(2):
torch._C._jit_pass_fold_frozen_conv_bn(mod.graph)
torch._C._jit_pass_fold_frozen_conv_add_or_sub(mod.graph)
Reported by Pylint.
Line: 162
Column: 13
if optimize_numerics:
# run a couple times to capture Conv -> Mul -> Add etc
for _ in range(2):
torch._C._jit_pass_fold_frozen_conv_bn(mod.graph)
torch._C._jit_pass_fold_frozen_conv_add_or_sub(mod.graph)
torch._C._jit_pass_fold_frozen_conv_mul_or_div(mod.graph)
def optimize_for_inference(mod: ScriptModule) -> ScriptModule:
"""
Reported by Pylint.
Line: 162
Column: 13
if optimize_numerics:
# run a couple times to capture Conv -> Mul -> Add etc
for _ in range(2):
torch._C._jit_pass_fold_frozen_conv_bn(mod.graph)
torch._C._jit_pass_fold_frozen_conv_add_or_sub(mod.graph)
torch._C._jit_pass_fold_frozen_conv_mul_or_div(mod.graph)
def optimize_for_inference(mod: ScriptModule) -> ScriptModule:
"""
Reported by Pylint.
benchmarks/operator_benchmark/pt/qarithmetic_test.py
33 issues
Line: 1
Column: 1
import torch
from torch._ops import ops
import operator_benchmark as op_bench
qarithmetic_binary_configs = op_bench.cross_product_configs(
N=(2, 8, 64, 512),
dtype=(torch.quint8, torch.qint8, torch.qint32),
# contig=(False, True), # TODO: Reenable this after #29435
contig=(True,),
Reported by Pylint.
Line: 2
Column: 1
import torch
from torch._ops import ops
import operator_benchmark as op_bench
qarithmetic_binary_configs = op_bench.cross_product_configs(
N=(2, 8, 64, 512),
dtype=(torch.quint8, torch.qint8, torch.qint32),
# contig=(False, True), # TODO: Reenable this after #29435
contig=(True,),
Reported by Pylint.
Line: 5
Column: 30
from torch._ops import ops
import operator_benchmark as op_bench
qarithmetic_binary_configs = op_bench.cross_product_configs(
N=(2, 8, 64, 512),
dtype=(torch.quint8, torch.qint8, torch.qint32),
# contig=(False, True), # TODO: Reenable this after #29435
contig=(True,),
tags=('short',)
Reported by Pylint.
Line: 14
Column: 26
)
qarithmetic_binary_ops = op_bench.op_list(
attrs=(
('add', ops.quantized.add),
('add_relu', ops.quantized.add_relu),
('mul', ops.quantized.mul),
),
Reported by Pylint.
Line: 23
Column: 33
attr_names=('op_name', 'op_func'),
)
qarithmetic_binary_scalar_ops = op_bench.op_list(
attrs=(
('add_scalar', ops.quantized.add_scalar),
('mul_scalar', ops.quantized.mul_scalar),
),
attr_names=('op_name', 'op_func'),
Reported by Pylint.
Line: 31
Column: 49
attr_names=('op_name', 'op_func'),
)
class _QFunctionalBinaryArithmeticBenchmarkBase(op_bench.TorchBenchmarkBase):
def setup(self, N, dtype, contig):
self.qfunctional = torch.nn.quantized.QFunctional()
# TODO: Consider more diverse shapes
f_input = (torch.rand(N, N) - 0.5) * 256
Reported by Pylint.
Line: 63
Column: 1
return self.op_func(q_input_a, q_input_b, scale=scale, zero_point=zero_point)
op_bench.generate_pt_tests_from_op_list(qarithmetic_binary_ops,
qarithmetic_binary_configs,
QFunctionalBenchmark)
class QFunctionalScalarBenchmark(_QFunctionalBinaryArithmeticBenchmarkBase):
Reported by Pylint.
Line: 81
Column: 1
return self.op_func(q_input, scalar_input)
op_bench.generate_pt_tests_from_op_list(qarithmetic_binary_scalar_ops,
qarithmetic_binary_configs,
QFunctionalScalarBenchmark)
if __name__ == '__main__':
Reported by Pylint.
Line: 8
Column: 28
qarithmetic_binary_configs = op_bench.cross_product_configs(
N=(2, 8, 64, 512),
dtype=(torch.quint8, torch.qint8, torch.qint32),
# contig=(False, True), # TODO: Reenable this after #29435
contig=(True,),
tags=('short',)
)
Reported by Pylint.
Line: 33
Column: 9
class _QFunctionalBinaryArithmeticBenchmarkBase(op_bench.TorchBenchmarkBase):
def setup(self, N, dtype, contig):
self.qfunctional = torch.nn.quantized.QFunctional()
# TODO: Consider more diverse shapes
f_input = (torch.rand(N, N) - 0.5) * 256
self.scale = 1.0
self.zero_point = 0
Reported by Pylint.
test/quantization/eager/test_equalize_eager.py
33 issues
Line: 1
Column: 1
import torch
import torch.nn as nn
from torch.testing._internal.common_quantization import QuantizationTestCase
from torch.quantization.fuse_modules import fuse_modules
import torch.quantization._equalize as _equalize
import copy
Reported by Pylint.
Line: 2
Column: 1
import torch
import torch.nn as nn
from torch.testing._internal.common_quantization import QuantizationTestCase
from torch.quantization.fuse_modules import fuse_modules
import torch.quantization._equalize as _equalize
import copy
Reported by Pylint.
Line: 4
Column: 1
import torch
import torch.nn as nn
from torch.testing._internal.common_quantization import QuantizationTestCase
from torch.quantization.fuse_modules import fuse_modules
import torch.quantization._equalize as _equalize
import copy
Reported by Pylint.
Line: 5
Column: 1
import torch.nn as nn
from torch.testing._internal.common_quantization import QuantizationTestCase
from torch.quantization.fuse_modules import fuse_modules
import torch.quantization._equalize as _equalize
import copy
Reported by Pylint.
Line: 7
Column: 1
from torch.testing._internal.common_quantization import QuantizationTestCase
from torch.quantization.fuse_modules import fuse_modules
import torch.quantization._equalize as _equalize
import copy
class TestEqualizeEager(QuantizationTestCase):
def checkChannelsEqualized(self, tensor1, tensor2, output_axis, input_axis):
Reported by Pylint.
Line: 29
Column: 20
curr = model
name = name.split('.')
for subname in name:
curr = curr._modules[subname]
return curr
def test_cross_layer_equalization(self):
''' applies _equalize.cross_layer_equalization on two modules and checks
to make sure channels ranges are equivalent
Reported by Pylint.
Line: 95
Column: 9
self.checkChannelsEqualized(linear1.weight, linear2.weight, 0, 1)
self.checkChannelsEqualized(linear2.weight, linear3.weight, 0, 1)
input = torch.randn(20, 3)
self.assertEqual(chain1(input), chain2(input))
def test_equalize_fused_convrelu(self):
''' Checks to see if eager mode equalization supports fused
ConvReLU2d models
Reported by Pylint.
Line: 140
Column: 9
self.checkChannelsEqualized(conv1.weight, conv2.weight, 0, 1)
self.checkChannelsEqualized(conv2.weight, conv3.weight, 0, 1)
input = torch.randn(3, 3, 1, 1)
self.assertEqual(fused_model1(input), fused_model2(input))
self.assertEqual(fused_model1(input), model(input))
def test_equalize_fused_linearrelu(self):
''' Checks to see if eager mode equalization supports fused
Reported by Pylint.
Line: 186
Column: 9
self.checkChannelsEqualized(linear1.weight, linear2.weight, 0, 1)
self.checkChannelsEqualized(linear2.weight, linear3.weight, 0, 1)
input = torch.randn(20, 3)
self.assertEqual(fused_model1(input), fused_model2(input))
self.assertEqual(fused_model1(input), model(input))
Reported by Pylint.
Line: 1
Column: 1
import torch
import torch.nn as nn
from torch.testing._internal.common_quantization import QuantizationTestCase
from torch.quantization.fuse_modules import fuse_modules
import torch.quantization._equalize as _equalize
import copy
Reported by Pylint.