The following issues were found:
caffe2/python/operator_test/recurrent_network_test.py
31 issues
Line: 8
Column: 1
from caffe2.python import recurrent, workspace
from caffe2.python.model_helper import ModelHelper
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
Reported by Pylint.
Line: 11
Column: 1
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
class RecurrentNetworkTest(serial.SerializedTestCase):
@given(T=st.integers(1, 4),
n=st.integers(1, 5),
Reported by Pylint.
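Both entries above point at the import block, which interleaves first-party and third-party imports; pylint's wrong-import-order (C0411) wants standard library, then third-party, then first-party, each in its own group. A minimal regrouping sketch follows (whether pylint classifies caffe2 as first-party depends on the repo's pylintrc, so the grouping itself is an assumption):

    # Hedged sketch: same imports, regrouped per PEP 8 / pylint C0411.
    # Third-party first, then the (assumed) first-party caffe2 packages.
    import numpy as np
    from hypothesis import given, settings
    import hypothesis.strategies as st

    import caffe2.python.hypothesis_test_util as hu
    import caffe2.python.serialized_test.serialized_test_util as serial
    from caffe2.python import recurrent, workspace
    from caffe2.python.model_helper import ModelHelper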
Line: 109
Column: 9
def simple_rnn(self, T, n, d, model, step, input_t, output_t, output_t_prev,
input_blob, initial_input_blob):
input = np.random.randn(T, n, d).astype(np.float32)
initial_input = np.random.randn(1, n, d).astype(np.float32)
print(locals())
recurrent.recurrent_net(
net=model.net,
cell_net=step.net,
Reported by Pylint.
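The likely complaint here is the local variable input, which shadows the Python builtin (W0622); the stray print(locals()) also reads like leftover debug output. A hedged fix, with input_data as a hypothetical replacement name; the same rename would apply to the reference(input, initial_input) signature flagged two entries below:

    import numpy as np

    def make_inputs(T, n, d):
        # Hedged sketch: `input_data` is a hypothetical rename that avoids
        # shadowing the builtin `input`; print(locals()) is dropped.
        input_data = np.random.randn(T, n, d).astype(np.float32)
        initial_input = np.random.randn(1, n, d).astype(np.float32)
        return input_data, initial_input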
Line: 123
Column: 14
workspace.blobs[input_blob] = input
workspace.blobs[initial_input_blob] = initial_input
op = model.net._net.op[-1]
# Just conveniently store all inputs in an array in the same
# order as op.input
inputs = [workspace.blobs[name] for name in op.input]
def reference(input, initial_input):
Reported by Pylint.
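model.net._net reaches through a protected attribute (W0212). caffe2's Net exposes the same protobuf through the public Proto() accessor, so, assuming protected-access is the message here, a two-line sketch of the fix:

    # Hedged sketch: read the last op through the public accessor instead
    # of the protected _net attribute.
    op = model.net.Proto().op[-1]
    inputs = [workspace.blobs[name] for name in op.input]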
Line: 128
Column: 23
# order as op.input
inputs = [workspace.blobs[name] for name in op.input]
def reference(input, initial_input):
global_ws_name = workspace.CurrentWorkspace()
input_all = workspace.blobs[input_blob]
workspace.SwitchWorkspace("ref", create_if_missing=True)
workspace.blobs[input_blob] = input
Reported by Pylint.
Line: 297
Column: 13
)
step_model = ModelHelper(name='step_model', param_model=model)
(
fake_input_t,
timestep,
input_state_t_prev,
) = step_model.net.AddExternalInputs(
'fake_input_t',
'timestep',
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import recurrent, workspace
from caffe2.python.model_helper import ModelHelper
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 14
Column: 1
import hypothesis.strategies as st
import numpy as np
class RecurrentNetworkTest(serial.SerializedTestCase):
@given(T=st.integers(1, 4),
n=st.integers(1, 5),
d=st.integers(1, 5))
@settings(deadline=10000)
def test_sum_mul(self, T, n, d):
Reported by Pylint.
Line: 19
Column: 5
n=st.integers(1, 5),
d=st.integers(1, 5))
@settings(deadline=10000)
def test_sum_mul(self, T, n, d):
model = ModelHelper(name='external')
input_blob, initial_input_blob = model.net.AddExternalInputs(
'input', 'initial_input')
Reported by Pylint.
torch/optim/_multi_tensor/sgd.py
31 issues
Line: 2
Column: 1
import torch
from ..optimizer import Optimizer, required
from collections import defaultdict
class SGD(Optimizer):
r"""Implements stochastic gradient descent (optionally with momentum).
Nesterov momentum is based on the formula from
`On the importance of initialization and momentum in deep learning`__.
Reported by Pylint.
Line: 115
Column: 25
return loss
if weight_decay != 0:
grads = torch._foreach_add(grads, params_with_grad, alpha=weight_decay)
if momentum != 0:
bufs = []
all_states_with_momentum_buffer = True
Reported by Pylint.
Line: 129
Column: 21
bufs.append(states[i]['momentum_buffer'])
if all_states_with_momentum_buffer:
torch._foreach_mul_(bufs, momentum)
torch._foreach_add_(bufs, grads, alpha=1 - dampening)
else:
bufs = []
for i in range(len(states)):
if 'momentum_buffer' not in states[i]:
Reported by Pylint.
Line: 130
Column: 21
if all_states_with_momentum_buffer:
torch._foreach_mul_(bufs, momentum)
torch._foreach_add_(bufs, grads, alpha=1 - dampening)
else:
bufs = []
for i in range(len(states)):
if 'momentum_buffer' not in states[i]:
buf = states[i]['momentum_buffer'] = torch.clone(grads[i]).detach()
Reported by Pylint.
Line: 135
Column: 66
bufs = []
for i in range(len(states)):
if 'momentum_buffer' not in states[i]:
buf = states[i]['momentum_buffer'] = torch.clone(grads[i]).detach()
else:
buf = states[i]['momentum_buffer']
buf.mul_(momentum).add_(grads[i], alpha=1 - dampening)
bufs.append(buf)
Reported by Pylint.
Line: 143
Column: 21
bufs.append(buf)
if nesterov:
torch._foreach_add_(grads, bufs, alpha=momentum)
else:
grads = bufs
if not has_sparse_grad:
torch._foreach_add_(params_with_grad, grads, alpha=-group['lr'])
Reported by Pylint.
Line: 148
Column: 17
grads = bufs
if not has_sparse_grad:
torch._foreach_add_(params_with_grad, grads, alpha=-group['lr'])
else:
# foreach APIs don't support sparse
for i in range(len(params_with_grad)):
params_with_grad[i].add_(grads[i], alpha=-group['lr'])
Reported by Pylint.
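The flagged sparse fallback indexes params_with_grad by range(len(...)); pylint's consider-using-enumerate family (C0200) prefers iterating the pairs directly. A hedged equivalent for that loop:

    # Hedged sketch: iterate the (param, grad) pairs instead of indexing.
    for param, grad in zip(params_with_grad, grads):
        param.add_(grad, alpha=-group['lr'])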
Line: 177
Column: 21
for _, per_dtype_grads in per_device_and_dtype_grads.items():
for grads in per_dtype_grads.values():
torch._foreach_zero_(grads)
Reported by Pylint.
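Most of the sgd.py entries point at torch._foreach_* calls, which pylint flags as protected-access (W0212). These private kernels are used deliberately here, so, assuming W0212 is indeed the message, the usual remedy is a targeted suppression rather than a rewrite:

    # Hedged sketch: silence the warning only where the private API is used
    # on purpose, so other protected-access hits still surface.
    torch._foreach_zero_(grads)  # pylint: disable=protected-access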
benchmarks/tensorexpr/rnn_eltwise.py
31 issues
Line: 1
Column: 1
from . import benchmark
import torch
class RNNEltwise(benchmark.Benchmark):
def __init__(self, mode, device, dtype, b, hs):
super().__init__(mode, device, dtype)
self.b = b
self.hs = hs
self.input = self.rand(
Reported by Pylint.
Line: 2
Column: 1
from . import benchmark
import torch
class RNNEltwise(benchmark.Benchmark):
def __init__(self, mode, device, dtype, b, hs):
super().__init__(mode, device, dtype)
self.b = b
self.hs = hs
self.input = self.rand(
Reported by Pylint.
Line: 32
Column: 23
self.b_hh,
]
def forward(self, input, hx, cx, b_ih, b_hh):
gates = input + hx + b_ih + b_hh
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
ingate = torch.sigmoid(ingate)
Reported by Pylint.
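As in the recurrent test above, forward takes a parameter named input, shadowing the builtin (W0622). A hedged rename (inp is a hypothetical choice) that leaves the gate math untouched:

    def forward(self, inp, hx, cx, b_ih, b_hh):
        # Hedged sketch: only the parameter name changes.
        gates = inp + hx + b_ih + b_hh
        ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
        ingate = torch.sigmoid(ingate)
        # ... remaining gate math unchanged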
Line: 4
Column: 1
from . import benchmark
import torch
class RNNEltwise(benchmark.Benchmark):
def __init__(self, mode, device, dtype, b, hs):
super().__init__(mode, device, dtype)
self.b = b
self.hs = hs
self.input = self.rand(
Reported by Pylint.
Line: 5
Column: 5
import torch
class RNNEltwise(benchmark.Benchmark):
def __init__(self, mode, device, dtype, b, hs):
super().__init__(mode, device, dtype)
self.b = b
self.hs = hs
self.input = self.rand(
[b, 4 * hs], device=device, dtype=dtype, requires_grad=self.requires_grad
Reported by Pylint.
tools/fast_nvcc/fast_nvcc.py
31 issues
Line: 20
Column: 1
from typing import (Awaitable, DefaultDict, Dict, List, Match, Optional, Set,
cast)
from typing_extensions import TypedDict
help_msg = '''fast_nvcc [OPTION]... -- [NVCC_ARG]...
Run the commands given by nvcc --dryrun, in parallel.
Reported by Pylint.
Line: 137
Column: 14
"""
Return parsed environment variables and commands from nvcc --dryrun.
"""
result = subprocess.run( # type: ignore[call-overload]
[binary, '--dryrun'] + args,
capture_output=True,
encoding='ascii', # this is just a guess
)
print(result.stdout, end='')
Reported by Pylint.
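This one is most plausibly W1510: subprocess.run called without an explicit check. Assuming a failing nvcc --dryrun should abort the script, the fix is one keyword:

    result = subprocess.run(  # type: ignore[call-overload]
        [binary, '--dryrun'] + args,
        capture_output=True,
        encoding='ascii',  # this is just a guess (comment from the source)
        check=True,        # assumption: a nonzero nvcc exit should raise
    )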
Line: 195
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b303-md5
# unique), but it seems difficult to find a rule that reproduces the
# real suffixes, so here's one that, while inaccurate, is at least
# hopefully as straightforward as possible
suffix = hashlib.md5(str.encode(middle)).hexdigest()[:8]
return f'_{len(middle)}_{middle}_{suffix}'
def unique_module_id_files(commands: List[str]) -> List[str]:
"""
Reported by Bandit.
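Bandit's B303 objects to md5 as a weak hash, but the surrounding comment makes clear this digest is only a filename-suffix disambiguator. On Python 3.9+ that intent can be stated directly, which also satisfies FIPS-minded tooling; a hedged sketch assuming that interpreter floor:

    # Hedged sketch (requires Python >= 3.9): declare the non-security use.
    suffix = hashlib.md5(middle.encode(), usedforsecurity=False).hexdigest()[:8]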
Line: 209
Column: 57
arr = []
def uniqueify(s: Match[str]) -> str:
filename = re.sub(r'\-(\d+)', r'-\1-' + str(i), s.group(0))
arr.append(filename)
return filename
line = re.sub(re_tmp + r'.module_id', uniqueify, line)
line = re.sub(r'\s*\-\-gen\_module\_id\_file\s*', ' ', line)
Reported by Pylint.
Line: 210
Column: 13
def uniqueify(s: Match[str]) -> str:
filename = re.sub(r'\-(\d+)', r'-\1-' + str(i), s.group(0))
arr.append(filename)
return filename
line = re.sub(re_tmp + r'.module_id', uniqueify, line)
line = re.sub(r'\s*\-\-gen\_module\_id\_file\s*', ' ', line)
if arr:
Reported by Pylint.
Line: 216
Column: 13
line = re.sub(re_tmp + r'.module_id', uniqueify, line)
line = re.sub(r'\s*\-\-gen\_module\_id\_file\s*', ' ', line)
if arr:
filename, = arr
if not module_id:
module_id = module_id_contents(shlex.split(line))
uniqueified.append(f"echo -n '{module_id}' > '{filename}'")
uniqueified.append(line)
return uniqueified
Reported by Pylint.
Line: 265
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b108_hardcoded_tmp_directory.html
"""
Return fully-qualified names of all tmp files referenced by command.
"""
return [f'/tmp/{match.group(1)}' for match in re.finditer(re_tmp, command)]
def nvcc_data_dependencies(commands: List[str]) -> Graph:
"""
Return a list of the set of dependencies for each command.
Reported by Bandit.
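B108 flags the hardcoded /tmp prefix. If these strings must literally mirror what nvcc --dryrun prints, the constant may be intentional and a "# nosec B108" annotation is the honest fix; otherwise tempfile.gettempdir() honors TMPDIR. A hedged sketch of the latter (tmp_files is a hypothetical name, and re_tmp is parameterized for self-containment, since the original signature is not shown above):

    import re
    import tempfile
    from typing import List

    def tmp_files(re_tmp: str, command: str) -> List[str]:
        """Return fully-qualified names of all tmp files referenced by command."""
        tmp = tempfile.gettempdir()  # honors TMPDIR instead of assuming /tmp
        return [f'{tmp}/{m.group(1)}' for m in re.finditer(re_tmp, command)]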
Line: 1
Column: 1
#!/usr/bin/env python3
import argparse
import asyncio
import collections
import csv
import hashlib
import itertools
import os
Reported by Pylint.
Line: 14
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b404-import-subprocess
import re
import shlex
import shutil
import subprocess
import sys
import time
from typing import (Awaitable, DefaultDict, Dict, List, Match, Optional, Set,
cast)
Reported by Bandit.
Line: 22
Column: 1
from typing_extensions import TypedDict
help_msg = '''fast_nvcc [OPTION]... -- [NVCC_ARG]...
Run the commands given by nvcc --dryrun, in parallel.
All flags for this script itself (see the "optional arguments" section
of --help) must be passed before the first "--". Everything after that
Reported by Pylint.
torch/distributed/pipeline/sync/pipe.py
31 issues
Line: 17
Column: 1
import torch.autograd
import torch.cuda
from . import microbatch
from .batchnorm import DeferredBatchNorm
from .pipeline import Pipeline
from .skip.layout import inspect_skip_layout
from .skip.skippable import verify_skippables
from .stream import AbstractStream, new_stream
Reported by Pylint.
Line: 18
Column: 1
import torch.cuda
from . import microbatch
from .batchnorm import DeferredBatchNorm
from .pipeline import Pipeline
from .skip.layout import inspect_skip_layout
from .skip.skippable import verify_skippables
from .stream import AbstractStream, new_stream
Reported by Pylint.
Line: 19
Column: 1
from . import microbatch
from .batchnorm import DeferredBatchNorm
from .pipeline import Pipeline
from .skip.layout import inspect_skip_layout
from .skip.skippable import verify_skippables
from .stream import AbstractStream, new_stream
__all__ = ["Pipe"]
Reported by Pylint.
Line: 20
Column: 1
from . import microbatch
from .batchnorm import DeferredBatchNorm
from .pipeline import Pipeline
from .skip.layout import inspect_skip_layout
from .skip.skippable import verify_skippables
from .stream import AbstractStream, new_stream
__all__ = ["Pipe"]
Reported by Pylint.
Line: 21
Column: 1
from .batchnorm import DeferredBatchNorm
from .pipeline import Pipeline
from .skip.layout import inspect_skip_layout
from .skip.skippable import verify_skippables
from .stream import AbstractStream, new_stream
__all__ = ["Pipe"]
Reported by Pylint.
Line: 22
Column: 1
from .pipeline import Pipeline
from .skip.layout import inspect_skip_layout
from .skip.skippable import verify_skippables
from .stream import AbstractStream, new_stream
__all__ = ["Pipe"]
Device = Union[torch.device, int, str]
Reported by Pylint.
Line: 27
Column: 16
__all__ = ["Pipe"]
Device = Union[torch.device, int, str]
Devices = Union[Iterable[Device], List[Device]]
Tensors = Sequence[Tensor]
TensorOrTensors = Union[Tensor, Tensors]
Reported by Pylint.
Line: 35
Column: 14
if TYPE_CHECKING:
# Typechecking: nn.Module is not a Generic
Module = nn.Module[TensorOrTensors] # type: ignore[type-arg]
NamedModules = OrderedDict[str, Module]
else:
Module = nn.Module
NamedModules = OrderedDict
Reported by Pylint.
Line: 36
Column: 20
if TYPE_CHECKING:
# Typechecking: nn.Module is not a Generic
Module = nn.Module[TensorOrTensors] # type: ignore[type-arg]
NamedModules = OrderedDict[str, Module]
else:
Module = nn.Module
NamedModules = OrderedDict
Reported by Pylint.
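The Line 35/36 hits land on the subscripted OrderedDict inside the TYPE_CHECKING branch; collections.OrderedDict only supports [...] at runtime from Python 3.9, and pylint evaluates the expression statically. Assuming unsubscriptable-object is the complaint, a hedged alternative is the typing alias, which subscripts on 3.7+:

    from collections import OrderedDict
    from typing import TYPE_CHECKING, OrderedDict as TOrderedDict

    if TYPE_CHECKING:
        # Typechecking: nn.Module is not a Generic
        Module = nn.Module[TensorOrTensors]  # type: ignore[type-arg]
        NamedModules = TOrderedDict[str, Module]
    else:
        Module = nn.Module
        NamedModules = OrderedDict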
Line: 71
Column: 75
def _verify_splitting(
module: nn.Sequential, partitions: List[nn.Sequential], devices: List[torch.device]
) -> None:
num_parameters = len(list(module.parameters()))
num_child_parameters = sum(len(list(child.parameters())) for child in module.children())
if num_parameters == num_child_parameters:
return
Reported by Pylint.
caffe2/quantization/server/elementwise_mul_dnnlowp_op_test.py
31 issues
Line: 6
Column: 1
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given, settings
Reported by Pylint.
Line: 10
Column: 1
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given, settings
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
Reported by Pylint.
Line: 28
Column: 71
)
@settings(deadline=None)
def test_dnnlowp_elementwise_mul_int(
self, N, is_empty, in_quantized, out_quantized, in_place, gc, dc
):
if is_empty:
N = 0
# FIXME: DNNLOWP Mul doesn't support inplace operation and
# dequantize_output=1 at the same time
Reported by Pylint.
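Column 71 lands on the trailing gc, dc parameters, which the hu.gcs_cpu_only strategy injects whether or not the test body uses them, so the report is most plausibly unused-argument (W0613). A hedged suppression that keeps the fixture signature intact:

    @given(**hu.gcs_cpu_only)
    @settings(deadline=None)
    def test_dnnlowp_elementwise_mul_int(  # pylint: disable=unused-argument
        self, N, is_empty, in_quantized, out_quantized, in_place, gc, dc
    ):
        ...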
Line: 32
Column: 3
):
if is_empty:
N = 0
# FIXME: DNNLOWP Mul doesn't support inplace operation and
# dequantize_output=1 at the same time
if in_place[0] or in_place[1]:
in_quantized = True
out_quantized = True
Reported by Pylint.
Line: 104
Column: 58
@given(**hu.gcs_cpu_only)
@settings(deadline=None)
def test_dnnlowp_elementwise_mul_broadcast(self, gc, dc):
# Set broadcast and no axis, i.e. broadcasting last dimensions.
min_ = -100
max_ = min_ + 255
A = np.round(np.random.rand(2, 3, 4, 5) * (max_ - min_) + min_)
A = A.astype(np.float32)
Reported by Pylint.
Line: 144
Column: 63
@given(**hu.gcs_cpu_only)
@settings(deadline=None)
def test_dnnlowp_elementwise_mul_broadcast_axis(self, gc, dc):
for bdim, axis in [
((3, 4), 1), # broadcasting intermediate dimensions
((2,), 0), # broadcasting the first dimension
((1, 4, 1), 1),
]:
Reported by Pylint.
Line: 1
Column: 1
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
Reported by Pylint.
Line: 17
Column: 1
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPMulOpTest(hu.HypothesisTestCase):
@given(
N=st.integers(32, 256),
is_empty=st.booleans(),
in_quantized=st.booleans(),
out_quantized=st.booleans(),
Reported by Pylint.
Line: 26
Column: 5
in_place=st.sampled_from([(False, False), (True, False), (False, True)]),
**hu.gcs_cpu_only
)
@settings(deadline=None)
def test_dnnlowp_elementwise_mul_int(
self, N, is_empty, in_quantized, out_quantized, in_place, gc, dc
):
if is_empty:
N = 0
Reported by Pylint.
scripts/release_notes/commitlist.py
31 issues
Line: 11
Column: 1
import re
"""
Example Usages
Create a new commitlist for consumption by categorize.py.
Said commitlist contains commits between v1.5.0 and f5bc91f851.
Reported by Pylint.
Line: 1
Column: 1
import argparse
from common import run, topics
from collections import defaultdict
import os
import csv
import pprint
from common import CommitDataCache
import re
Reported by Pylint.
Line: 3
Column: 1
import argparse
from common import run, topics
from collections import defaultdict
import os
import csv
import pprint
from common import CommitDataCache
import re
Reported by Pylint.
Line: 4
Column: 1
import argparse
from common import run, topics
from collections import defaultdict
import os
import csv
import pprint
from common import CommitDataCache
import re
Reported by Pylint.
Line: 5
Column: 1
from common import run, topics
from collections import defaultdict
import os
import csv
import pprint
from common import CommitDataCache
import re
Reported by Pylint.
Line: 6
Column: 1
from collections import defaultdict
import os
import csv
import pprint
from common import CommitDataCache
import re
"""
Reported by Pylint.
Line: 8
Column: 1
import csv
import pprint
from common import CommitDataCache
import re
"""
Example Usages
Reported by Pylint.
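The Line 11 entry flags the triple-quoted usage text sitting after the imports (a pointless string statement, W0105), and the Line 1-8 entries are all import ordering. Both go away if the text becomes the module docstring and the imports are regrouped; a hedged sketch of the file head:

    """
    Example Usages

    Create a new commitlist for consumption by categorize.py.
    Said commitlist contains commits between v1.5.0 and f5bc91f851.
    """
    import argparse
    import csv
    import os
    import pprint
    import re
    from collections import defaultdict

    from common import CommitDataCache, run, topics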
Line: 25
Column: 1
"""
class Commit:
def __init__(self, commit_hash, category, topic, title):
self.commit_hash = commit_hash
self.category = category
self.topic = topic
self.title = title
Reported by Pylint.
Line: 43
Column: 1
def __repr__(self):
return f'Commit({self.commit_hash}, {self.category}, {self.topic}, {self.title})'
class CommitList:
# NB: Private ctor. Use `from_existing` or `create_new`.
def __init__(self, path, commits):
self.path = path
self.commits = commits
Reported by Pylint.
Line: 50
Column: 5
self.commits = commits
@staticmethod
def from_existing(path):
commits = CommitList.read_from_disk(path)
return CommitList(path, commits)
@staticmethod
def create_new(path, base_version, new_version):
Reported by Pylint.
torch/_linalg_utils.py
31 issues
Line: 14
Column: 28
def is_sparse(A):
"""Check if tensor A is a sparse tensor"""
if isinstance(A, torch.Tensor):
return A.layout == torch.sparse_coo
error_str = "expected Tensor"
if not torch.jit.is_scripting():
error_str += " but got {}".format(type(A))
raise TypeError(error_str)
Reported by Pylint.
Line: 27
Column: 33
Integer types map to float32.
"""
dtype = A.dtype
if dtype in (torch.float16, torch.float32, torch.float64):
return dtype
return torch.float32
def matmul(A: Optional[Tensor], B: Tensor) -> Tensor:
Reported by Pylint.
Line: 27
Column: 18
Integer types map to float32.
"""
dtype = A.dtype
if dtype in (torch.float16, torch.float32, torch.float64):
return dtype
return torch.float32
def matmul(A: Optional[Tensor], B: Tensor) -> Tensor:
Reported by Pylint.
Line: 27
Column: 48
Integer types map to float32.
"""
dtype = A.dtype
if dtype in (torch.float16, torch.float32, torch.float64):
return dtype
return torch.float32
def matmul(A: Optional[Tensor], B: Tensor) -> Tensor:
Reported by Pylint.
Line: 29
Column: 12
dtype = A.dtype
if dtype in (torch.float16, torch.float32, torch.float64):
return dtype
return torch.float32
def matmul(A: Optional[Tensor], B: Tensor) -> Tensor:
"""Multiply two matrices.
Reported by Pylint.
Line: 42
Column: 12
return B
if is_sparse(A):
return torch.sparse.mm(A, B)
return torch.matmul(A, B)
def conjugate(A):
"""Return conjugate of tensor A.
Reported by Pylint.
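These helpers take Optional[Tensor] arguments, and pylint cannot see that A is narrowed before A.dtype or A.layout is touched, which is the most plausible source of the Line 27/29/42 reports. A hedged, self-contained restatement of the matmul helper with an explicit None guard:

    from typing import Optional
    import torch
    from torch import Tensor

    def matmul(A: Optional[Tensor], B: Tensor) -> Tensor:
        """Multiply two matrices; a None left operand acts as the identity."""
        if A is None:
            return B
        if A.layout == torch.sparse_coo:  # inlined is_sparse(A)
            return torch.sparse.mm(A, B)
        return torch.matmul(A, B)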
Line: 87
Column: 13
# torch.orgqr is not available in CUDA
Q = torch.linalg.qr(A).Q
else:
Q = torch.orgqr(*torch.geqrf(A))
return Q
def symeig(A: Tensor, largest: Optional[bool] = False) -> Tuple[Tensor, Tensor]:
"""Return eigenpairs of A with specified ordering.
Reported by Pylint.
Line: 87
Column: 26
# torch.orgqr is not available in CUDA
Q = torch.linalg.qr(A).Q
else:
Q = torch.orgqr(*torch.geqrf(A))
return Q
def symeig(A: Tensor, largest: Optional[bool] = False) -> Tuple[Tensor, Tensor]:
"""Return eigenpairs of A with specified ordering.
Reported by Pylint.
Line: 99
Column: 13
E, Z = torch.linalg.eigh(A, UPLO='U')
# assuming that E is ordered
if largest:
E = torch.flip(E, dims=(-1,))
Z = torch.flip(Z, dims=(-1,))
return E, Z
Reported by Pylint.
Line: 100
Column: 13
# assuming that E is ordered
if largest:
E = torch.flip(E, dims=(-1,))
Z = torch.flip(Z, dims=(-1,))
return E, Z
Reported by Pylint.
torch/nn/quantized/modules/normalization.py
31 issues
Line: 21
Column: 39
**factory_kwargs)
self.weight = weight
self.bias = bias
self.register_buffer('scale', torch.tensor(scale, **factory_kwargs))
self.register_buffer('zero_point', torch.tensor(zero_point, **factory_kwargs))
def forward(self, input):
return torch.ops.quantized.layer_norm(
input, self.normalized_shape, weight=self.weight, bias=self.bias,
Reported by Pylint.
Line: 22
Column: 44
self.weight = weight
self.bias = bias
self.register_buffer('scale', torch.tensor(scale, **factory_kwargs))
self.register_buffer('zero_point', torch.tensor(zero_point, **factory_kwargs))
def forward(self, input):
return torch.ops.quantized.layer_norm(
input, self.normalized_shape, weight=self.weight, bias=self.bias,
eps=self.eps, output_scale=self.scale, output_zero_point=self.zero_point)
Reported by Pylint.
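The columns in this file consistently land on torch.tensor(...), which several pylint releases misreport as not-callable (E1102) because torch's C-extension members confuse its inference. If that is the message, the usual workaround is a targeted disable rather than a code change:

    # Hedged sketch: suppress the suspected false positive on this line only.
    self.register_buffer(
        'scale',
        torch.tensor(scale, **factory_kwargs))  # pylint: disable=not-callable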
Line: 57
Column: 39
**factory_kwargs)
self.weight = weight
self.bias = bias
self.register_buffer('scale', torch.tensor(scale, **factory_kwargs))
self.register_buffer('zero_point', torch.tensor(zero_point, **factory_kwargs))
def forward(self, input):
return torch.ops.quantized.group_norm(
input, self.num_groups, self.weight, self.bias, self.eps, self.scale,
Reported by Pylint.
Line: 58
Column: 44
self.weight = weight
self.bias = bias
self.register_buffer('scale', torch.tensor(scale, **factory_kwargs))
self.register_buffer('zero_point', torch.tensor(zero_point, **factory_kwargs))
def forward(self, input):
return torch.ops.quantized.group_norm(
input, self.num_groups, self.weight, self.bias, self.eps, self.scale,
self.zero_point)
Reported by Pylint.
Line: 92
Column: 39
num_features, eps, momentum, affine, track_running_stats, **factory_kwargs)
self.weight = weight
self.bias = bias
self.register_buffer('scale', torch.tensor(scale, **factory_kwargs))
self.register_buffer('zero_point', torch.tensor(zero_point, **factory_kwargs))
def forward(self, input):
return torch.ops.quantized.instance_norm(
input, self.weight, self.bias, self.eps, self.scale,
Reported by Pylint.
Line: 93
Column: 44
self.weight = weight
self.bias = bias
self.register_buffer('scale', torch.tensor(scale, **factory_kwargs))
self.register_buffer('zero_point', torch.tensor(zero_point, **factory_kwargs))
def forward(self, input):
return torch.ops.quantized.instance_norm(
input, self.weight, self.bias, self.eps, self.scale,
self.zero_point)
Reported by Pylint.
Line: 127
Column: 39
num_features, eps, momentum, affine, track_running_stats, **factory_kwargs)
self.weight = weight
self.bias = bias
self.register_buffer('scale', torch.tensor(scale, **factory_kwargs))
self.register_buffer('zero_point', torch.tensor(zero_point, **factory_kwargs))
def forward(self, input):
return torch.ops.quantized.instance_norm(
input, self.weight, self.bias, self.eps, self.scale,
Reported by Pylint.
Line: 128
Column: 44
self.weight = weight
self.bias = bias
self.register_buffer('scale', torch.tensor(scale, **factory_kwargs))
self.register_buffer('zero_point', torch.tensor(zero_point, **factory_kwargs))
def forward(self, input):
return torch.ops.quantized.instance_norm(
input, self.weight, self.bias, self.eps, self.scale,
self.zero_point)
Reported by Pylint.
Line: 162
Column: 39
num_features, eps, momentum, affine, track_running_stats, **factory_kwargs)
self.weight = weight
self.bias = bias
self.register_buffer('scale', torch.tensor(scale, **factory_kwargs))
self.register_buffer('zero_point', torch.tensor(zero_point, **factory_kwargs))
def forward(self, input):
return torch.ops.quantized.instance_norm(
input, self.weight, self.bias, self.eps, self.scale,
Reported by Pylint.
Line: 163
Column: 44
self.weight = weight
self.bias = bias
self.register_buffer('scale', torch.tensor(scale, **factory_kwargs))
self.register_buffer('zero_point', torch.tensor(zero_point, **factory_kwargs))
def forward(self, input):
return torch.ops.quantized.instance_norm(
input, self.weight, self.bias, self.eps, self.scale,
self.zero_point)
Reported by Pylint.
caffe2/python/convnet_benchmarks.py
31 issues
Line: 633
Column: 19
"{0}_init_batch_{1}.pbtxt".format(arg.model, arg.batch_size), "w"
) as fid:
fid.write(str(model.param_init_net.Proto()))
with open("{0}.pbtxt".format(arg.model, arg.batch_size), "w") as fid:
fid.write(str(model.net.Proto()))
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net)
workspace.BenchmarkNet(
Reported by Pylint.
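This entry looks like a real bug rather than style: the format string consumes only {0}, so arg.batch_size is silently ignored (E1305, too-many-format-args), and unlike the init-net file written just above it, the batch size never reaches the filename. A hedged fix, assuming the batch size was meant to appear:

    # Hedged sketch: include the batch size, mirroring the init-net filename.
    with open("{0}_batch_{1}.pbtxt".format(arg.model, arg.batch_size), "w") as fid:
        fid.write(str(model.net.Proto()))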
Line: 66
Column: 16
from caffe2.python import workspace, brew, model_helper
def MLP(order, cudnn_ws):
model = model_helper.ModelHelper(name="MLP")
d = 256
depth = 20
width = 3
for i in range(depth):
Reported by Pylint.
Line: 66
Column: 9
from caffe2.python import workspace, brew, model_helper
def MLP(order, cudnn_ws):
model = model_helper.ModelHelper(name="MLP")
d = 256
depth = 20
width = 3
for i in range(depth):
Reported by Pylint.
Line: 66
Column: 1
from caffe2.python import workspace, brew, model_helper
def MLP(order, cudnn_ws):
model = model_helper.ModelHelper(name="MLP")
d = 256
depth = 20
width = 3
for i in range(depth):
Reported by Pylint.
Line: 68
Column: 5
def MLP(order, cudnn_ws):
model = model_helper.ModelHelper(name="MLP")
d = 256
depth = 20
width = 3
for i in range(depth):
for j in range(width):
current = "fc_{}_{}".format(i, j) if i > 0 else "data"
Reported by Pylint.
Line: 101
Column: 1
return model, d
def AlexNet(order, cudnn_ws):
my_arg_scope = {
'order': order,
'use_cudnn': True,
'cudnn_exhaustive_search': True,
}
Reported by Pylint.
Line: 194
Column: 1
return model, 224
def OverFeat(order, cudnn_ws):
my_arg_scope = {
'order': order,
'use_cudnn': True,
'cudnn_exhaustive_search': True,
}
Reported by Pylint.