The following issues were found:
caffe2/python/models/seq2seq/seq2seq_util.py
46 issues
Line: 13
Column: 1
import collections
from future.utils import viewitems
import caffe2.proto.caffe2_pb2 as caffe2_pb2
from caffe2.python import attention, core, rnn_cell, brew
PAD_ID = 0
PAD = '<PAD>'
Reported by Pylint.
Line: 31
Column: 5
vocab = collections.defaultdict(lambda: len(vocab))
freqs = collections.defaultdict(lambda: 0)
# Adding padding tokens to the vocabulary to maintain consistency with IDs
vocab[PAD]
vocab[GO]
vocab[EOS]
vocab[UNK]
with open(corpus) as f:
Reported by Pylint.
Line: 32
Column: 5
freqs = collections.defaultdict(lambda: 0)
# Adding padding tokens to the vocabulary to maintain consistency with IDs
vocab[PAD]
vocab[GO]
vocab[EOS]
vocab[UNK]
with open(corpus) as f:
for sentence in f:
Reported by Pylint.
Line: 33
Column: 5
# Adding padding tokens to the vocabulary to maintain consistency with IDs
vocab[PAD]
vocab[GO]
vocab[EOS]
vocab[UNK]
with open(corpus) as f:
for sentence in f:
tokens = sentence.strip().split()
Reported by Pylint.
Line: 34
Column: 5
vocab[PAD]
vocab[GO]
vocab[EOS]
vocab[UNK]
with open(corpus) as f:
for sentence in f:
tokens = sentence.strip().split()
for token in tokens:
Reported by Pylint.
Line: 43
Column: 13
freqs[token] += 1
for token, freq in viewitems(freqs):
if freq > unk_threshold:
vocab[token]
return vocab
def get_numberized_sentence(sentence, vocab):
Reported by Pylint.
Line: 230
Column: 5
num_decoder_layers,
inputs,
input_lengths,
vocab_size,
embeddings,
embedding_size,
use_attention,
num_gpus=0,
forward_only=False,
Reported by Pylint.
Line: 338
Column: 9
encoder_outputs,
encoder_output_dim,
encoder_lengths,
vocab_size,
attention_type,
embedding_size,
decoder_num_units,
decoder_cells,
residual_output_layers=None,
Reported by Pylint.
Line: 340
Column: 9
encoder_lengths,
vocab_size,
attention_type,
embedding_size,
decoder_num_units,
decoder_cells,
residual_output_layers=None,
name=None,
weighted_encoder_outputs=None,
Reported by Pylint.
Line: 27
Column: 1
UNK = '<UNK>'
def gen_vocab(corpus, unk_threshold):
vocab = collections.defaultdict(lambda: len(vocab))
freqs = collections.defaultdict(lambda: 0)
# Adding padding tokens to the vocabulary to maintain consistency with IDs
vocab[PAD]
vocab[GO]
Reported by Pylint.
test/onnx/export_onnx_tests_generator.py
46 issues
Line: 1
Column: 1
from torch.autograd import Variable
from onnx import numpy_helper
import io
import onnx
import os
import shutil
import torch
import traceback
Reported by Pylint.
Line: 2
Column: 1
from torch.autograd import Variable
from onnx import numpy_helper
import io
import onnx
import os
import shutil
import torch
import traceback
Reported by Pylint.
Line: 5
Column: 1
from onnx import numpy_helper
import io
import onnx
import os
import shutil
import torch
import traceback
Reported by Pylint.
Line: 8
Column: 1
import onnx
import os
import shutil
import torch
import traceback
import test_onnx_common
from torch.testing._internal.common_nn import module_tests
from test_nn import new_module_tests
Reported by Pylint.
Line: 12
Column: 1
import traceback
import test_onnx_common
from torch.testing._internal.common_nn import module_tests
from test_nn import new_module_tests
# Take a test case (a dict) as input, return the test name.
def get_test_name(testcase):
Reported by Pylint.
Line: 13
Column: 1
import test_onnx_common
from torch.testing._internal.common_nn import module_tests
from test_nn import new_module_tests
# Take a test case (a dict) as input, return the test name.
def get_test_name(testcase):
if "fullname" in testcase:
Reported by Pylint.
Line: 34
Column: 9
testcase["input_size"] = (1,)
return Variable(torch.randn(*testcase["input_size"]))
elif "input_fn" in testcase:
input = testcase["input_fn"]()
if isinstance(input, Variable):
return input
return Variable(testcase["input_fn"]())
Reported by Pylint.
Line: 81
Column: 19
fun(info, l)
def convert_tests(testcases, sets=1):
print("Collect {} test cases from PyTorch.".format(len(testcases)))
failed = 0
FunctionalModule_nums = 0
nn_module = {}
for t in testcases:
Reported by Pylint.
Line: 96
Column: 13
if (module_name not in nn_module):
nn_module[module_name] = 0
try:
input = gen_input(t)
f = io.BytesIO()
torch.onnx._export(module, input, f,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)
onnx_model = onnx.load_from_string(f.getvalue())
onnx.checker.check_model(onnx_model)
Reported by Pylint.
Line: 98
Column: 13
try:
input = gen_input(t)
f = io.BytesIO()
torch.onnx._export(module, input, f,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)
onnx_model = onnx.load_from_string(f.getvalue())
onnx.checker.check_model(onnx_model)
onnx.helper.strip_doc_string(onnx_model)
output_dir = os.path.join(test_onnx_common.pytorch_converted_dir, test_name)
Reported by Pylint.
torch/jit/supported_ops.py
46 issues
Line: 12
Column: 16
def _hidden(name):
return name.startswith('_') and not name.startswith('__')
def _emit_type(type):
return str(type)
def _emit_arg(indent, i, arg):
v = "{} : {}".format(arg.name, _emit_type(arg.type))
default = arg.default_value
Reported by Pylint.
Line: 52
Column: 38
self = schema.arguments[0]
if self.name != 'self':
return False
if not self.type.isSubtypeOf(torch._C.TensorType.get()):
return False
return True
methods = []
# discover methods
Reported by Pylint.
Line: 60
Column: 23
# discover methods
for elem in dir(torch.Tensor):
if not _hidden(elem):
schemas = torch._C._jit_get_schemas_for_operator("aten::" + elem)
for schema in schemas:
if is_tensor_method(schema):
methods.append(_emit_schema('Tensor', elem, schema, arg_start=1))
return "Supported Tensor Methods", methods
Reported by Pylint.
Line: 60
Column: 23
# discover methods
for elem in dir(torch.Tensor):
if not _hidden(elem):
schemas = torch._C._jit_get_schemas_for_operator("aten::" + elem)
for schema in schemas:
if is_tensor_method(schema):
methods.append(_emit_schema('Tensor', elem, schema, arg_start=1))
return "Supported Tensor Methods", methods
Reported by Pylint.
Line: 92
Column: 9
scripted = torch.jit.script(attr)
schema = scripted.schema
functions.append(_emit_schema(name, elem, schema))
except: # noqa: B001,E722
# Skip interpolate / boolean dispatched things
pass
# Iterate over modules that we know contain a lot of builtins
for mod in torch.jit._builtins._modules_containing_builtins:
Reported by Pylint.
Line: 97
Column: 16
pass
# Iterate over modules that we know contain a lot of builtins
for mod in torch.jit._builtins._modules_containing_builtins:
name = mod.__name__
for elem in dir(mod):
builtin = _find_builtin(getattr(mod, elem))
if builtin is not None:
schemas = torch._C._jit_get_schemas_for_operator(builtin)
Reported by Pylint.
Line: 97
Column: 16
pass
# Iterate over modules that we know contain a lot of builtins
for mod in torch.jit._builtins._modules_containing_builtins:
name = mod.__name__
for elem in dir(mod):
builtin = _find_builtin(getattr(mod, elem))
if builtin is not None:
schemas = torch._C._jit_get_schemas_for_operator(builtin)
Reported by Pylint.
Line: 102
Column: 27
for elem in dir(mod):
builtin = _find_builtin(getattr(mod, elem))
if builtin is not None:
schemas = torch._C._jit_get_schemas_for_operator(builtin)
for schema in schemas:
# remove _tan but not __and__
if not _hidden(elem):
functions.append(_emit_schema(name, elem, schema))
return "Supported PyTorch Functions", functions
Reported by Pylint.
Line: 102
Column: 27
for elem in dir(mod):
builtin = _find_builtin(getattr(mod, elem))
if builtin is not None:
schemas = torch._C._jit_get_schemas_for_operator(builtin)
for schema in schemas:
# remove _tan but not __and__
if not _hidden(elem):
functions.append(_emit_schema(name, elem, schema))
return "Supported PyTorch Functions", functions
Reported by Pylint.
Line: 111
Column: 30
def _get_builtins_helper():
builtins = []
for fn, _builtin_name in torch.jit._builtins._builtin_ops:
mod = inspect.getmodule(fn)
if not hasattr(fn, '__name__'):
# typing classes
continue
Reported by Pylint.
torch/testing/_internal/common_modules.py
46 issues
Line: 161
Column: 50
module_inputs = [
ModuleInput(constructor_input=FunctionInput(10, 8),
forward_input=FunctionInput(make_input((4, 10))),
reference_fn=lambda m, p, i: torch.mm(i, p[0].t()) + p[1].view(1, -1).expand(4, 8)),
ModuleInput(constructor_input=FunctionInput(10, 8, bias=False),
forward_input=FunctionInput(make_input((4, 10))),
desc='no_bias',
reference_fn=lambda m, p, i: torch.mm(i, p[0].t())),
ModuleInput(constructor_input=FunctionInput(3, 5),
Reported by Pylint.
Line: 165
Column: 50
ModuleInput(constructor_input=FunctionInput(10, 8, bias=False),
forward_input=FunctionInput(make_input((4, 10))),
desc='no_bias',
reference_fn=lambda m, p, i: torch.mm(i, p[0].t())),
ModuleInput(constructor_input=FunctionInput(3, 5),
forward_input=FunctionInput(make_input(3)),
desc='no_batch_dim',
reference_fn=lambda m, p, i: torch.mm(i.view(1, -1), p[0].t()).view(-1) + p[1])
]
Reported by Pylint.
Line: 169
Column: 50
ModuleInput(constructor_input=FunctionInput(3, 5),
forward_input=FunctionInput(make_input(3)),
desc='no_batch_dim',
reference_fn=lambda m, p, i: torch.mm(i.view(1, -1), p[0].t()).view(-1) + p[1])
]
return module_inputs
Reported by Pylint.
Line: 194
Column: 53
module_inputs.append(
ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
forward_input=FunctionInput(make_input((15, 10)).log_softmax(dim=1),
torch.empty(15, device=device).uniform_().mul(10).floor().long()),
desc=desc,
reference_fn=reference_fn)
)
return module_inputs
Reported by Pylint.
Line: 27
Column: 5
torch.nn.Module, # abstract base class
torch.nn.Container, # deprecated
torch.nn.NLLLoss2d, # deprecated
torch.nn.quantized.modules._ConvNd, # abstract base class
torch.nn.quantized.MaxPool2d, # aliases to nn.MaxPool2d
}
# List of all module classes to test.
MODULE_CLASSES: List[Type] = list(chain(*[
Reported by Pylint.
Line: 54
Column: 3
def _parametrize_test(self, test, generic_cls, device_cls):
for module_info in self.module_info_list:
# TODO: Factor some of this out since it's similar to OpInfo.
for dtype in floating_types():
# Construct the test name.
test_name = '{}_{}_{}{}'.format(test.__name__,
module_info.name.replace('.', '_'),
device_cls.device_type,
Reported by Pylint.
Line: 95
Column: 27
raise ex
def formatted_module_name(module_cls):
""" Returns the common name of the module class formatted for use in test names. """
return MODULE_CLASS_NAMES[module_cls].replace('.', '_')
class FunctionInput(object):
Reported by Pylint.
Line: 136
Column: 18
""" Module information to be used in testing. """
def __init__(self,
module_cls, # Class object for the module under test
*,
module_inputs_func, # Function to generate module inputs
skips=(), # Indicates which tests to skip
decorators=None, # Additional decorators to apply to generated tests
):
Reported by Pylint.
Line: 155
Column: 1
return formatted_module_name(self.module_cls)
def module_inputs_torch_nn_Linear(module_info, device, dtype, requires_grad, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
module_inputs = [
ModuleInput(constructor_input=FunctionInput(10, 8),
forward_input=FunctionInput(make_input((4, 10))),
Reported by Pylint.
Line: 155
Column: 35
return formatted_module_name(self.module_cls)
def module_inputs_torch_nn_Linear(module_info, device, dtype, requires_grad, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
module_inputs = [
ModuleInput(constructor_input=FunctionInput(10, 8),
forward_input=FunctionInput(make_input((4, 10))),
Reported by Pylint.
caffe2/core/nomnigraph/op_gen.py
46 issues
Line: 13
Column: 17
from subprocess import call
def parse_lines(lines):
# States
EMPTY = 0
OP = 1
MACRO = 2
parse_state = EMPTY
Reported by Pylint.
Line: 63
Column: 5
curr_op = ""
# dict of the form
# opName : { attributes: [], ... }
ops = {}
# To preserve parsing order for dependencies (for things like init_from)
op_list = []
for line in lines:
if not len(line):
Reported by Pylint.
Line: 65
Column: 5
# opName : { attributes: [], ... }
ops = {}
# To preserve parsing order for dependencies (for things like init_from)
op_list = []
for line in lines:
if not len(line):
continue
if line[0] == "-":
Reported by Pylint.
Line: 190
Column: 22
)
def gen_classes(ops, op_list):
f = ""
for op in op_list:
f += gen_class(op, ops[op])
return f
Reported by Pylint.
Line: 190
Column: 17
)
def gen_classes(ops, op_list):
f = ""
for op in op_list:
f += gen_class(op, ops[op])
return f
Reported by Pylint.
Line: 191
Column: 5
def gen_classes(ops, op_list):
f = ""
for op in op_list:
f += gen_class(op, ops[op])
return f
Reported by Pylint.
Line: 197
Column: 14
return f
def gen_enum(op_list):
return ",\n".join([op for op in op_list]) + "\n"
def gen_names(op_list):
f = ""
Reported by Pylint.
Line: 201
Column: 15
return ",\n".join([op for op in op_list]) + "\n"
def gen_names(op_list):
f = ""
for op in op_list:
f += dedent(
"""
case NNKind::{name}:
Reported by Pylint.
Line: 202
Column: 5
def gen_names(op_list):
f = ""
for op in op_list:
f += dedent(
"""
case NNKind::{name}:
return \"{name}\";
Reported by Pylint.
Line: 244
Column: 12
call(cmd)
cmd = ["clang-format", "-i", install_dir + "/OpEnum.h"]
call(cmd)
except Exception:
pass
Reported by Pylint.
caffe2/contrib/nccl/nccl_ops_test.py
45 issues
Line: 7
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given, assume
import numpy as np
import time
import os
from caffe2.proto import caffe2_pb2
Reported by Pylint.
Line: 8
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given, assume
import numpy as np
import time
import os
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, muji, dyndep
Reported by Pylint.
Line: 1
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given, assume
import numpy as np
Reported by Pylint.
Line: 10
Column: 1
import hypothesis.strategies as st
from hypothesis import given, assume
import numpy as np
import time
import os
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, muji, dyndep
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 11
Column: 1
from hypothesis import given, assume
import numpy as np
import time
import os
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, muji, dyndep
import caffe2.python.hypothesis_test_util as hu
np.random.seed(1)
Reported by Pylint.
Line: 21
Column: 1
dyndep.InitOpsLibrary('@/caffe2/caffe2/contrib/nccl:nccl_ops')
def gpu_device(i):
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = workspace.GpuDeviceType
device_option.device_id = i
return device_option
Reported by Pylint.
Line: 28
Column: 1
return device_option
def benchmark(ws, net, warmups=5, iters=100):
for _ in range(warmups):
ws.run(net)
plan = core.Plan("plan")
plan.AddStep(core.ExecutionStep("test-step", net, iters))
before = time.time()
Reported by Pylint.
Line: 28
Column: 1
return device_option
def benchmark(ws, net, warmups=5, iters=100):
for _ in range(warmups):
ws.run(net)
plan = core.Plan("plan")
plan.AddStep(core.ExecutionStep("test-step", net, iters))
before = time.time()
Reported by Pylint.
Line: 42
Column: 1
@unittest.skipIf(not workspace.has_cuda_support, "NCCL only on CUDA GPU")
class NCCLOpsTest(hu.HypothesisTestCase):
@given(n=st.integers(min_value=2, max_value=workspace.NumGpuDevices()),
m=st.integers(min_value=1, max_value=1000),
in_place=st.booleans())
def test_nccl_allreduce(self, n, m, in_place):
xs = [np.random.randn(m).astype(np.float32) for i in range(n)]
Reported by Pylint.
Line: 46
Column: 5
@given(n=st.integers(min_value=2, max_value=workspace.NumGpuDevices()),
m=st.integers(min_value=1, max_value=1000),
in_place=st.booleans())
def test_nccl_allreduce(self, n, m, in_place):
xs = [np.random.randn(m).astype(np.float32) for i in range(n)]
inputs = [str("x_{}".format(i)) for i in range(n)]
prefix = "" if in_place else "o"
outputs = [str("{}x_{}".format(prefix, i)) for i in range(n)]
op = core.CreateOperator("NCCLAllreduce", inputs, outputs)
Reported by Pylint.
caffe2/python/operator_test/mpi_test.py
45 issues
Line: 6
Column: 1
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
import unittest
Reported by Pylint.
Line: 7
Column: 1
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
import unittest
from caffe2.python import core, workspace, dyndep
Reported by Pylint.
Line: 26
Column: 9
try:
# pyre-fixme[21]: undefined import
from mpi4py import MPI
global _has_mpi, COMM, RANK, SIZE
_has_mpi = core.IsOperatorWithEngine("CreateCommonWorld", "MPI")
COMM = MPI.COMM_WORLD
RANK = COMM.Get_rank()
SIZE = COMM.Get_size()
except ImportError:
Reported by Pylint.
Line: 42
Column: 54
root=st.integers(min_value=0, max_value=SIZE - 1),
device_option=st.sampled_from(hu.device_options),
**hu.gcs)
def test_broadcast(self, X, root, device_option, gc, dc):
# Use mpi4py's broadcast to make sure that all nodes inherit the
# same hypothesis test.
X = COMM.bcast(X)
root = COMM.bcast(root)
device_option = COMM.bcast(device_option)
Reported by Pylint.
Line: 42
Column: 58
root=st.integers(min_value=0, max_value=SIZE - 1),
device_option=st.sampled_from(hu.device_options),
**hu.gcs)
def test_broadcast(self, X, root, device_option, gc, dc):
# Use mpi4py's broadcast to make sure that all nodes inherit the
# same hypothesis test.
X = COMM.bcast(X)
root = COMM.bcast(root)
device_option = COMM.bcast(device_option)
Reported by Pylint.
Line: 67
Column: 55
root=st.integers(min_value=0, max_value=SIZE - 1),
device_option=st.sampled_from(hu.device_options),
**hu.gcs)
def test_reduce(self, X, root, device_option, gc, dc):
# Use mpi4py's broadcast to make sure that all nodes inherit the
# same hypothesis test.
X = COMM.bcast(X)
root = COMM.bcast(root)
device_option = COMM.bcast(device_option)
Reported by Pylint.
Line: 67
Column: 51
root=st.integers(min_value=0, max_value=SIZE - 1),
device_option=st.sampled_from(hu.device_options),
**hu.gcs)
def test_reduce(self, X, root, device_option, gc, dc):
# Use mpi4py's broadcast to make sure that all nodes inherit the
# same hypothesis test.
X = COMM.bcast(X)
root = COMM.bcast(root)
device_option = COMM.bcast(device_option)
Reported by Pylint.
Line: 94
Column: 67
device_option=st.sampled_from(hu.device_options),
inplace=st.booleans(),
**hu.gcs)
def test_allreduce(self, X, root, device_option, inplace, gc, dc):
# Use mpi4py's broadcast to make sure that all nodes inherit the
# same hypothesis test.
X = COMM.bcast(X)
root = COMM.bcast(root)
device_option = COMM.bcast(device_option)
Reported by Pylint.
Line: 94
Column: 63
device_option=st.sampled_from(hu.device_options),
inplace=st.booleans(),
**hu.gcs)
def test_allreduce(self, X, root, device_option, inplace, gc, dc):
# Use mpi4py's broadcast to make sure that all nodes inherit the
# same hypothesis test.
X = COMM.bcast(X)
root = COMM.bcast(root)
device_option = COMM.bcast(device_option)
Reported by Pylint.
Line: 129
Column: 17
**hu.gcs)
def test_sendrecv(
self, X, device_option, specify_send_blob, specify_recv_blob,
gc, dc):
# Use mpi4py's broadcast to make sure that all nodes inherit the
# same hypothesis test.
X = COMM.bcast(X)
device_option = COMM.bcast(device_option)
specify_send_blob = COMM.bcast(specify_send_blob)
Reported by Pylint.
caffe2/python/operator_test/affine_channel_op_test.py
45 issues
Line: 8
Column: 1
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
class TestAffineChannelOp(serial.SerializedTestCase):
Reported by Pylint.
Line: 9
Column: 1
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
class TestAffineChannelOp(serial.SerializedTestCase):
def affine_channel_nchw_ref(self, X, scale, bias):
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
Reported by Pylint.
Line: 13
Column: 1
import numpy as np
class TestAffineChannelOp(serial.SerializedTestCase):
def affine_channel_nchw_ref(self, X, scale, bias):
dims = X.shape
N = dims[0]
C = dims[1]
X = X.reshape(N, C, -1)
Reported by Pylint.
Line: 14
Column: 5
class TestAffineChannelOp(serial.SerializedTestCase):
def affine_channel_nchw_ref(self, X, scale, bias):
dims = X.shape
N = dims[0]
C = dims[1]
X = X.reshape(N, C, -1)
scale = scale.reshape(C, 1)
Reported by Pylint.
Line: 14
Column: 5
class TestAffineChannelOp(serial.SerializedTestCase):
def affine_channel_nchw_ref(self, X, scale, bias):
dims = X.shape
N = dims[0]
C = dims[1]
X = X.reshape(N, C, -1)
scale = scale.reshape(C, 1)
Reported by Pylint.
Line: 14
Column: 5
class TestAffineChannelOp(serial.SerializedTestCase):
def affine_channel_nchw_ref(self, X, scale, bias):
dims = X.shape
N = dims[0]
C = dims[1]
X = X.reshape(N, C, -1)
scale = scale.reshape(C, 1)
Reported by Pylint.
Line: 16
Column: 9
class TestAffineChannelOp(serial.SerializedTestCase):
def affine_channel_nchw_ref(self, X, scale, bias):
dims = X.shape
N = dims[0]
C = dims[1]
X = X.reshape(N, C, -1)
scale = scale.reshape(C, 1)
bias = bias.reshape(C, 1)
Y = X * scale + bias
Reported by Pylint.
Line: 17
Column: 9
def affine_channel_nchw_ref(self, X, scale, bias):
dims = X.shape
N = dims[0]
C = dims[1]
X = X.reshape(N, C, -1)
scale = scale.reshape(C, 1)
bias = bias.reshape(C, 1)
Y = X * scale + bias
return [Y.reshape(dims)]
Reported by Pylint.
Line: 21
Column: 9
X = X.reshape(N, C, -1)
scale = scale.reshape(C, 1)
bias = bias.reshape(C, 1)
Y = X * scale + bias
return [Y.reshape(dims)]
def affine_channel_nhwc_ref(self, X, scale, bias):
dims = X.shape
N = dims[0]
Reported by Pylint.
caffe2/python/optimizer_test_util.py
45 issues
Line: 47
Column: 9
sq = model.SquaredL2Distance([out, 'label'])
loss = model.AveragedLoss(sq, "avg_loss")
grad_map = model.AddGradientOperators([loss])
self.assertIsInstance(grad_map['fc_w'], core.BlobReference)
return (model, perfect_model, data, label)
def testDense(self):
model, perfect_model, data, label = self._createDense()
optimizer = self.build_optimizer(model)
Reported by Pylint.
Line: 52
Column: 21
def testDense(self):
model, perfect_model, data, label = self._createDense()
optimizer = self.build_optimizer(model)
workspace.FeedBlob('data', data[0])
workspace.FeedBlob('label', label[0])
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net, True)
for _ in range(2000):
Reported by Pylint.
Line: 68
Column: 9
workspace.FetchBlob('fc_w'),
atol=1e-2
)
self.check_optimizer(optimizer)
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
def testGPUDense(self, dtype=core.DataType.FLOAT):
device_opt = core.DeviceOption(workspace.GpuDeviceType, 0)
with core.DeviceScope(device_opt):
Reported by Pylint.
Line: 87
Column: 9
brew.fc(model, 'fc_cpu', 'fc2', dim_in=1, dim_out=10, axis=0)
# Create optimizer in default device scope
self.build_optimizer(model)
if self._skip_gpu:
return
# Run net to see it does not crash
Reported by Pylint.
Line: 89
Column: 12
# Create optimizer in default device scope
self.build_optimizer(model)
if self._skip_gpu:
return
# Run net to see it does not crash
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net, True)
Reported by Pylint.
Line: 119
Column: 9
sq = model.SquaredL2Distance([out, 'label'])
loss = model.AveragedLoss(sq, "avg_loss")
grad_map = model.AddGradientOperators([loss])
self.assertIsInstance(grad_map['w'], core.GradientSlice)
optimizer = self.build_optimizer(model)
workspace.CreateBlob('indices')
workspace.CreateBlob('label')
Reported by Pylint.
Line: 120
Column: 21
loss = model.AveragedLoss(sq, "avg_loss")
grad_map = model.AddGradientOperators([loss])
self.assertIsInstance(grad_map['w'], core.GradientSlice)
optimizer = self.build_optimizer(model)
workspace.CreateBlob('indices')
workspace.CreateBlob('label')
for indices_type in [np.int32, np.int64]:
Reported by Pylint.
Line: 148
Column: 9
workspace.FetchBlob('w'),
atol=1e-2
)
self.check_optimizer(optimizer)
class LRModificationTestBase(object):
"""
This is an abstract base class.
Reported by Pylint.
Line: 178
Column: 45
def test_global_norm_based_gradient_clipping(self):
max_gradient_norm = 1.0
model, perfect_model, data, label = self._createDense()
opt = self.build_optimizer(model, max_gradient_norm=max_gradient_norm)
params = []
for param in model.GetParams(top_scope=True):
if param in model.param_to_grad:
Reported by Pylint.
Line: 179
Column: 15
def test_global_norm_based_gradient_clipping(self):
max_gradient_norm = 1.0
model, perfect_model, data, label = self._createDense()
opt = self.build_optimizer(model, max_gradient_norm=max_gradient_norm)
params = []
for param in model.GetParams(top_scope=True):
if param in model.param_to_grad:
if not isinstance(
Reported by Pylint.
torch/utils/benchmark/examples/end_to_end.py
45 issues
Line: 374
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b602_subprocess_popen_with_shell_equals_true.html
"PATH": os.getenv("PATH", ""),
},
stdout=subprocess.PIPE,
shell=True
)
def test_source(envs):
"""Ensure that subprocess"""
Reported by Bandit.
Line: 69
Column: 19
"39744": {_CPU: True, _GPU: True},
}
_AVAILABLE_GPUS = queue.Queue[int]()
_DTYPES_TO_TEST = {
"39850": ("int8", "float32", "float64"),
"39967": ("float32", "float64"),
"39744": ("int8", "float32", "float64"),
}
Reported by Pylint.
Line: 76
Column: 16
"39744": ("int8", "float32", "float64"),
}
_DTYPE_STR_TO_DTYPE = {
"float64": torch.float64,
"float32": torch.float32,
"int8": torch.int8,
}
Reported by Pylint.
Line: 77
Column: 16
}
_DTYPE_STR_TO_DTYPE = {
"float64": torch.float64,
"float32": torch.float32,
"int8": torch.int8,
}
def parse_args():
Reported by Pylint.
Line: 78
Column: 13
_DTYPE_STR_TO_DTYPE = {
"float64": torch.float64,
"float32": torch.float32,
"int8": torch.int8,
}
def parse_args():
parser = argparse.ArgumentParser()
Reported by Pylint.
Line: 115
Column: 17
def construct_stmt_and_label(pr, params):
if pr == "39850":
k0, k1, k2, dim = [params[i] for i in ["k0", "k1", "k2", "dim"]]
state = np.random.RandomState(params["random_value"])
topk_dim = state.randint(low=0, high=dim)
dim_size = [k0, k1, k2][topk_dim]
k = max(int(np.floor(2 ** state.uniform(low=0, high=np.log2(dim_size)))), 1)
return f"torch.topk(x, dim={topk_dim}, k={k})", "topk"
Reported by Pylint.
Line: 126
Column: 17
return "torch.std(x)", "std"
if pr == "39744":
state = np.random.RandomState(params["random_value"])
sort_dim = state.randint(low=0, high=params["dim"])
return f"torch.sort(x, dim={sort_dim})", "sort"
raise ValueError("Unknown PR")
Reported by Pylint.
Line: 95
Column: 5
parser.add_argument("--DETAIL_result_file", type=str, default=None)
parser.add_argument("--DETAIL_seed", type=int, default=None)
args = parser.parse_args()
if args.num_gpus is None:
args.num_gpus = torch.cuda.device_count()
return args
Reported by Pylint.
Line: 133
Column: 21
raise ValueError("Unknown PR")
def subprocess_main(args):
seed = args.DETAIL_seed
cuda = (args.DETAIL_device == _GPU)
with open(args.DETAIL_result_file, "ab") as f:
for dtype_str in _DTYPES_TO_TEST[args.pr]:
Reported by Pylint.
Line: 162
Column: 11
pickle.dump(measurement, f)
def _main(args):
pools, map_iters, finished_counts = {}, {}, {}
pr = args.pr
envs = (_REF_ENV_TEMPLATE.format(pr=pr), _PR_ENV_TEMPLATE.format(pr=pr))
# We initialize both pools at the start so that they run simultaneously
Reported by Pylint.