The following issues were found:
caffe2/python/toy_regression_test.py
18 issues
Line: 1
Column: 1
import numpy as np
import unittest
from caffe2.python import core, workspace, test_util
class TestToyRegression(test_util.TestCase):
def testToyRegression(self):
"""Tests a toy regression end to end.
Reported by Pylint.
Line: 2
Column: 1
import numpy as np
import unittest
from caffe2.python import core, workspace, test_util
class TestToyRegression(test_util.TestCase):
def testToyRegression(self):
"""Tests a toy regression end to end.
Reported by Pylint.
Line: 7
Column: 1
from caffe2.python import core, workspace, test_util
class TestToyRegression(test_util.TestCase):
def testToyRegression(self):
"""Tests a toy regression end to end.
The test code carries a simple toy regression in the form
y = 2.0 x1 + 1.5 x2 + 0.5
Reported by Pylint.
Line: 8
Column: 5
class TestToyRegression(test_util.TestCase):
def testToyRegression(self):
"""Tests a toy regression end to end.
The test code carries a simple toy regression in the form
y = 2.0 x1 + 1.5 x2 + 0.5
by randomly generating gaussian inputs and calculating the ground
Reported by Pylint.
Line: 8
Column: 5
class TestToyRegression(test_util.TestCase):
def testToyRegression(self):
"""Tests a toy regression end to end.
The test code carries a simple toy regression in the form
y = 2.0 x1 + 1.5 x2 + 0.5
by randomly generating gaussian inputs and calculating the ground
Reported by Pylint.
Line: 8
Column: 5
class TestToyRegression(test_util.TestCase):
def testToyRegression(self):
"""Tests a toy regression end to end.
The test code carries a simple toy regression in the form
y = 2.0 x1 + 1.5 x2 + 0.5
by randomly generating gaussian inputs and calculating the ground
Reported by Pylint.
Line: 19
Column: 9
"""
workspace.ResetWorkspace()
init_net = core.Net("init")
W = init_net.UniformFill([], "W", shape=[1, 2], min=-1., max=1.)
B = init_net.ConstantFill([], "B", shape=[1], value=0.0)
W_gt = init_net.GivenTensorFill(
[], "W_gt", shape=[1, 2], values=[2.0, 1.5])
B_gt = init_net.GivenTensorFill([], "B_gt", shape=[1], values=[0.5])
LR = init_net.ConstantFill([], "LR", shape=[1], value=-0.1)
Reported by Pylint.
Line: 20
Column: 9
workspace.ResetWorkspace()
init_net = core.Net("init")
W = init_net.UniformFill([], "W", shape=[1, 2], min=-1., max=1.)
B = init_net.ConstantFill([], "B", shape=[1], value=0.0)
W_gt = init_net.GivenTensorFill(
[], "W_gt", shape=[1, 2], values=[2.0, 1.5])
B_gt = init_net.GivenTensorFill([], "B_gt", shape=[1], values=[0.5])
LR = init_net.ConstantFill([], "LR", shape=[1], value=-0.1)
ONE = init_net.ConstantFill([], "ONE", shape=[1], value=1.)
Reported by Pylint.
Line: 21
Column: 9
init_net = core.Net("init")
W = init_net.UniformFill([], "W", shape=[1, 2], min=-1., max=1.)
B = init_net.ConstantFill([], "B", shape=[1], value=0.0)
W_gt = init_net.GivenTensorFill(
[], "W_gt", shape=[1, 2], values=[2.0, 1.5])
B_gt = init_net.GivenTensorFill([], "B_gt", shape=[1], values=[0.5])
LR = init_net.ConstantFill([], "LR", shape=[1], value=-0.1)
ONE = init_net.ConstantFill([], "ONE", shape=[1], value=1.)
ITER = init_net.ConstantFill([], "ITER", shape=[1], value=0,
Reported by Pylint.
Line: 23
Column: 9
B = init_net.ConstantFill([], "B", shape=[1], value=0.0)
W_gt = init_net.GivenTensorFill(
[], "W_gt", shape=[1, 2], values=[2.0, 1.5])
B_gt = init_net.GivenTensorFill([], "B_gt", shape=[1], values=[0.5])
LR = init_net.ConstantFill([], "LR", shape=[1], value=-0.1)
ONE = init_net.ConstantFill([], "ONE", shape=[1], value=1.)
ITER = init_net.ConstantFill([], "ITER", shape=[1], value=0,
dtype=core.DataType.INT64)
Reported by Pylint.
caffe2/python/operator_test/glu_op_test.py
18 issues
Line: 9
Column: 1
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
Reported by Pylint.
Line: 10
Column: 1
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
Reported by Pylint.
Line: 33
Column: 40
**hu.gcs
)
@settings(deadline=10000)
def test_glu_old(self, X_axis, gc, dc):
X, axis = X_axis
def glu_ref(X):
x1, x2 = np.split(X, [X.shape[axis] // 2], axis=axis)
Y = x1 * (1. / (1. + np.exp(-x2)))
Reported by Pylint.
Line: 37
Column: 13
X, axis = X_axis
def glu_ref(X):
x1, x2 = np.split(X, [X.shape[axis] // 2], axis=axis)
Y = x1 * (1. / (1. + np.exp(-x2)))
return [Y]
op = core.CreateOperator("Glu", ["X"], ["Y"], dim=axis)
self.assertReferenceChecks(gc, op, [X], glu_ref)
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
Reported by Pylint.
Line: 13
Column: 1
import hypothesis.strategies as st
import numpy as np
import unittest
@st.composite
def _glu_old_input(draw):
dims = draw(st.lists(st.integers(min_value=1, max_value=5), min_size=1, max_size=3))
Reported by Pylint.
Line: 23
Column: 5
# The axis dimension must be divisible by two
axis_dim = 2 * draw(st.integers(min_value=1, max_value=2))
dims.insert(axis, axis_dim)
X = draw(hu.arrays(dims, np.float32, None))
return (X, axis)
class TestGlu(serial.SerializedTestCase):
@given(
Reported by Pylint.
Line: 27
Column: 1
return (X, axis)
class TestGlu(serial.SerializedTestCase):
@given(
X_axis=_glu_old_input(),
**hu.gcs
)
@settings(deadline=10000)
Reported by Pylint.
Line: 32
Column: 5
X_axis=_glu_old_input(),
**hu.gcs
)
@settings(deadline=10000)
def test_glu_old(self, X_axis, gc, dc):
X, axis = X_axis
def glu_ref(X):
x1, x2 = np.split(X, [X.shape[axis] // 2], axis=axis)
Reported by Pylint.
Line: 32
Column: 5
X_axis=_glu_old_input(),
**hu.gcs
)
@settings(deadline=10000)
def test_glu_old(self, X_axis, gc, dc):
X, axis = X_axis
def glu_ref(X):
x1, x2 = np.split(X, [X.shape[axis] // 2], axis=axis)
Reported by Pylint.
caffe2/python/operator_test/normalize_op_test.py
18 issues
Line: 8
Column: 1
import functools
import numpy as np
from hypothesis import given, settings
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import copy
Reported by Pylint.
Line: 1
Column: 1
import functools
import numpy as np
from hypothesis import given, settings
from caffe2.python import core
Reported by Pylint.
Line: 11
Column: 1
from hypothesis import given, settings
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import copy
class TestNormalizeOp(hu.HypothesisTestCase):
@given(
X=hu.tensor(
Reported by Pylint.
Line: 14
Column: 1
import copy
class TestNormalizeOp(hu.HypothesisTestCase):
@given(
X=hu.tensor(
min_dim=1, max_dim=5, elements=hu.floats(min_value=0.5, max_value=1.0)
),
**hu.gcs
Reported by Pylint.
Line: 21
Column: 5
),
**hu.gcs
)
@settings(max_examples=10, deadline=None)
def test_normalize(self, X, gc, dc):
def ref_normalize(X, axis):
x_normed = X / np.maximum(
np.sqrt((X ** 2).sum(axis=axis, keepdims=True)), 1e-12
)
Reported by Pylint.
Line: 21
Column: 5
),
**hu.gcs
)
@settings(max_examples=10, deadline=None)
def test_normalize(self, X, gc, dc):
def ref_normalize(X, axis):
x_normed = X / np.maximum(
np.sqrt((X ** 2).sum(axis=axis, keepdims=True)), 1e-12
)
Reported by Pylint.
Line: 21
Column: 5
),
**hu.gcs
)
@settings(max_examples=10, deadline=None)
def test_normalize(self, X, gc, dc):
def ref_normalize(X, axis):
x_normed = X / np.maximum(
np.sqrt((X ** 2).sum(axis=axis, keepdims=True)), 1e-12
)
Reported by Pylint.
Line: 21
Column: 5
),
**hu.gcs
)
@settings(max_examples=10, deadline=None)
def test_normalize(self, X, gc, dc):
def ref_normalize(X, axis):
x_normed = X / np.maximum(
np.sqrt((X ** 2).sum(axis=axis, keepdims=True)), 1e-12
)
Reported by Pylint.
Line: 23
Column: 9
)
@settings(max_examples=10, deadline=None)
def test_normalize(self, X, gc, dc):
def ref_normalize(X, axis):
x_normed = X / np.maximum(
np.sqrt((X ** 2).sum(axis=axis, keepdims=True)), 1e-12
)
return (x_normed,)
Reported by Pylint.
Line: 30
Column: 13
return (x_normed,)
for axis in range(-X.ndim, X.ndim):
x = copy.copy(X)
op = core.CreateOperator("Normalize", "X", "Y", axis=axis)
self.assertReferenceChecks(
gc, op, [x], functools.partial(ref_normalize, axis=axis)
)
self.assertDeviceChecks(dc, op, [x], [0])
Reported by Pylint.
test/distributed/test_c10d_spawn_nccl.py
18 issues
Line: 3
Column: 1
import sys
import test_c10d_spawn
import torch
import torch.distributed as c10d
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_utils import TestCase, run_tests, sandcastle_skip_if
NO_NCCL = not hasattr(c10d, "ProcessGroupNCCL")
Reported by Pylint.
Line: 4
Column: 1
import sys
import test_c10d_spawn
import torch
import torch.distributed as c10d
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_utils import TestCase, run_tests, sandcastle_skip_if
NO_NCCL = not hasattr(c10d, "ProcessGroupNCCL")
Reported by Pylint.
Line: 5
Column: 1
import test_c10d_spawn
import torch
import torch.distributed as c10d
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_utils import TestCase, run_tests, sandcastle_skip_if
NO_NCCL = not hasattr(c10d, "ProcessGroupNCCL")
# Fails on Python-3.9, see https://github.com/pytorch/pytorch/issues/51619
Reported by Pylint.
Line: 6
Column: 1
import torch
import torch.distributed as c10d
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_utils import TestCase, run_tests, sandcastle_skip_if
NO_NCCL = not hasattr(c10d, "ProcessGroupNCCL")
# Fails on Python-3.9, see https://github.com/pytorch/pytorch/issues/51619
if sys.version_info < (3, 9):
Reported by Pylint.
Line: 1
Column: 1
import sys
import test_c10d_spawn
import torch
import torch.distributed as c10d
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_utils import TestCase, run_tests, sandcastle_skip_if
NO_NCCL = not hasattr(c10d, "ProcessGroupNCCL")
Reported by Pylint.
Line: 3
Column: 1
import sys
import test_c10d_spawn
import torch
import torch.distributed as c10d
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_utils import TestCase, run_tests, sandcastle_skip_if
NO_NCCL = not hasattr(c10d, "ProcessGroupNCCL")
Reported by Pylint.
Line: 4
Column: 1
import sys
import test_c10d_spawn
import torch
import torch.distributed as c10d
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_utils import TestCase, run_tests, sandcastle_skip_if
NO_NCCL = not hasattr(c10d, "ProcessGroupNCCL")
Reported by Pylint.
Line: 5
Column: 1
import test_c10d_spawn
import torch
import torch.distributed as c10d
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_utils import TestCase, run_tests, sandcastle_skip_if
NO_NCCL = not hasattr(c10d, "ProcessGroupNCCL")
# Fails on Python-3.9, see https://github.com/pytorch/pytorch/issues/51619
Reported by Pylint.
Line: 6
Column: 1
import torch
import torch.distributed as c10d
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_utils import TestCase, run_tests, sandcastle_skip_if
NO_NCCL = not hasattr(c10d, "ProcessGroupNCCL")
# Fails on Python-3.9, see https://github.com/pytorch/pytorch/issues/51619
if sys.version_info < (3, 9):
Reported by Pylint.
Line: 12
Column: 1
# Fails on Python-3.9, see https://github.com/pytorch/pytorch/issues/51619
if sys.version_info < (3, 9):
class ProcessGroupShareTensorTest(test_c10d_spawn.AbstractProcessGroupShareTensorTest, TestCase):
@classmethod
def _init_pg_nccl(cls, rank, filename, world_size):
store = c10d.FileStore(filename, world_size)
return c10d.ProcessGroupNCCL(store, rank, world_size)
Reported by Pylint.
benchmarks/operator_benchmark/c2/add_test.py
18 issues
Line: 2
Column: 1
import operator_benchmark as op_bench
import benchmark_caffe2 as op_bench_c2
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for element-wise Add operator. Supports both Caffe2/PyTorch."""
# Configs for C2 add operator
Reported by Pylint.
Line: 3
Column: 1
import operator_benchmark as op_bench
import benchmark_caffe2 as op_bench_c2
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for element-wise Add operator. Supports both Caffe2/PyTorch."""
# Configs for C2 add operator
Reported by Pylint.
Line: 4
Column: 1
import operator_benchmark as op_bench
import benchmark_caffe2 as op_bench_c2
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for element-wise Add operator. Supports both Caffe2/PyTorch."""
# Configs for C2 add operator
Reported by Pylint.
Line: 10
Column: 20
"""Microbenchmarks for element-wise Add operator. Supports both Caffe2/PyTorch."""
# Configs for C2 add operator
add_long_configs = op_bench.cross_product_configs(
M=[8, 64, 128],
N=range(2, 10, 3),
K=[2 ** x for x in range(0, 3)],
dtype=["int", "float"],
tags=["long"]
Reported by Pylint.
Line: 19
Column: 21
)
add_short_configs = op_bench.config_list(
attrs=[
[8, 16, 32, "int"],
[16, 16, 64, "float"],
[64, 64, 128, "int"],
],
Reported by Pylint.
Line: 3
Column: 1
import operator_benchmark as op_bench
import benchmark_caffe2 as op_bench_c2
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for element-wise Add operator. Supports both Caffe2/PyTorch."""
# Configs for C2 add operator
Reported by Pylint.
Line: 7
Column: 1
from caffe2.python import core
"""Microbenchmarks for element-wise Add operator. Supports both Caffe2/PyTorch."""
# Configs for C2 add operator
add_long_configs = op_bench.cross_product_configs(
M=[8, 64, 128],
N=range(2, 10, 3),
Reported by Pylint.
Line: 31
Column: 9
class AddBenchmark(op_bench_c2.Caffe2BenchmarkBase):
def init(self, M, N, K, dtype):
self.input_one = self.tensor([M, N, K], dtype)
self.input_two = self.tensor([M, N, K], dtype)
self.output = self.tensor([M, N, K], dtype)
self.set_module_name("add")
def forward(self):
Reported by Pylint.
Line: 32
Column: 9
class AddBenchmark(op_bench_c2.Caffe2BenchmarkBase):
def init(self, M, N, K, dtype):
self.input_one = self.tensor([M, N, K], dtype)
self.input_two = self.tensor([M, N, K], dtype)
self.output = self.tensor([M, N, K], dtype)
self.set_module_name("add")
def forward(self):
op = core.CreateOperator(
Reported by Pylint.
Line: 33
Column: 9
def init(self, M, N, K, dtype):
self.input_one = self.tensor([M, N, K], dtype)
self.input_two = self.tensor([M, N, K], dtype)
self.output = self.tensor([M, N, K], dtype)
self.set_module_name("add")
def forward(self):
op = core.CreateOperator(
"Add", [self.input_one, self.input_two], self.output, **self.args
Reported by Pylint.
caffe2/python/context.py
18 issues
Line: 53
Column: 5
def _context_registry():
global _CONTEXT_REGISTRY
return _CONTEXT_REGISTRY
def _get_managed_classes(obj):
return [
Reported by Pylint.
Line: 106
Column: 5
DefaultManaged is similar to Managed but if there is no parent when
current() is called it makes a new one.
"""
pass
Reported by Pylint.
Line: 1
Column: 1
## @package context
# Module caffe2.python.context
import inspect
import threading
import functools
class _ContextInfo(object):
Reported by Pylint.
Line: 9
Column: 1
import functools
class _ContextInfo(object):
def __init__(self, cls, allow_default):
self.cls = cls
self.allow_default = allow_default
self._local_stack = threading.local()
Reported by Pylint.
Line: 21
Column: 5
self._local_stack.obj = []
return self._local_stack.obj
def enter(self, value):
self._stack.append(value)
def exit(self, value):
assert len(self._stack) > 0, 'Context %s is empty.' % self.cls
assert self._stack.pop() == value
Reported by Pylint.
Line: 24
Column: 5
def enter(self, value):
self._stack.append(value)
def exit(self, value):
assert len(self._stack) > 0, 'Context %s is empty.' % self.cls
assert self._stack.pop() == value
def get_active(self, required=True):
if len(self._stack) == 0:
Reported by Pylint.
Line: 25
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
self._stack.append(value)
def exit(self, value):
assert len(self._stack) > 0, 'Context %s is empty.' % self.cls
assert self._stack.pop() == value
def get_active(self, required=True):
if len(self._stack) == 0:
if not required:
Reported by Bandit.
Line: 26
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
def exit(self, value):
assert len(self._stack) > 0, 'Context %s is empty.' % self.cls
assert self._stack.pop() == value
def get_active(self, required=True):
if len(self._stack) == 0:
if not required:
return None
Reported by Bandit.
Line: 28
Column: 5
assert len(self._stack) > 0, 'Context %s is empty.' % self.cls
assert self._stack.pop() == value
def get_active(self, required=True):
if len(self._stack) == 0:
if not required:
return None
assert self.allow_default, (
'Context %s is required but none is active.' % self.cls)
Reported by Pylint.
Line: 32
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
if len(self._stack) == 0:
if not required:
return None
assert self.allow_default, (
'Context %s is required but none is active.' % self.cls)
self.enter(self.cls())
return self._stack[-1]
Reported by Bandit.
caffe2/python/models/seq2seq/seq2seq_model_helper_test.py
18 issues
Line: 1
Column: 1
from caffe2.python.models.seq2seq import seq2seq_model_helper
from caffe2.python import scope, test_util
Reported by Pylint.
Line: 10
Column: 1
from caffe2.python import scope, test_util
class Seq2SeqModelHelperTest(test_util.TestCase):
def testConstuctor(self):
model_name = 'TestModel'
m = seq2seq_model_helper.Seq2SeqModelHelper(name=model_name)
self.assertEqual(m.name, model_name)
Reported by Pylint.
Line: 11
Column: 5
class Seq2SeqModelHelperTest(test_util.TestCase):
def testConstuctor(self):
model_name = 'TestModel'
m = seq2seq_model_helper.Seq2SeqModelHelper(name=model_name)
self.assertEqual(m.name, model_name)
self.assertEqual(m.init_params, True)
Reported by Pylint.
Line: 11
Column: 5
class Seq2SeqModelHelperTest(test_util.TestCase):
def testConstuctor(self):
model_name = 'TestModel'
m = seq2seq_model_helper.Seq2SeqModelHelper(name=model_name)
self.assertEqual(m.name, model_name)
self.assertEqual(m.init_params, True)
Reported by Pylint.
Line: 13
Column: 9
class Seq2SeqModelHelperTest(test_util.TestCase):
def testConstuctor(self):
model_name = 'TestModel'
m = seq2seq_model_helper.Seq2SeqModelHelper(name=model_name)
self.assertEqual(m.name, model_name)
self.assertEqual(m.init_params, True)
self.assertEqual(m.arg_scope, {
Reported by Pylint.
Line: 24
Column: 5
'order': 'NHWC'
})
def testAddParam(self):
m = seq2seq_model_helper.Seq2SeqModelHelper()
param_name = 'test_param'
param = m.AddParam(param_name, init_value=1)
self.assertEqual(str(param), param_name)
Reported by Pylint.
Line: 24
Column: 5
'order': 'NHWC'
})
def testAddParam(self):
m = seq2seq_model_helper.Seq2SeqModelHelper()
param_name = 'test_param'
param = m.AddParam(param_name, init_value=1)
self.assertEqual(str(param), param_name)
Reported by Pylint.
Line: 25
Column: 9
})
def testAddParam(self):
m = seq2seq_model_helper.Seq2SeqModelHelper()
param_name = 'test_param'
param = m.AddParam(param_name, init_value=1)
self.assertEqual(str(param), param_name)
Reported by Pylint.
Line: 31
Column: 5
param = m.AddParam(param_name, init_value=1)
self.assertEqual(str(param), param_name)
def testGetNonTrainableParams(self):
m = seq2seq_model_helper.Seq2SeqModelHelper()
m.AddParam('test_param1', init_value=1, trainable=True)
p2 = m.AddParam('test_param2', init_value=2, trainable=False)
Reported by Pylint.
Line: 31
Column: 5
param = m.AddParam(param_name, init_value=1)
self.assertEqual(str(param), param_name)
def testGetNonTrainableParams(self):
m = seq2seq_model_helper.Seq2SeqModelHelper()
m.AddParam('test_param1', init_value=1, trainable=True)
p2 = m.AddParam('test_param2', init_value=2, trainable=False)
Reported by Pylint.
caffe2/python/layers/sparse_feature_hash.py
18 issues
Line: 1
Column: 1
## @package sparse_feature_hash
# Module caffe2.python.layers.sparse_feature_hash
from caffe2.python import schema, core
from caffe2.python.layers.layers import (
Reported by Pylint.
Line: 21
Column: 1
import numpy as np
class SparseFeatureHash(ModelLayer):
def __init__(self, model, input_record, seed=0, modulo=None,
use_hashing=True, use_divide_mod=False, divisor=None, name='sparse_feature_hash', **kwargs):
super(SparseFeatureHash, self).__init__(model, name, input_record, **kwargs)
Reported by Pylint.
Line: 23
Column: 5
class SparseFeatureHash(ModelLayer):
def __init__(self, model, input_record, seed=0, modulo=None,
use_hashing=True, use_divide_mod=False, divisor=None, name='sparse_feature_hash', **kwargs):
super(SparseFeatureHash, self).__init__(model, name, input_record, **kwargs)
assert use_hashing + use_divide_mod < 2, "use_hashing and use_divide_mod cannot be set true at the same time."
Reported by Pylint.
Line: 24
Column: 1
class SparseFeatureHash(ModelLayer):
def __init__(self, model, input_record, seed=0, modulo=None,
use_hashing=True, use_divide_mod=False, divisor=None, name='sparse_feature_hash', **kwargs):
super(SparseFeatureHash, self).__init__(model, name, input_record, **kwargs)
assert use_hashing + use_divide_mod < 2, "use_hashing and use_divide_mod cannot be set true at the same time."
if use_divide_mod:
Reported by Pylint.
Line: 25
Column: 9
def __init__(self, model, input_record, seed=0, modulo=None,
use_hashing=True, use_divide_mod=False, divisor=None, name='sparse_feature_hash', **kwargs):
super(SparseFeatureHash, self).__init__(model, name, input_record, **kwargs)
assert use_hashing + use_divide_mod < 2, "use_hashing and use_divide_mod cannot be set true at the same time."
if use_divide_mod:
assert divisor >= 1, 'Unexpected divisor: {}'.format(divisor)
Reported by Pylint.
Line: 27
Column: 1
use_hashing=True, use_divide_mod=False, divisor=None, name='sparse_feature_hash', **kwargs):
super(SparseFeatureHash, self).__init__(model, name, input_record, **kwargs)
assert use_hashing + use_divide_mod < 2, "use_hashing and use_divide_mod cannot be set true at the same time."
if use_divide_mod:
assert divisor >= 1, 'Unexpected divisor: {}'.format(divisor)
self.divisor = self.create_param(param_name='divisor',
Reported by Pylint.
Line: 27
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
use_hashing=True, use_divide_mod=False, divisor=None, name='sparse_feature_hash', **kwargs):
super(SparseFeatureHash, self).__init__(model, name, input_record, **kwargs)
assert use_hashing + use_divide_mod < 2, "use_hashing and use_divide_mod cannot be set true at the same time."
if use_divide_mod:
assert divisor >= 1, 'Unexpected divisor: {}'.format(divisor)
self.divisor = self.create_param(param_name='divisor',
Reported by Bandit.
Line: 30
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
assert use_hashing + use_divide_mod < 2, "use_hashing and use_divide_mod cannot be set true at the same time."
if use_divide_mod:
assert divisor >= 1, 'Unexpected divisor: {}'.format(divisor)
self.divisor = self.create_param(param_name='divisor',
shape=[1],
initializer=('GivenTensorInt64Fill', {'values': np.array([divisor])}),
optimizer=model.NoOptim)
Reported by Bandit.
Line: 34
Column: 1
self.divisor = self.create_param(param_name='divisor',
shape=[1],
initializer=('GivenTensorInt64Fill', {'values': np.array([divisor])}),
optimizer=model.NoOptim)
self.seed = seed
self.use_hashing = use_hashing
self.use_divide_mod = use_divide_mod
Reported by Pylint.
Line: 45
Column: 1
self.modulo = modulo or self.extract_hash_size(input_record.items.metadata)
metadata = schema.Metadata(
categorical_limit=self.modulo,
feature_specs=input_record.items.metadata.feature_specs if input_record.items.metadata else None,
expected_value=input_record.items.metadata.expected_value if input_record.items.metadata else None
)
with core.NameScope(name):
self.output_schema = schema.NewRecord(model.net, IdList)
self.output_schema.items.set_metadata(metadata)
Reported by Pylint.
caffe2/python/caffe_translator_test.py
18 issues
Line: 6
Column: 1
# that all the results look right. In default, it is disabled unless you
# explicitly want to run it.
from google.protobuf import text_format
import numpy as np
import os
import sys
CAFFE_FOUND = False
Reported by Pylint.
Line: 13
Column: 5
CAFFE_FOUND = False
try:
from caffe.proto import caffe_pb2
from caffe2.python import caffe_translator
CAFFE_FOUND = True
except Exception as e:
# Safeguard so that we only catch the caffe module not found exception.
if ("'caffe'" in str(e)):
Reported by Pylint.
Line: 16
Column: 8
from caffe.proto import caffe_pb2
from caffe2.python import caffe_translator
CAFFE_FOUND = True
except Exception as e:
# Safeguard so that we only catch the caffe module not found exception.
if ("'caffe'" in str(e)):
print(
"PyTorch/Caffe2 now requires a separate installation of caffe. "
"Right now, this is not found, so we will skip the caffe "
Reported by Pylint.
Line: 1
Column: 1
# This a large test that goes through the translation of the bvlc caffenet
# model, runs an example through the whole model, and verifies numerically
# that all the results look right. In default, it is disabled unless you
# explicitly want to run it.
from google.protobuf import text_format
import numpy as np
import os
import sys
Reported by Pylint.
Line: 8
Column: 1
from google.protobuf import text_format
import numpy as np
import os
import sys
CAFFE_FOUND = False
try:
from caffe.proto import caffe_pb2
Reported by Pylint.
Line: 9
Column: 1
from google.protobuf import text_format
import numpy as np
import os
import sys
CAFFE_FOUND = False
try:
from caffe.proto import caffe_pb2
from caffe2.python import caffe_translator
Reported by Pylint.
Line: 16
Column: 1
from caffe.proto import caffe_pb2
from caffe2.python import caffe_translator
CAFFE_FOUND = True
except Exception as e:
# Safeguard so that we only catch the caffe module not found exception.
if ("'caffe'" in str(e)):
print(
"PyTorch/Caffe2 now requires a separate installation of caffe. "
"Right now, this is not found, so we will skip the caffe "
Reported by Pylint.
Line: 18
Column: 1
CAFFE_FOUND = True
except Exception as e:
# Safeguard so that we only catch the caffe module not found exception.
if ("'caffe'" in str(e)):
print(
"PyTorch/Caffe2 now requires a separate installation of caffe. "
"Right now, this is not found, so we will skip the caffe "
"translator test.")
Reported by Pylint.
Line: 24
Column: 1
"Right now, this is not found, so we will skip the caffe "
"translator test.")
from caffe2.python import utils, workspace, test_util
import unittest
def setUpModule():
# Do nothing if caffe and test data is not found
if not (CAFFE_FOUND and os.path.exists('data/testdata/caffe_translator')):
Reported by Pylint.
Line: 25
Column: 1
"translator test.")
from caffe2.python import utils, workspace, test_util
import unittest
def setUpModule():
# Do nothing if caffe and test data is not found
if not (CAFFE_FOUND and os.path.exists('data/testdata/caffe_translator')):
return
Reported by Pylint.
benchmarks/operator_benchmark/pt/linear_prepack_fp16_test.py
18 issues
Line: 2
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for linear_prepack_fp16_ operator. Supports both Caffe2/PyTorch."""
# Configs for PT linear_prepack_fp16 operator
linear_prepack_fp16_long_configs = op_bench.cross_product_configs(
M=[8, 128],
N=[32, 64],
Reported by Pylint.
Line: 7
Column: 36
"""Microbenchmarks for linear_prepack_fp16_ operator. Supports both Caffe2/PyTorch."""
# Configs for PT linear_prepack_fp16 operator
linear_prepack_fp16_long_configs = op_bench.cross_product_configs(
M=[8, 128],
N=[32, 64],
K=[256, 512],
device=['cpu'],
tags=["long"]
Reported by Pylint.
Line: 15
Column: 37
tags=["long"]
)
linear_prepack_fp16_short_configs = op_bench.config_list(
attr_names=["M", "N", "K"],
attrs=[
[1, 1, 1],
[64, 64, 64],
[64, 64, 128],
Reported by Pylint.
Line: 28
Column: 34
tags=["short"],
)
class LinearPrepackFP16Benchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device):
self.inputs = {
"input_one": torch.rand(M, N, K, device=device, requires_grad=False, dtype=torch.float32)
}
self.set_module_name("linear_prepack_fp16")
Reported by Pylint.
Line: 41
Column: 1
# The generated test names based on linear_prepack_fp16_short_configs will be in the following pattern:
# linear_prepack_fp16_M8_N16_K32_devicecpu
op_bench.generate_pt_test(linear_prepack_fp16_long_configs + linear_prepack_fp16_short_configs, LinearPrepackFP16Benchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 4
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for linear_prepack_fp16_ operator. Supports both Caffe2/PyTorch."""
# Configs for PT linear_prepack_fp16 operator
linear_prepack_fp16_long_configs = op_bench.cross_product_configs(
M=[8, 128],
N=[32, 64],
Reported by Pylint.
Line: 30
Column: 9
class LinearPrepackFP16Benchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device):
self.inputs = {
"input_one": torch.rand(M, N, K, device=device, requires_grad=False, dtype=torch.float32)
}
self.set_module_name("linear_prepack_fp16")
def forward(self, input_one):
Reported by Pylint.
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for linear_prepack_fp16_ operator. Supports both Caffe2/PyTorch."""
# Configs for PT linear_prepack_fp16 operator
linear_prepack_fp16_long_configs = op_bench.cross_product_configs(
M=[8, 128],
N=[32, 64],
Reported by Pylint.
Line: 28
Column: 1
tags=["short"],
)
class LinearPrepackFP16Benchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device):
self.inputs = {
"input_one": torch.rand(M, N, K, device=device, requires_grad=False, dtype=torch.float32)
}
self.set_module_name("linear_prepack_fp16")
Reported by Pylint.
Line: 29
Column: 5
)
class LinearPrepackFP16Benchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device):
self.inputs = {
"input_one": torch.rand(M, N, K, device=device, requires_grad=False, dtype=torch.float32)
}
self.set_module_name("linear_prepack_fp16")
Reported by Pylint.