The following issues were found:
benchmarks/operator_benchmark/pt/instancenorm_test.py
13 issues
Line: 3
Column: 1
import operator_benchmark as op_bench
import torch
import torch.nn.functional as F
"""Microbenchmarks for instancenorm operator."""
instancenorm_configs_short = op_bench.cross_product_configs(
Reported by Pylint.
Line: 4
Column: 1
import operator_benchmark as op_bench
import torch
import torch.nn.functional as F
"""Microbenchmarks for instancenorm operator."""
instancenorm_configs_short = op_bench.cross_product_configs(
Reported by Pylint.
Line: 9
Column: 30
"""Microbenchmarks for instancenorm operator."""
instancenorm_configs_short = op_bench.cross_product_configs(
dims=(
(32, 8, 16),
(32, 8, 56, 56),
),
tags=["short"],
Reported by Pylint.
Line: 18
Column: 29
)
class InstanceNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dims):
num_channels = dims[1]
self.inputs = {
"input": (torch.rand(*dims) - 0.5) * 256,
"weight": torch.rand(num_channels, dtype=torch.float),
Reported by Pylint.
Line: 33
Column: 1
input, weight=weight, bias=bias, eps=eps)
op_bench.generate_pt_test(instancenorm_configs_short, InstanceNormBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 7
Column: 1
import torch.nn.functional as F
"""Microbenchmarks for instancenorm operator."""
instancenorm_configs_short = op_bench.cross_product_configs(
dims=(
(32, 8, 16),
(32, 8, 56, 56),
Reported by Pylint.
Line: 21
Column: 9
class InstanceNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dims):
num_channels = dims[1]
self.inputs = {
"input": (torch.rand(*dims) - 0.5) * 256,
"weight": torch.rand(num_channels, dtype=torch.float),
"bias": torch.rand(num_channels, dtype=torch.float),
"eps": 1e-5
}
Reported by Pylint.
Line: 28
Column: 23
"eps": 1e-5
}
def forward(self, input, weight, bias, eps: float):
return F.instance_norm(
input, weight=weight, bias=bias, eps=eps)
op_bench.generate_pt_test(instancenorm_configs_short, InstanceNormBenchmark)
Reported by Pylint.
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
import torch.nn.functional as F
"""Microbenchmarks for instancenorm operator."""
instancenorm_configs_short = op_bench.cross_product_configs(
Reported by Pylint.
Line: 18
Column: 1
)
class InstanceNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dims):
num_channels = dims[1]
self.inputs = {
"input": (torch.rand(*dims) - 0.5) * 256,
"weight": torch.rand(num_channels, dtype=torch.float),
Reported by Pylint.
caffe2/python/mkl/mkl_pool_op_test.py
13 issues
Line: 7
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings, assume
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
Reported by Pylint.
Line: 8
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings, assume
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
Reported by Pylint.
Line: 15
Column: 22
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLPoolTest(hu.HypothesisTestCase):
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(3, 5),
Reported by Pylint.
Line: 29
Column: 34
@settings(max_examples=2, deadline=100)
def test_mkl_pooling(self, stride, pad, kernel, size,
input_channels, batch_size,
method, gc, dc):
assume(pad < kernel)
op = core.CreateOperator(
method,
["X"],
["Y"],
Reported by Pylint.
Line: 46
Column: 5
if __name__ == "__main__":
import unittest
unittest.main()
Reported by Pylint.
Line: 1
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings, assume
import numpy as np
Reported by Pylint.
Line: 17
Column: 1
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLPoolTest(hu.HypothesisTestCase):
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(3, 5),
size=st.integers(7, 9),
input_channels=st.integers(1, 3),
Reported by Pylint.
Line: 27
Column: 5
method=st.sampled_from(["MaxPool", "AveragePool"]),
**mu.gcs)
@settings(max_examples=2, deadline=100)
def test_mkl_pooling(self, stride, pad, kernel, size,
input_channels, batch_size,
method, gc, dc):
assume(pad < kernel)
op = core.CreateOperator(
method,
Reported by Pylint.
Line: 27
Column: 5
method=st.sampled_from(["MaxPool", "AveragePool"]),
**mu.gcs)
@settings(max_examples=2, deadline=100)
def test_mkl_pooling(self, stride, pad, kernel, size,
input_channels, batch_size,
method, gc, dc):
assume(pad < kernel)
op = core.CreateOperator(
method,
Reported by Pylint.
Line: 27
Column: 5
method=st.sampled_from(["MaxPool", "AveragePool"]),
**mu.gcs)
@settings(max_examples=2, deadline=100)
def test_mkl_pooling(self, stride, pad, kernel, size,
input_channels, batch_size,
method, gc, dc):
assume(pad < kernel)
op = core.CreateOperator(
method,
Reported by Pylint.
caffe2/python/mkl/mkl_elementwise_sum_op_test.py
13 issues
Line: 7
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
Reported by Pylint.
Line: 8
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
Reported by Pylint.
Line: 15
Column: 22
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLElementwiseSumTest(hu.HypothesisTestCase):
@given(size=st.integers(7, 9),
input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
Reported by Pylint.
Line: 30
Column: 34
batch_size,
inputs,
inplace,
gc,
dc):
op = core.CreateOperator(
"Sum",
["X_{}".format(i) for i in range(inputs)],
["X_0" if inplace else "Y"],
Reported by Pylint.
Line: 43
Column: 5
if __name__ == "__main__":
import unittest
unittest.main()
Reported by Pylint.
Line: 1
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
Reported by Pylint.
Line: 17
Column: 1
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLElementwiseSumTest(hu.HypothesisTestCase):
@given(size=st.integers(7, 9),
input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
inputs=st.integers(1, 3),
inplace=st.booleans(),
Reported by Pylint.
Line: 24
Column: 5
inputs=st.integers(1, 3),
inplace=st.booleans(),
**mu.gcs)
def test_mkl_elementwise_sum(self,
size,
input_channels,
batch_size,
inputs,
inplace,
Reported by Pylint.
Line: 24
Column: 5
inputs=st.integers(1, 3),
inplace=st.booleans(),
**mu.gcs)
def test_mkl_elementwise_sum(self,
size,
input_channels,
batch_size,
inputs,
inplace,
Reported by Pylint.
Line: 24
Column: 5
inputs=st.integers(1, 3),
inplace=st.booleans(),
**mu.gcs)
def test_mkl_elementwise_sum(self,
size,
input_channels,
batch_size,
inputs,
inplace,
Reported by Pylint.
caffe2/python/lazy_dyndep.py
13 issues
Line: 77
Column: 13
return
for name in list(_LAZY_IMPORTED_DYNDEPS):
try:
dyndep.InitOpLibrary(name, trigger_lazy=False)
except BaseException as e:
if _error_handler:
_error_handler(e)
finally:
_LAZY_IMPORTED_DYNDEPS.remove(name)
Reported by Pylint.
Line: 40
Column: 5
# time when an actual call is made.
print('Ignoring {} as it is not a valid file.'.format(name))
return
global _LAZY_IMPORTED_DYNDEPS
_LAZY_IMPORTED_DYNDEPS.add(name)
_LAZY_IMPORTED_DYNDEPS = set()
_error_handler = None
Reported by Pylint.
Line: 62
Column: 5
None
"""
global _error_handler
_error_handler = handler
def GetImportedOpsLibraries():
_import_lazy()
Reported by Pylint.
Line: 72
Column: 5
def _import_lazy():
global _LAZY_IMPORTED_DYNDEPS
if not _LAZY_IMPORTED_DYNDEPS:
return
for name in list(_LAZY_IMPORTED_DYNDEPS):
try:
dyndep.InitOpLibrary(name, trigger_lazy=False)
Reported by Pylint.
Line: 78
Column: 16
for name in list(_LAZY_IMPORTED_DYNDEPS):
try:
dyndep.InitOpLibrary(name, trigger_lazy=False)
except BaseException as e:
if _error_handler:
_error_handler(e)
finally:
_LAZY_IMPORTED_DYNDEPS.remove(name)
Reported by Pylint.
Line: 1
Column: 1
## @package lazy_dyndep
# Module caffe2.python.lazy_dyndep
import os
from caffe2.python import dyndep, lazy
Reported by Pylint.
Line: 12
Column: 1
from caffe2.python import dyndep, lazy
def RegisterOpsLibrary(name):
"""Registers a dynamic library that contains custom operators into Caffe2.
Since Caffe2 uses static variable registration, you can optionally load a
separate .so file that contains custom operators and registers that into
the caffe2 core binary. In C++, this is usually done by either declaring
Reported by Pylint.
Line: 45
Column: 1
_LAZY_IMPORTED_DYNDEPS = set()
_error_handler = None
def SetErrorHandler(handler):
"""Registers an error handler for errors from registering operators
Reported by Pylint.
Line: 48
Column: 1
_error_handler = None
def SetErrorHandler(handler):
"""Registers an error handler for errors from registering operators
Since the lazy registration may happen at a much later time, having a dedicated
error handler allows for custom error handling logic. It is highly
recomended to set this to prevent errors from bubbling up in weird parts of the
Reported by Pylint.
Line: 62
Column: 5
None
"""
global _error_handler
_error_handler = handler
def GetImportedOpsLibraries():
_import_lazy()
Reported by Pylint.
caffe2/python/ideep/softmax_op_test.py
13 issues
Line: 7
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
Reported by Pylint.
Line: 8
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
Reported by Pylint.
Line: 14
Column: 22
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class SoftmaxTest(hu.HypothesisTestCase):
@given(size=st.integers(8, 20),
input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
inplace=st.booleans(),
Reported by Pylint.
Line: 21
Column: 71
batch_size=st.integers(1, 3),
inplace=st.booleans(),
**mu.gcs)
def test_softmax(self, size, input_channels, batch_size, inplace, gc, dc):
op = core.CreateOperator(
"Softmax",
["X"],
["Y"],
axis=1,
Reported by Pylint.
Line: 21
Column: 62
batch_size=st.integers(1, 3),
inplace=st.booleans(),
**mu.gcs)
def test_softmax(self, size, input_channels, batch_size, inplace, gc, dc):
op = core.CreateOperator(
"Softmax",
["X"],
["Y"],
axis=1,
Reported by Pylint.
Line: 1
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
Reported by Pylint.
Line: 15
Column: 1
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class SoftmaxTest(hu.HypothesisTestCase):
@given(size=st.integers(8, 20),
input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
inplace=st.booleans(),
**mu.gcs)
Reported by Pylint.
Line: 21
Column: 5
batch_size=st.integers(1, 3),
inplace=st.booleans(),
**mu.gcs)
def test_softmax(self, size, input_channels, batch_size, inplace, gc, dc):
op = core.CreateOperator(
"Softmax",
["X"],
["Y"],
axis=1,
Reported by Pylint.
Line: 21
Column: 5
batch_size=st.integers(1, 3),
inplace=st.booleans(),
**mu.gcs)
def test_softmax(self, size, input_channels, batch_size, inplace, gc, dc):
op = core.CreateOperator(
"Softmax",
["X"],
["Y"],
axis=1,
Reported by Pylint.
Line: 21
Column: 5
batch_size=st.integers(1, 3),
inplace=st.booleans(),
**mu.gcs)
def test_softmax(self, size, input_channels, batch_size, inplace, gc, dc):
op = core.CreateOperator(
"Softmax",
["X"],
["Y"],
axis=1,
Reported by Pylint.
caffe2/python/layers/pairwise_similarity.py
13 issues
Line: 93
Column: 17
)
net.BatchGather(
[flattened, self.indices_to_gather()],
self.output_schema(),
)
else:
net.Flatten(Y, self.output_schema())
Reported by Pylint.
Line: 96
Column: 28
self.output_schema(),
)
else:
net.Flatten(Y, self.output_schema())
Reported by Pylint.
Line: 1
Column: 1
## @package dot_product
# Module caffe2.python.layers.dot_product
from caffe2.python import schema
from caffe2.python.layers.layers import (
Reported by Pylint.
Line: 14
Column: 1
)
class PairwiseSimilarity(ModelLayer):
def __init__(self, model, input_record, output_dim, pairwise_similarity_func='dot',
name='pairwise_similarity', **kwargs):
super(PairwiseSimilarity, self).__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Struct), (
Reported by Pylint.
Line: 16
Column: 5
class PairwiseSimilarity(ModelLayer):
def __init__(self, model, input_record, output_dim, pairwise_similarity_func='dot',
name='pairwise_similarity', **kwargs):
super(PairwiseSimilarity, self).__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Struct), (
"Incorrect input type. Expected Struct, but received: {0}".
format(input_record))
Reported by Pylint.
Line: 18
Column: 9
def __init__(self, model, input_record, output_dim, pairwise_similarity_func='dot',
name='pairwise_similarity', **kwargs):
super(PairwiseSimilarity, self).__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Struct), (
"Incorrect input type. Expected Struct, but received: {0}".
format(input_record))
assert (
('all_embeddings' in input_record) ^
Reported by Pylint.
Line: 19
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
def __init__(self, model, input_record, output_dim, pairwise_similarity_func='dot',
name='pairwise_similarity', **kwargs):
super(PairwiseSimilarity, self).__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Struct), (
"Incorrect input type. Expected Struct, but received: {0}".
format(input_record))
assert (
('all_embeddings' in input_record) ^
('x_embeddings' in input_record and 'y_embeddings' in input_record)
Reported by Bandit.
Line: 22
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
assert isinstance(input_record, schema.Struct), (
"Incorrect input type. Expected Struct, but received: {0}".
format(input_record))
assert (
('all_embeddings' in input_record) ^
('x_embeddings' in input_record and 'y_embeddings' in input_record)
), (
"either (all_embeddings) xor (x_embeddings and y_embeddings) " +
"should be given."
Reported by Bandit.
Line: 37
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
x_embeddings = input_record['x_embeddings']
y_embeddings = input_record['y_embeddings']
assert isinstance(x_embeddings, schema.Scalar), (
"Incorrect input type for x. Expected Scalar, " +
"but received: {0}".format(x_embeddings))
assert isinstance(y_embeddings, schema.Scalar), (
"Incorrect input type for y. Expected Scalar, " +
"but received: {0}".format(y_embeddings)
Reported by Bandit.
Line: 40
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
assert isinstance(x_embeddings, schema.Scalar), (
"Incorrect input type for x. Expected Scalar, " +
"but received: {0}".format(x_embeddings))
assert isinstance(y_embeddings, schema.Scalar), (
"Incorrect input type for y. Expected Scalar, " +
"but received: {0}".format(y_embeddings)
)
if 'indices_to_gather' in input_record:
Reported by Bandit.
caffe2/python/ideep/LRN_op_test.py
13 issues
Line: 7
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
Reported by Pylint.
Line: 8
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
Reported by Pylint.
Line: 15
Column: 22
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class LRNTest(hu.HypothesisTestCase):
@given(input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
im_size=st.integers(1, 10),
order=st.sampled_from(["NCHW"]),
Reported by Pylint.
Line: 1
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
Reported by Pylint.
Line: 1
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
Reported by Pylint.
Line: 16
Column: 1
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class LRNTest(hu.HypothesisTestCase):
@given(input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
im_size=st.integers(1, 10),
order=st.sampled_from(["NCHW"]),
**mu.gcs)
Reported by Pylint.
Line: 23
Column: 5
order=st.sampled_from(["NCHW"]),
**mu.gcs)
@settings(deadline=10000)
def test_LRN(self, input_channels,
batch_size, im_size, order,
gc, dc):
op = core.CreateOperator(
"LRN",
["X"],
Reported by Pylint.
Line: 23
Column: 5
order=st.sampled_from(["NCHW"]),
**mu.gcs)
@settings(deadline=10000)
def test_LRN(self, input_channels,
batch_size, im_size, order,
gc, dc):
op = core.CreateOperator(
"LRN",
["X"],
Reported by Pylint.
Line: 23
Column: 5
order=st.sampled_from(["NCHW"]),
**mu.gcs)
@settings(deadline=10000)
def test_LRN(self, input_channels,
batch_size, im_size, order,
gc, dc):
op = core.CreateOperator(
"LRN",
["X"],
Reported by Pylint.
Line: 23
Column: 5
order=st.sampled_from(["NCHW"]),
**mu.gcs)
@settings(deadline=10000)
def test_LRN(self, input_channels,
batch_size, im_size, order,
gc, dc):
op = core.CreateOperator(
"LRN",
["X"],
Reported by Pylint.
.github/scripts/ensure_actions_will_cancel.py
13 issues
Line: 14
Column: 21
WORKFLOWS = REPO_ROOT / ".github" / "workflows"
def concurrency_key(filename: Path) -> str:
workflow_name = filename.with_suffix("").name.replace("_", "-")
if workflow_name.startswith("generated-"):
workflow_name = workflow_name[len("generated-"):]
return f"{workflow_name}-${{{{ github.event.pull_request.number || github.sha }}}}"
Reported by Pylint.
Line: 21
Column: 18
return f"{workflow_name}-${{{{ github.event.pull_request.number || github.sha }}}}"
def should_check(filename: Path) -> bool:
with open(filename, "r") as f:
content = f.read()
data = yaml.safe_load(content)
on = data.get("on", data.get(True, {}))
Reported by Pylint.
Line: 22
Column: 33
def should_check(filename: Path) -> bool:
with open(filename, "r") as f:
content = f.read()
data = yaml.safe_load(content)
on = data.get("on", data.get(True, {}))
return "pull_request" in on
Reported by Pylint.
Line: 25
Column: 5
with open(filename, "r") as f:
content = f.read()
data = yaml.safe_load(content)
on = data.get("on", data.get(True, {}))
return "pull_request" in on
if __name__ == "__main__":
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/env python3
import argparse
import sys
import yaml
from pathlib import Path
Reported by Pylint.
Line: 7
Column: 1
import sys
import yaml
from pathlib import Path
REPO_ROOT = Path(__file__).resolve().parent.parent.parent
WORKFLOWS = REPO_ROOT / ".github" / "workflows"
Reported by Pylint.
Line: 14
Column: 1
WORKFLOWS = REPO_ROOT / ".github" / "workflows"
def concurrency_key(filename: Path) -> str:
workflow_name = filename.with_suffix("").name.replace("_", "-")
if workflow_name.startswith("generated-"):
workflow_name = workflow_name[len("generated-"):]
return f"{workflow_name}-${{{{ github.event.pull_request.number || github.sha }}}}"
Reported by Pylint.
Line: 21
Column: 1
return f"{workflow_name}-${{{{ github.event.pull_request.number || github.sha }}}}"
def should_check(filename: Path) -> bool:
with open(filename, "r") as f:
content = f.read()
data = yaml.safe_load(content)
on = data.get("on", data.get(True, {}))
Reported by Pylint.
Line: 22
Column: 33
def should_check(filename: Path) -> bool:
with open(filename, "r") as f:
content = f.read()
data = yaml.safe_load(content)
on = data.get("on", data.get(True, {}))
return "pull_request" in on
Reported by Pylint.
Line: 26
Column: 5
content = f.read()
data = yaml.safe_load(content)
on = data.get("on", data.get(True, {}))
return "pull_request" in on
if __name__ == "__main__":
parser = argparse.ArgumentParser(
Reported by Pylint.
caffe2/python/ideep/transpose_op_test.py
13 issues
Line: 7
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
Reported by Pylint.
Line: 8
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
Reported by Pylint.
Line: 14
Column: 22
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TransposeTest(hu.HypothesisTestCase):
@given(
X=hu.tensor(min_dim=1, max_dim=5, dtype=np.float32), use_axes=st.booleans(), **mu.gcs)
@settings(deadline=None, max_examples=50)
def test_transpose(self, X, use_axes, gc, dc):
Reported by Pylint.
Line: 1
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
Reported by Pylint.
Line: 15
Column: 1
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TransposeTest(hu.HypothesisTestCase):
@given(
X=hu.tensor(min_dim=1, max_dim=5, dtype=np.float32), use_axes=st.booleans(), **mu.gcs)
@settings(deadline=None, max_examples=50)
def test_transpose(self, X, use_axes, gc, dc):
ndim = len(X.shape)
Reported by Pylint.
Line: 19
Column: 5
@given(
X=hu.tensor(min_dim=1, max_dim=5, dtype=np.float32), use_axes=st.booleans(), **mu.gcs)
@settings(deadline=None, max_examples=50)
def test_transpose(self, X, use_axes, gc, dc):
ndim = len(X.shape)
axes = np.arange(ndim)
np.random.shuffle(axes)
if use_axes:
Reported by Pylint.
Line: 19
Column: 5
@given(
X=hu.tensor(min_dim=1, max_dim=5, dtype=np.float32), use_axes=st.booleans(), **mu.gcs)
@settings(deadline=None, max_examples=50)
def test_transpose(self, X, use_axes, gc, dc):
ndim = len(X.shape)
axes = np.arange(ndim)
np.random.shuffle(axes)
if use_axes:
Reported by Pylint.
Line: 19
Column: 5
@given(
X=hu.tensor(min_dim=1, max_dim=5, dtype=np.float32), use_axes=st.booleans(), **mu.gcs)
@settings(deadline=None, max_examples=50)
def test_transpose(self, X, use_axes, gc, dc):
ndim = len(X.shape)
axes = np.arange(ndim)
np.random.shuffle(axes)
if use_axes:
Reported by Pylint.
Line: 19
Column: 5
@given(
X=hu.tensor(min_dim=1, max_dim=5, dtype=np.float32), use_axes=st.booleans(), **mu.gcs)
@settings(deadline=None, max_examples=50)
def test_transpose(self, X, use_axes, gc, dc):
ndim = len(X.shape)
axes = np.arange(ndim)
np.random.shuffle(axes)
if use_axes:
Reported by Pylint.
Line: 25
Column: 13
np.random.shuffle(axes)
if use_axes:
op = core.CreateOperator(
"Transpose", ["X"], ["Y"], axes=axes, device_option=gc)
else:
op = core.CreateOperator(
"Transpose", ["X"], ["Y"], device_option=gc)
Reported by Pylint.
caffe2/contrib/playground/module_map.py
13 issues
Line: 7
Column: 1
# Input
import caffe2.contrib.playground.resnetdemo.\
gfs_IN1k as gfs_IN1k # noqa
# model
import caffe2.contrib.playground.resnetdemo.\
IN1k_resnet as IN1k_resnet # noqa
Reported by Pylint.
Line: 11
Column: 1
gfs_IN1k as gfs_IN1k # noqa
# model
import caffe2.contrib.playground.resnetdemo.\
IN1k_resnet as IN1k_resnet # noqa
import caffe2.contrib.playground.resnetdemo.\
IN1k_resnet_no_test_model as IN1k_resnet_no_test_model # noqa
Reported by Pylint.
Line: 14
Column: 1
import caffe2.contrib.playground.resnetdemo.\
IN1k_resnet as IN1k_resnet # noqa
import caffe2.contrib.playground.resnetdemo.\
IN1k_resnet_no_test_model as IN1k_resnet_no_test_model # noqa
# Additional override
import caffe2.contrib.playground.resnetdemo.\
override_no_test_model_no_checkpoint as override_no_test_model_no_checkpoint # noqa
Reported by Pylint.
Line: 18
Column: 1
IN1k_resnet_no_test_model as IN1k_resnet_no_test_model # noqa
# Additional override
import caffe2.contrib.playground.resnetdemo.\
override_no_test_model_no_checkpoint as override_no_test_model_no_checkpoint # noqa
# FORWARD_PASS
import caffe2.contrib.playground.resnetdemo.\
caffe2_resnet50_default_forward as caffe2_resnet50_default_forward # noqa
Reported by Pylint.
Line: 22
Column: 1
override_no_test_model_no_checkpoint as override_no_test_model_no_checkpoint # noqa
# FORWARD_PASS
import caffe2.contrib.playground.resnetdemo.\
caffe2_resnet50_default_forward as caffe2_resnet50_default_forward # noqa
import caffe2.contrib.playground.resnetdemo.\
explicit_resnet_forward as explicit_resnet_forward # noqa
Reported by Pylint.
Line: 25
Column: 1
import caffe2.contrib.playground.resnetdemo.\
caffe2_resnet50_default_forward as caffe2_resnet50_default_forward # noqa
import caffe2.contrib.playground.resnetdemo.\
explicit_resnet_forward as explicit_resnet_forward # noqa
# PARAMETER_UPDATE
import caffe2.contrib.playground.resnetdemo.\
caffe2_resnet50_default_param_update as caffe2_resnet50_default_param_update # noqa
Reported by Pylint.
Line: 29
Column: 1
explicit_resnet_forward as explicit_resnet_forward # noqa
# PARAMETER_UPDATE
import caffe2.contrib.playground.resnetdemo.\
caffe2_resnet50_default_param_update as caffe2_resnet50_default_param_update # noqa
import caffe2.contrib.playground.resnetdemo.\
explicit_resnet_param_update as explicit_resnet_param_update # noqa
Reported by Pylint.
Line: 32
Column: 1
import caffe2.contrib.playground.resnetdemo.\
caffe2_resnet50_default_param_update as caffe2_resnet50_default_param_update # noqa
import caffe2.contrib.playground.resnetdemo.\
explicit_resnet_param_update as explicit_resnet_param_update # noqa
# RENDEZVOUS
import caffe2.contrib.playground.resnetdemo.\
rendezvous_filestore as rendezvous_filestore # noqa
Reported by Pylint.
Line: 36
Column: 1
explicit_resnet_param_update as explicit_resnet_param_update # noqa
# RENDEZVOUS
import caffe2.contrib.playground.resnetdemo.\
rendezvous_filestore as rendezvous_filestore # noqa
# OUTPUT
import caffe2.contrib.playground.\
output_generator as output_generator # noqa
Reported by Pylint.
Line: 40
Column: 1
rendezvous_filestore as rendezvous_filestore # noqa
# OUTPUT
import caffe2.contrib.playground.\
output_generator as output_generator # noqa
# METERS
# for meters, use the class name as your module name in this map
import caffe2.contrib.playground.\
Reported by Pylint.