The following issues were found:
test/test_cpp_api_parity.py
17 issues
Line: 1
Column: 1
import torch
# NN tests use double as the default dtype
torch.set_default_dtype(torch.double)
import os
import torch.testing._internal.common_utils as common
import torch.testing._internal.common_nn as common_nn
from cpp_api_parity.parity_table_parser import parse_parity_tracker_table
Reported by Pylint.
Line: 7
Column: 1
import os
import torch.testing._internal.common_utils as common
import torch.testing._internal.common_nn as common_nn
from cpp_api_parity.parity_table_parser import parse_parity_tracker_table
from cpp_api_parity.utils import is_torch_nn_functional_test
from cpp_api_parity import module_impl_check, functional_impl_check, sample_module, sample_functional
Reported by Pylint.
Line: 8
Column: 1
import os
import torch.testing._internal.common_utils as common
import torch.testing._internal.common_nn as common_nn
from cpp_api_parity.parity_table_parser import parse_parity_tracker_table
from cpp_api_parity.utils import is_torch_nn_functional_test
from cpp_api_parity import module_impl_check, functional_impl_check, sample_module, sample_functional
# NOTE: turn this on if you want to print source code of all C++ tests (e.g. for debugging purpose)
Reported by Pylint.
Line: 1
Column: 1
import torch
# NN tests use double as the default dtype
torch.set_default_dtype(torch.double)
import os
import torch.testing._internal.common_utils as common
import torch.testing._internal.common_nn as common_nn
from cpp_api_parity.parity_table_parser import parse_parity_tracker_table
Reported by Pylint.
Line: 5
Column: 1
# NN tests use double as the default dtype
torch.set_default_dtype(torch.double)
import os
import torch.testing._internal.common_utils as common
import torch.testing._internal.common_nn as common_nn
from cpp_api_parity.parity_table_parser import parse_parity_tracker_table
from cpp_api_parity.utils import is_torch_nn_functional_test
Reported by Pylint.
Line: 5
Column: 1
# NN tests use double as the default dtype
torch.set_default_dtype(torch.double)
import os
import torch.testing._internal.common_utils as common
import torch.testing._internal.common_nn as common_nn
from cpp_api_parity.parity_table_parser import parse_parity_tracker_table
from cpp_api_parity.utils import is_torch_nn_functional_test
Reported by Pylint.
Line: 7
Column: 1
import os
import torch.testing._internal.common_utils as common
import torch.testing._internal.common_nn as common_nn
from cpp_api_parity.parity_table_parser import parse_parity_tracker_table
from cpp_api_parity.utils import is_torch_nn_functional_test
from cpp_api_parity import module_impl_check, functional_impl_check, sample_module, sample_functional
Reported by Pylint.
Line: 8
Column: 1
import os
import torch.testing._internal.common_utils as common
import torch.testing._internal.common_nn as common_nn
from cpp_api_parity.parity_table_parser import parse_parity_tracker_table
from cpp_api_parity.utils import is_torch_nn_functional_test
from cpp_api_parity import module_impl_check, functional_impl_check, sample_module, sample_functional
# NOTE: turn this on if you want to print source code of all C++ tests (e.g. for debugging purpose)
Reported by Pylint.
Line: 9
Column: 1
import torch.testing._internal.common_utils as common
import torch.testing._internal.common_nn as common_nn
from cpp_api_parity.parity_table_parser import parse_parity_tracker_table
from cpp_api_parity.utils import is_torch_nn_functional_test
from cpp_api_parity import module_impl_check, functional_impl_check, sample_module, sample_functional
# NOTE: turn this on if you want to print source code of all C++ tests (e.g. for debugging purpose)
PRINT_CPP_SOURCE = False
Reported by Pylint.
Line: 10
Column: 1
import torch.testing._internal.common_utils as common
import torch.testing._internal.common_nn as common_nn
from cpp_api_parity.parity_table_parser import parse_parity_tracker_table
from cpp_api_parity.utils import is_torch_nn_functional_test
from cpp_api_parity import module_impl_check, functional_impl_check, sample_module, sample_functional
# NOTE: turn this on if you want to print source code of all C++ tests (e.g. for debugging purpose)
PRINT_CPP_SOURCE = False
Reported by Pylint.
torch/fx/experimental/fx_acc/acc_utils.py
17 issues
Line: 72
Column: 5
def build_raw_tensor_meta(
shape=None,
dtype=None,
requires_grad=None,
stride=None,
memory_format=None,
is_quantized=None,
Reported by Pylint.
Line: 73
Column: 5
def build_raw_tensor_meta(
shape=None,
dtype=None,
requires_grad=None,
stride=None,
memory_format=None,
is_quantized=None,
qscheme=None,
Reported by Pylint.
Line: 74
Column: 5
def build_raw_tensor_meta(
shape=None,
dtype=None,
requires_grad=None,
stride=None,
memory_format=None,
is_quantized=None,
qscheme=None,
q_scale=None,
Reported by Pylint.
Line: 75
Column: 5
shape=None,
dtype=None,
requires_grad=None,
stride=None,
memory_format=None,
is_quantized=None,
qscheme=None,
q_scale=None,
q_zero_point=None,
Reported by Pylint.
Line: 76
Column: 5
dtype=None,
requires_grad=None,
stride=None,
memory_format=None,
is_quantized=None,
qscheme=None,
q_scale=None,
q_zero_point=None,
):
Reported by Pylint.
Line: 77
Column: 5
requires_grad=None,
stride=None,
memory_format=None,
is_quantized=None,
qscheme=None,
q_scale=None,
q_zero_point=None,
):
return TensorMetadata(**locals())
Reported by Pylint.
Line: 78
Column: 5
stride=None,
memory_format=None,
is_quantized=None,
qscheme=None,
q_scale=None,
q_zero_point=None,
):
return TensorMetadata(**locals())
Reported by Pylint.
Line: 79
Column: 5
memory_format=None,
is_quantized=None,
qscheme=None,
q_scale=None,
q_zero_point=None,
):
return TensorMetadata(**locals())
Reported by Pylint.
Line: 80
Column: 5
is_quantized=None,
qscheme=None,
q_scale=None,
q_zero_point=None,
):
return TensorMetadata(**locals())
def draw_graph(traced: torch.fx.GraphModule, fname: str, figname: str = "fx_graph"):
Reported by Pylint.
Line: 1
Column: 1
import inspect
import json
from typing import Any, Tuple, Callable, Union, Dict
import torch
import torch.fx
from torch.fx.experimental.graph_manipulation import (
serialize_module,
)
Reported by Pylint.
torch/_namedtensor_internals.py
17 issues
Line: 3
Column: 1
from collections import OrderedDict
"""
This file contains helper functions that implement experimental functionality
for named tensors in python. All of these are experimental, unstable, and
subject to change or deletion.
"""
Reported by Pylint.
Line: 78
Column: 16
def update_names_with_list(tensor, names, inplace):
# Special case for tensor.rename(None)
if len(names) == 1 and names[0] is None:
return tensor._update_names(None, inplace)
return tensor._update_names(
resolve_ellipsis(names, tensor.names, namer_api_name(inplace)), inplace)
Reported by Pylint.
Line: 80
Column: 12
if len(names) == 1 and names[0] is None:
return tensor._update_names(None, inplace)
return tensor._update_names(
resolve_ellipsis(names, tensor.names, namer_api_name(inplace)), inplace)
def update_names_with_mapping(tensor, rename_map, inplace):
dim_map = build_dim_map(tensor)
Reported by Pylint.
Line: 95
Column: 12
'{new_dim} in Tensor[{dims}] but dim \'{old_dim}\' does not exist')
.format(old_dim=old_dim, new_dim=new_dim, dims=tensor.names,
api_name=namer_api_name(inplace)))
return tensor._update_names(tuple(dim_map.values()), inplace)
def update_names(tensor, names, rename_map, inplace):
"""There are two usages:
Reported by Pylint.
Line: 1
Column: 1
from collections import OrderedDict
"""
This file contains helper functions that implement experimental functionality
for named tensors in python. All of these are experimental, unstable, and
subject to change or deletion.
"""
Reported by Pylint.
Line: 10
Column: 1
"""
def check_serializing_named_tensor(tensor):
if tensor.has_names():
raise RuntimeError(
"NYI: Named tensors don't support serialization. Please drop "
"names via `tensor = tensor.rename(None)` before serialization.")
Reported by Pylint.
Line: 24
Column: 1
for idx, name in enumerate(tensor.names)])
def unzip_namedshape(namedshape):
if isinstance(namedshape, OrderedDict):
namedshape = namedshape.items()
if not hasattr(namedshape, '__iter__') and not isinstance(namedshape, tuple):
raise RuntimeError(
'Expected namedshape to be OrderedDict or iterable of tuples, got: {}'
Reported by Pylint.
Line: 36
Column: 1
return zip(*namedshape)
def namer_api_name(inplace):
if inplace:
return 'rename_'
else:
return 'rename'
Reported by Pylint.
Line: 37
Column: 5
def namer_api_name(inplace):
if inplace:
return 'rename_'
else:
return 'rename'
Reported by Pylint.
Line: 43
Column: 1
return 'rename'
def is_ellipsis(item):
return item == Ellipsis or item == '...'
def single_ellipsis_index(names, fn_name):
ellipsis_indices = [i for i, name in enumerate(names) if is_ellipsis(name)]
if len(ellipsis_indices) >= 2:
Reported by Pylint.
test/package/test_dependency_hooks.py
17 issues
Line: 3
Column: 1
from io import BytesIO
from torch.package import (
PackageExporter,
)
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
Reported by Pylint.
Line: 6
Column: 1
from torch.package import (
PackageExporter,
)
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
Reported by Pylint.
Line: 26
Column: 28
my_externs = set()
def my_extern_hook(package_exporter, module_name):
my_externs.add(module_name)
with PackageExporter(buffer) as exporter:
exporter.extern(["package_a.subpackage", "module_a"])
exporter.register_extern_hook(my_extern_hook)
Reported by Pylint.
Line: 41
Column: 28
my_externs = set()
def my_extern_hook(package_exporter, module_name):
my_externs.add(module_name)
# This also checks ordering, since `remove()` will fail if the value is not in the set.
def my_extern_hook2(package_exporter, module_name):
my_externs.remove(module_name)
Reported by Pylint.
Line: 45
Column: 29
my_externs.add(module_name)
# This also checks ordering, since `remove()` will fail if the value is not in the set.
def my_extern_hook2(package_exporter, module_name):
my_externs.remove(module_name)
with PackageExporter(buffer) as exporter:
exporter.extern(["package_a.subpackage", "module_a"])
exporter.register_extern_hook(my_extern_hook)
Reported by Pylint.
Line: 61
Column: 26
my_mocks = set()
def my_mock_hook(package_exporter, module_name):
my_mocks.add(module_name)
# This also checks ordering, since `remove()` will fail if the value is not in the set.
def my_mock_hook2(package_exporter, module_name):
my_mocks.remove(module_name)
Reported by Pylint.
Line: 65
Column: 27
my_mocks.add(module_name)
# This also checks ordering, since `remove()` will fail if the value is not in the set.
def my_mock_hook2(package_exporter, module_name):
my_mocks.remove(module_name)
with PackageExporter(buffer) as exporter:
exporter.mock(["package_a.subpackage", "module_a"])
exporter.register_mock_hook(my_mock_hook)
Reported by Pylint.
Line: 82
Column: 28
my_externs = set()
my_externs2 = set()
def my_extern_hook(package_exporter, module_name):
my_externs.add(module_name)
def my_extern_hook2(package_exporter, module_name):
my_externs2.add(module_name)
Reported by Pylint.
Line: 85
Column: 29
def my_extern_hook(package_exporter, module_name):
my_externs.add(module_name)
def my_extern_hook2(package_exporter, module_name):
my_externs2.add(module_name)
with PackageExporter(buffer) as exporter:
exporter.extern(["package_a.subpackage", "module_a"])
handle = exporter.register_extern_hook(my_extern_hook)
Reported by Pylint.
Line: 104
Column: 28
my_externs = set()
my_mocks = set()
def my_extern_hook(package_exporter, module_name):
my_externs.add(module_name)
def my_mock_hook(package_exporter, module_name):
my_mocks.add(module_name)
Reported by Pylint.
tools/setup_helpers/env.py
17 issues
Line: 61
Column: 13
cmake_cache_txt = os.path.join(BUILD_DIR, 'CMakeCache.txt')
if os.path.isfile(cmake_cache_txt):
# Found CMakeCache.txt. Use the build type specified in it.
from .cmake import get_cmake_cache_variables_from_file
with open(cmake_cache_txt) as f:
cmake_cache_vars = get_cmake_cache_variables_from_file(f)
# Normally it is anti-pattern to determine build type from CMAKE_BUILD_TYPE because it is not used for
# multi-configuration build tools, such as Visual Studio and XCode. But since we always communicate with
# CMake using CMAKE_BUILD_TYPE from our Python scripts, this is OK here.
Reported by Pylint.
Line: 1
Column: 1
import os
import platform
import struct
import sys
from itertools import chain
from typing import Iterable, List, Optional, cast
IS_WINDOWS = (platform.system() == 'Windows')
Reported by Pylint.
Line: 13
Column: 1
IS_DARWIN = (platform.system() == 'Darwin')
IS_LINUX = (platform.system() == 'Linux')
IS_CONDA = 'conda' in sys.version or 'Continuum' in sys.version or any([x.startswith('CONDA') for x in os.environ])
CONDA_DIR = os.path.join(os.path.dirname(sys.executable), '..')
IS_64BIT = (struct.calcsize("P") == 8)
BUILD_DIR = 'build'
Reported by Pylint.
Line: 13
Column: 68
IS_DARWIN = (platform.system() == 'Darwin')
IS_LINUX = (platform.system() == 'Linux')
IS_CONDA = 'conda' in sys.version or 'Continuum' in sys.version or any([x.startswith('CONDA') for x in os.environ])
CONDA_DIR = os.path.join(os.path.dirname(sys.executable), '..')
IS_64BIT = (struct.calcsize("P") == 8)
BUILD_DIR = 'build'
Reported by Pylint.
Line: 21
Column: 1
BUILD_DIR = 'build'
def check_env_flag(name: str, default: str = '') -> bool:
return os.getenv(name, default).upper() in ['ON', '1', 'YES', 'TRUE', 'Y']
def check_negative_env_flag(name: str, default: str = '') -> bool:
return os.getenv(name, default).upper() in ['OFF', '0', 'NO', 'FALSE', 'N']
Reported by Pylint.
Line: 25
Column: 1
return os.getenv(name, default).upper() in ['ON', '1', 'YES', 'TRUE', 'Y']
def check_negative_env_flag(name: str, default: str = '') -> bool:
return os.getenv(name, default).upper() in ['OFF', '0', 'NO', 'FALSE', 'N']
def gather_paths(env_vars: Iterable[str]) -> List[str]:
return list(chain(*(os.getenv(v, '').split(os.pathsep) for v in env_vars)))
Reported by Pylint.
Line: 29
Column: 1
return os.getenv(name, default).upper() in ['OFF', '0', 'NO', 'FALSE', 'N']
def gather_paths(env_vars: Iterable[str]) -> List[str]:
return list(chain(*(os.getenv(v, '').split(os.pathsep) for v in env_vars)))
def lib_paths_from_base(base_path: str) -> List[str]:
return [os.path.join(base_path, s) for s in ['lib/x64', 'lib', 'lib64']]
Reported by Pylint.
Line: 33
Column: 1
return list(chain(*(os.getenv(v, '').split(os.pathsep) for v in env_vars)))
def lib_paths_from_base(base_path: str) -> List[str]:
return [os.path.join(base_path, s) for s in ['lib/x64', 'lib', 'lib64']]
# We promised that CXXFLAGS should also be affected by CFLAGS
if 'CFLAGS' in os.environ and 'CXXFLAGS' not in os.environ:
Reported by Pylint.
Line: 42
Column: 1
os.environ['CXXFLAGS'] = os.environ['CFLAGS']
class BuildType(object):
"""Checks build type. The build type will be given in :attr:`cmake_build_type_env`. If :attr:`cmake_build_type_env`
is ``None``, then the build type will be inferred from ``CMakeCache.txt``. If ``CMakeCache.txt`` does not exist,
os.environ['CMAKE_BUILD_TYPE'] will be used.
Args:
Reported by Pylint.
Line: 43
Column: 1
class BuildType(object):
"""Checks build type. The build type will be given in :attr:`cmake_build_type_env`. If :attr:`cmake_build_type_env`
is ``None``, then the build type will be inferred from ``CMakeCache.txt``. If ``CMakeCache.txt`` does not exist,
os.environ['CMAKE_BUILD_TYPE'] will be used.
Args:
cmake_build_type_env (str): The value of os.environ['CMAKE_BUILD_TYPE']. If None, the actual build type will be
Reported by Pylint.
caffe2/python/mkl/mkl_fc_op_test.py
17 issues
Line: 7
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
Reported by Pylint.
Line: 8
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
Reported by Pylint.
Line: 15
Column: 22
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLFcTest(hu.HypothesisTestCase):
@given(n=st.integers(1, 5), m=st.integers(1, 5),
k=st.integers(1, 5), **mu.gcs)
Reported by Pylint.
Line: 21
Column: 35
@given(n=st.integers(1, 5), m=st.integers(1, 5),
k=st.integers(1, 5), **mu.gcs)
def test_mkl_fc(self,n, m, k, gc, dc):
X = np.random.rand(m, k).astype(np.float32) - 0.5
W = np.random.rand(n, k).astype(np.float32) - 0.5
b = np.random.rand(n).astype(np.float32) - 0.5
op = core.CreateOperator(
Reported by Pylint.
Line: 36
Column: 5
if __name__ == "__main__":
import unittest
unittest.main()
Reported by Pylint.
Line: 1
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
Reported by Pylint.
Line: 17
Column: 1
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLFcTest(hu.HypothesisTestCase):
@given(n=st.integers(1, 5), m=st.integers(1, 5),
k=st.integers(1, 5), **mu.gcs)
def test_mkl_fc(self,n, m, k, gc, dc):
X = np.random.rand(m, k).astype(np.float32) - 0.5
Reported by Pylint.
Line: 20
Column: 5
class MKLFcTest(hu.HypothesisTestCase):
@given(n=st.integers(1, 5), m=st.integers(1, 5),
k=st.integers(1, 5), **mu.gcs)
def test_mkl_fc(self,n, m, k, gc, dc):
X = np.random.rand(m, k).astype(np.float32) - 0.5
W = np.random.rand(n, k).astype(np.float32) - 0.5
b = np.random.rand(n).astype(np.float32) - 0.5
Reported by Pylint.
Line: 20
Column: 5
class MKLFcTest(hu.HypothesisTestCase):
@given(n=st.integers(1, 5), m=st.integers(1, 5),
k=st.integers(1, 5), **mu.gcs)
def test_mkl_fc(self,n, m, k, gc, dc):
X = np.random.rand(m, k).astype(np.float32) - 0.5
W = np.random.rand(n, k).astype(np.float32) - 0.5
b = np.random.rand(n).astype(np.float32) - 0.5
Reported by Pylint.
Line: 20
Column: 5
class MKLFcTest(hu.HypothesisTestCase):
@given(n=st.integers(1, 5), m=st.integers(1, 5),
k=st.integers(1, 5), **mu.gcs)
def test_mkl_fc(self,n, m, k, gc, dc):
X = np.random.rand(m, k).astype(np.float32) - 0.5
W = np.random.rand(n, k).astype(np.float32) - 0.5
b = np.random.rand(n).astype(np.float32) - 0.5
Reported by Pylint.
benchmarks/operator_benchmark/pt/embeddingbag_test.py
17 issues
Line: 2
Column: 1
import operator_benchmark as op_bench
import torch
import numpy
from pt import configs
"""EmbeddingBag Operator Benchmark"""
class EmbeddingBagBenchmark(op_bench.TorchBenchmarkBase):
def init(self, embeddingbags, dim, mode, input_size, offset, sparse, include_last_offset, device):
Reported by Pylint.
Line: 4
Column: 1
import operator_benchmark as op_bench
import torch
import numpy
from pt import configs
"""EmbeddingBag Operator Benchmark"""
class EmbeddingBagBenchmark(op_bench.TorchBenchmarkBase):
def init(self, embeddingbags, dim, mode, input_size, offset, sparse, include_last_offset, device):
Reported by Pylint.
Line: 8
Column: 29
"""EmbeddingBag Operator Benchmark"""
class EmbeddingBagBenchmark(op_bench.TorchBenchmarkBase):
def init(self, embeddingbags, dim, mode, input_size, offset, sparse, include_last_offset, device):
self.embedding = torch.nn.EmbeddingBag(
num_embeddings=embeddingbags,
embedding_dim=dim,
mode=mode,
Reported by Pylint.
Line: 28
Column: 1
def forward(self, input, offset):
return self.embedding(input, offset)
op_bench.generate_pt_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark)
op_bench.generate_pt_gradient_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 29
Column: 1
return self.embedding(input, offset)
op_bench.generate_pt_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark)
op_bench.generate_pt_gradient_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 6
Column: 1
import numpy
from pt import configs
"""EmbeddingBag Operator Benchmark"""
class EmbeddingBagBenchmark(op_bench.TorchBenchmarkBase):
def init(self, embeddingbags, dim, mode, input_size, offset, sparse, include_last_offset, device):
self.embedding = torch.nn.EmbeddingBag(
num_embeddings=embeddingbags,
Reported by Pylint.
Line: 10
Column: 9
class EmbeddingBagBenchmark(op_bench.TorchBenchmarkBase):
def init(self, embeddingbags, dim, mode, input_size, offset, sparse, include_last_offset, device):
self.embedding = torch.nn.EmbeddingBag(
num_embeddings=embeddingbags,
embedding_dim=dim,
mode=mode,
include_last_offset=include_last_offset,
sparse=sparse).to(device=device)
Reported by Pylint.
Line: 18
Column: 9
sparse=sparse).to(device=device)
numpy.random.seed((1 << 32) - 1)
offsets = torch.LongTensor([offset], device=device)
input = torch.tensor(numpy.random.randint(0, embeddingbags, input_size), device=device).long()
self.inputs = {
"input": input,
"offset": torch.cat((offsets, torch.tensor([input.size(0)], dtype=torch.long)), 0)
}
self.set_module_name('embeddingbag')
Reported by Pylint.
Line: 19
Column: 9
numpy.random.seed((1 << 32) - 1)
offsets = torch.LongTensor([offset], device=device)
input = torch.tensor(numpy.random.randint(0, embeddingbags, input_size), device=device).long()
self.inputs = {
"input": input,
"offset": torch.cat((offsets, torch.tensor([input.size(0)], dtype=torch.long)), 0)
}
self.set_module_name('embeddingbag')
Reported by Pylint.
Line: 25
Column: 23
}
self.set_module_name('embeddingbag')
def forward(self, input, offset):
return self.embedding(input, offset)
op_bench.generate_pt_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark)
op_bench.generate_pt_gradient_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark)
Reported by Pylint.
caffe2/python/mkl/mkl_LRN_speed_test.py
17 issues
Line: 12
Column: 22
from caffe2.python import core, workspace, test_util
@unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.")
class TestMKLBasic(test_util.TestCase):
def testLRNSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
Reported by Pylint.
Line: 75
Column: 9
workspace.FetchBlob("Y_mkl"),
atol=1e-2,
rtol=1e-2)
runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
if __name__ == '__main__':
unittest.main()
Reported by Pylint.
Line: 1
Column: 1
import unittest
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, test_util
Reported by Pylint.
Line: 1
Column: 1
import unittest
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, test_util
Reported by Pylint.
Line: 13
Column: 1
@unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.")
class TestMKLBasic(test_util.TestCase):
def testLRNSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 2, 224, 224).astype(np.float32)
Reported by Pylint.
Line: 14
Column: 5
@unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.")
class TestMKLBasic(test_util.TestCase):
def testLRNSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 2, 224, 224).astype(np.float32)
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
Reported by Pylint.
Line: 14
Column: 5
@unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.")
class TestMKLBasic(test_util.TestCase):
def testLRNSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 2, 224, 224).astype(np.float32)
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
Reported by Pylint.
Line: 14
Column: 5
@unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.")
class TestMKLBasic(test_util.TestCase):
def testLRNSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 2, 224, 224).astype(np.float32)
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
Reported by Pylint.
Line: 18
Column: 9
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 2, 224, 224).astype(np.float32)
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
# Makes sure that feed works.
workspace.FeedBlob("X", X)
workspace.FeedBlob("X_mkl", X, device_option=mkl_do)
net = core.Net("test")
Reported by Pylint.
Line: 26
Column: 1
net = core.Net("test")
# Makes sure that we can run relu.
net.LRN("X", ["Y", "Y_Scale"], size=5, alpha=0.001, beta=0.75, bias=2.0, order="NCHW")
net.LRN("X_mkl", ["Y_mkl", "Y_Scale_mkl"], size=5, alpha=0.001, beta=0.75, bias=2.0, order="NCHW", device_option=mkl_do)
workspace.CreateNet(net)
workspace.RunNet(net)
# makes sure that the results are good.
np.testing.assert_allclose(
Reported by Pylint.
caffe2/python/models/seq2seq/beam_search.py
17 issues
Line: 91
Column: 3
def get_timestep(self):
return self.timestep
# TODO: make attentions a generic state
# data_dependencies is a list of blobs that the operator should wait for
# before beginning execution. This ensures that ops are run in the correct
# order when the RecurrentNetwork op is embedded in a DAGNet, for ex.
def apply(
self,
Reported by Pylint.
Line: 1
Column: 1
## @package beam_search
# Module caffe2.python.models.seq2seq.beam_search
from collections import namedtuple
from caffe2.python import core
Reported by Pylint.
Line: 14
Column: 1
from caffe2.python.models.seq2seq.seq2seq_model_helper import Seq2SeqModelHelper
class BeamSearchForwardOnly(object):
"""
Class generalizing forward beam search for seq2seq models.
Also provides types to specify the recurrent structure of decoding:
Reported by Pylint.
Line: 14
Column: 1
from caffe2.python.models.seq2seq.seq2seq_model_helper import Seq2SeqModelHelper
class BeamSearchForwardOnly(object):
"""
Class generalizing forward beam search for seq2seq models.
Also provides types to specify the recurrent structure of decoding:
Reported by Pylint.
Line: 40
Column: 5
['initial_value', 'state_prev_link', 'state_link'],
)
def __init__(
self,
beam_size,
model,
eos_token_id,
go_token_id=seq2seq_util.GO_ID,
Reported by Pylint.
Line: 82
Column: 5
shape=[1, -1],
)
def get_step_model(self):
return self.step_model
def get_previous_tokens(self):
return self.tokens_t_prev_int32_flattened
Reported by Pylint.
Line: 85
Column: 5
def get_step_model(self):
return self.step_model
def get_previous_tokens(self):
return self.tokens_t_prev_int32_flattened
def get_timestep(self):
return self.timestep
Reported by Pylint.
Line: 88
Column: 5
def get_previous_tokens(self):
return self.tokens_t_prev_int32_flattened
def get_timestep(self):
return self.timestep
# TODO: make attentions a generic state
# data_dependencies is a list of blobs that the operator should wait for
# before beginning execution. This ensures that ops are run in the correct
Reported by Pylint.
Line: 95
Column: 5
# data_dependencies is a list of blobs that the operator should wait for
# before beginning execution. This ensures that ops are run in the correct
# order when the RecurrentNetwork op is embedded in a DAGNet, for ex.
def apply(
self,
inputs,
length,
log_probs,
attentions,
Reported by Pylint.
Line: 95
Column: 5
# data_dependencies is a list of blobs that the operator should wait for
# before beginning execution. This ensures that ops are run in the correct
# order when the RecurrentNetwork op is embedded in a DAGNet, for ex.
def apply(
self,
inputs,
length,
log_probs,
attentions,
Reported by Pylint.
caffe2/python/ideep/dropout_op_test.py
17 issues
Line: 7
Column: 1
import unittest
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
Reported by Pylint.
Line: 8
Column: 1
import unittest
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
Reported by Pylint.
Line: 15
Column: 22
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class DropoutTest(hu.HypothesisTestCase):
@given(X=hu.tensor(),
in_place=st.booleans(),
ratio=st.floats(0, 0.999),
Reported by Pylint.
Line: 1
Column: 1
import unittest
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
Reported by Pylint.
Line: 16
Column: 1
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class DropoutTest(hu.HypothesisTestCase):
@given(X=hu.tensor(),
in_place=st.booleans(),
ratio=st.floats(0, 0.999),
**mu.gcs)
Reported by Pylint.
Line: 22
Column: 5
in_place=st.booleans(),
ratio=st.floats(0, 0.999),
**mu.gcs)
def test_dropout_is_test(self, X, in_place, ratio, gc, dc):
"""Test with is_test=True for a deterministic reference impl."""
op = core.CreateOperator('Dropout', ['X'],
['X' if in_place else 'Y'],
ratio=ratio, is_test=True)
Reported by Pylint.
Line: 22
Column: 5
in_place=st.booleans(),
ratio=st.floats(0, 0.999),
**mu.gcs)
def test_dropout_is_test(self, X, in_place, ratio, gc, dc):
"""Test with is_test=True for a deterministic reference impl."""
op = core.CreateOperator('Dropout', ['X'],
['X' if in_place else 'Y'],
ratio=ratio, is_test=True)
Reported by Pylint.
Line: 22
Column: 5
in_place=st.booleans(),
ratio=st.floats(0, 0.999),
**mu.gcs)
def test_dropout_is_test(self, X, in_place, ratio, gc, dc):
"""Test with is_test=True for a deterministic reference impl."""
op = core.CreateOperator('Dropout', ['X'],
['X' if in_place else 'Y'],
ratio=ratio, is_test=True)
Reported by Pylint.
Line: 22
Column: 5
in_place=st.booleans(),
ratio=st.floats(0, 0.999),
**mu.gcs)
def test_dropout_is_test(self, X, in_place, ratio, gc, dc):
"""Test with is_test=True for a deterministic reference impl."""
op = core.CreateOperator('Dropout', ['X'],
['X' if in_place else 'Y'],
ratio=ratio, is_test=True)
Reported by Pylint.
Line: 24
Column: 9
**mu.gcs)
def test_dropout_is_test(self, X, in_place, ratio, gc, dc):
"""Test with is_test=True for a deterministic reference impl."""
op = core.CreateOperator('Dropout', ['X'],
['X' if in_place else 'Y'],
ratio=ratio, is_test=True)
self.assertDeviceChecks(dc, op, [X], [0])
# No sense in checking gradients for test phase
Reported by Pylint.