The following issues were found:
test/distributed/elastic/rendezvous/api_test.py
41 issues
Line: 10
Column: 1
from typing import Any, Dict, SupportsInt, Tuple, cast
from unittest import TestCase
from torch.distributed import Store
from torch.distributed.elastic.rendezvous import (
RendezvousHandler,
RendezvousHandlerRegistry,
RendezvousParameters,
)
Reported by Pylint.
Line: 11
Column: 1
from unittest import TestCase
from torch.distributed import Store
from torch.distributed.elastic.rendezvous import (
RendezvousHandler,
RendezvousHandlerRegistry,
RendezvousParameters,
)
Reported by Pylint.
Line: 237
Column: 32
def test_register_raises_error_if_called_twice_with_different_creators(self) -> None:
self._registry.register("dummy_backend", self._create_handler)
other_create_handler = lambda p: _DummyRendezvousHandler(p) # noqa: E731
with self.assertRaisesRegex(
ValueError,
r"^The rendezvous backend 'dummy_backend' cannot be registered with "
rf"'{other_create_handler}' as it is already registered with '{self._create_handler}'.$",
Reported by Pylint.
Line: 1
Column: 1
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, SupportsInt, Tuple, cast
from unittest import TestCase
Reported by Pylint.
Line: 18
Column: 1
)
class RendezvousParametersTest(TestCase):
def setUp(self) -> None:
self._backend = "dummy_backend"
self._endpoint = "dummy_endpoint"
self._run_id = "dummy_run_id"
self._min_nodes = 3
Reported by Pylint.
Line: 37
Column: 5
**self._kwargs,
)
def test_init_initializes_params(self) -> None:
self._kwargs["dummy_param"] = "x"
params = self._create_params()
self.assertEqual(params.backend, self._backend)
Reported by Pylint.
Line: 50
Column: 5
self.assertEqual(params.get("dummy_param"), "x")
def test_init_initializes_params_if_min_nodes_equals_to_1(self) -> None:
self._min_nodes = 1
params = self._create_params()
self.assertEqual(params.min_nodes, self._min_nodes)
Reported by Pylint.
Line: 58
Column: 5
self.assertEqual(params.min_nodes, self._min_nodes)
self.assertEqual(params.max_nodes, self._max_nodes)
def test_init_initializes_params_if_min_and_max_nodes_are_equal(self) -> None:
self._max_nodes = 3
params = self._create_params()
self.assertEqual(params.min_nodes, self._min_nodes)
Reported by Pylint.
Line: 66
Column: 5
self.assertEqual(params.min_nodes, self._min_nodes)
self.assertEqual(params.max_nodes, self._max_nodes)
def test_init_raises_error_if_backend_is_none_or_empty(self) -> None:
for backend in [None, ""]:
with self.subTest(backend=backend):
self._backend = backend # type: ignore[assignment]
with self.assertRaisesRegex(
Reported by Pylint.
Line: 77
Column: 5
):
self._create_params()
def test_init_raises_error_if_min_nodes_is_less_than_1(self) -> None:
for min_nodes in [0, -1, -5]:
with self.subTest(min_nodes=min_nodes):
self._min_nodes = min_nodes
with self.assertRaisesRegex(
Reported by Pylint.
test/fx/test_future.py
41 issues
Line: 2
Column: 1
from __future__ import annotations # type: ignore[attr-defined]
import torch
import typing
from torch.fx import symbolic_trace
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
Reported by Pylint.
Line: 4
Column: 1
from __future__ import annotations # type: ignore[attr-defined]
import torch
import typing
from torch.fx import symbolic_trace
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
Reported by Pylint.
Line: 7
Column: 24
from torch.fx import symbolic_trace
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
# No forward references
class M1(torch.nn.Module):
def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:
Reported by Pylint.
Line: 12
Column: 23
# No forward references
class M1(torch.nn.Module):
def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:
return a(x)
# Forward references
class M2(torch.nn.Module):
def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
Reported by Pylint.
Line: 17
Column: 23
# Forward references
class M2(torch.nn.Module):
def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
return a(x)
# Non-torch annotation with no internal forward references
class M3(torch.nn.Module):
def forward(self, x: typing.List[torch.Tensor], a: A) -> torch.Tensor:
Reported by Pylint.
Line: 22
Column: 23
# Non-torch annotation with no internal forward references
class M3(torch.nn.Module):
def forward(self, x: typing.List[torch.Tensor], a: A) -> torch.Tensor:
return a(x[0])
# Non-torch annotation with internal forward references
class M4(torch.nn.Module):
def forward(self, x: typing.List['torch.Tensor'], a: A) -> 'torch.Tensor':
Reported by Pylint.
Line: 27
Column: 23
# Non-torch annotation with internal forward references
class M4(torch.nn.Module):
def forward(self, x: typing.List['torch.Tensor'], a: A) -> 'torch.Tensor':
return a(x[0])
x = torch.rand(2, 3)
ref = torch.add(x, x)
Reported by Pylint.
Line: 1
Column: 1
from __future__ import annotations # type: ignore[attr-defined]
import torch
import typing
from torch.fx import symbolic_trace
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
Reported by Pylint.
Line: 3
Column: 1
from __future__ import annotations # type: ignore[attr-defined]
import torch
import typing
from torch.fx import symbolic_trace
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
Reported by Pylint.
Line: 6
Column: 1
import typing
from torch.fx import symbolic_trace
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
# No forward references
class M1(torch.nn.Module):
Reported by Pylint.
test/distributed/elastic/multiprocessing/errors/api_test.py
41 issues
Line: 10
Column: 1
import unittest
from unittest import mock
from torch.distributed.elastic.multiprocessing.errors import (
ChildFailedError,
ProcessFailure,
record,
)
from torch.distributed.elastic.multiprocessing.errors.error_handler import _write_error
Reported by Pylint.
Line: 15
Column: 1
ProcessFailure,
record,
)
from torch.distributed.elastic.multiprocessing.errors.error_handler import _write_error
from torch.testing._internal.common_utils import TEST_WITH_TSAN
class SentinelError(Exception):
# exists so that we can validate that
Reported by Pylint.
Line: 16
Column: 1
record,
)
from torch.distributed.elastic.multiprocessing.errors.error_handler import _write_error
from torch.testing._internal.common_utils import TEST_WITH_TSAN
class SentinelError(Exception):
# exists so that we can validate that
# the correct error is raised and propagated
Reported by Pylint.
Line: 49
Column: 47
if TEST_WITH_TSAN:
print("test incompatible with tsan", file=sys.stderr)
sys.exit(0)
class ApiTest(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp(prefix=self.__class__.__name__)
Reported by Pylint.
Line: 50
Column: 5
if TEST_WITH_TSAN:
print("test incompatible with tsan", file=sys.stderr)
sys.exit(0)
class ApiTest(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp(prefix=self.__class__.__name__)
self.test_error_file = os.path.join(self.test_dir, "error.json")
Reported by Pylint.
Line: 132
Column: 9
ex = ChildFailedError("trainer.par", {0: pf0, 1: pf1, 2: pf2})
self.assertEqual(pf0, ex.get_first_failure()[1])
# print is intentional and should prints something like this:
"""
*********************************************
trainer.par FAILED
=============================================
Root Cause:
[0]:
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/env python3
import json
import os
import shutil
import signal
import tempfile
import unittest
from unittest import mock
Reported by Pylint.
Line: 19
Column: 1
from torch.testing._internal.common_utils import TEST_WITH_TSAN
class SentinelError(Exception):
# exists so that we can validate that
# the correct error is raised and propagated
pass
Reported by Pylint.
Line: 26
Column: 1
@record
def raise_exception_fn():
raise SentinelError("foobar")
@record
def good_fn():
Reported by Pylint.
Line: 31
Column: 1
@record
def good_fn():
print("hello world")
@record
def raise_child_failure_error_fn(name, child_error_file=""):
Reported by Pylint.
caffe2/python/operator_test/one_hot_ops_test.py
41 issues
Line: 8
Column: 1
from caffe2.python import core, workspace
from caffe2.proto import caffe2_pb2
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
Reported by Pylint.
Line: 11
Column: 1
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
def _one_hots():
index_size = st.integers(min_value=1, max_value=5)
Reported by Pylint.
Line: 35
Column: 41
min_dim=2, max_dim=2, dtype=np.int32,
elements=st.integers(min_value=0, max_value=10)),
**hu.gcs_cpu_only)
def test_batch_one_hot(self, x, gc, dc):
d = x.shape[1]
lens = []
vals = []
for i in range(0, d):
val = np.unique(x[:, i])
Reported by Pylint.
Line: 67
Column: 58
seed=st.integers(min_value=0, max_value=1000),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_batch_bucketized_one_hot(self, x, seed, gc, dc):
np.random.seed(seed)
d = x.shape[1]
lens = np.random.randint(low=1, high=5, size=d)
boundaries = []
for i in range(d):
Reported by Pylint.
Line: 119
Column: 58
elements=st.integers(min_value=0, max_value=42)),
end_padding=st.integers(min_value=0, max_value=2),
**hu.gcs)
def test_one_hot(self, hot_indices, end_padding, gc, dc):
def one_hot_ref(hot_indices, size):
out = np.zeros([len(hot_indices), size], dtype=float)
x = enumerate(hot_indices)
for i, x in enumerate(hot_indices):
Reported by Pylint.
Line: 172
Column: 66
elements=st.integers(min_value=-5, max_value=5)),
seed=st.integers(min_value=0, max_value=1000),
**hu.gcs_cpu_only)
def test_batch_bucket_one_hot_shape_inference(self, x, seed, gc, dc):
np.random.seed(seed)
d = x.shape[1]
lens = np.random.randint(low=1, high=5, size=d)
boundaries = []
for i in range(d):
Reported by Pylint.
Line: 172
Column: 70
elements=st.integers(min_value=-5, max_value=5)),
seed=st.integers(min_value=0, max_value=1000),
**hu.gcs_cpu_only)
def test_batch_bucket_one_hot_shape_inference(self, x, seed, gc, dc):
np.random.seed(seed)
d = x.shape[1]
lens = np.random.randint(low=1, high=5, size=d)
boundaries = []
for i in range(d):
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core, workspace
from caffe2.proto import caffe2_pb2
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 29
Column: 1
max_size=sum(x[1]))))
class TestOneHotOps(serial.SerializedTestCase):
@serial.given(
x=hu.tensor(
min_dim=2, max_dim=2, dtype=np.int32,
elements=st.integers(min_value=0, max_value=10)),
**hu.gcs_cpu_only)
Reported by Pylint.
Line: 35
Column: 5
min_dim=2, max_dim=2, dtype=np.int32,
elements=st.integers(min_value=0, max_value=10)),
**hu.gcs_cpu_only)
def test_batch_one_hot(self, x, gc, dc):
d = x.shape[1]
lens = []
vals = []
for i in range(0, d):
val = np.unique(x[:, i])
Reported by Pylint.
torch/nn/intrinsic/modules/fused.py
41 issues
Line: 1
Column: 1
import torch
from torch.nn import Conv1d, Conv2d, Conv3d, ReLU, Linear, BatchNorm1d, BatchNorm2d, BatchNorm3d
# Used for identifying intrinsic modules used in quantization
class _FusedModule(torch.nn.Sequential):
pass
class ConvReLU1d(_FusedModule):
r"""This is a sequential container which calls the Conv1d and ReLU modules.
Reported by Pylint.
Line: 12
Column: 16
r"""This is a sequential container which calls the Conv1d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, relu):
assert type(conv) == Conv1d and type(relu) == ReLU, \
'Incorrect types for input modules{}{}'.format(
type(conv), type(relu))
super().__init__(conv, relu)
class ConvReLU2d(_FusedModule):
Reported by Pylint.
Line: 12
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
r"""This is a sequential container which calls the Conv1d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, relu):
assert type(conv) == Conv1d and type(relu) == ReLU, \
'Incorrect types for input modules{}{}'.format(
type(conv), type(relu))
super().__init__(conv, relu)
class ConvReLU2d(_FusedModule):
Reported by Bandit.
Line: 12
Column: 41
r"""This is a sequential container which calls the Conv1d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, relu):
assert type(conv) == Conv1d and type(relu) == ReLU, \
'Incorrect types for input modules{}{}'.format(
type(conv), type(relu))
super().__init__(conv, relu)
class ConvReLU2d(_FusedModule):
Reported by Pylint.
Line: 21
Column: 16
r"""This is a sequential container which calls the Conv2d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, relu):
assert type(conv) == Conv2d and type(relu) == ReLU, \
'Incorrect types for input modules{}{}'.format(
type(conv), type(relu))
super().__init__(conv, relu)
class ConvReLU3d(_FusedModule):
Reported by Pylint.
Line: 21
Column: 41
r"""This is a sequential container which calls the Conv2d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, relu):
assert type(conv) == Conv2d and type(relu) == ReLU, \
'Incorrect types for input modules{}{}'.format(
type(conv), type(relu))
super().__init__(conv, relu)
class ConvReLU3d(_FusedModule):
Reported by Pylint.
Line: 21
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
r"""This is a sequential container which calls the Conv2d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, relu):
assert type(conv) == Conv2d and type(relu) == ReLU, \
'Incorrect types for input modules{}{}'.format(
type(conv), type(relu))
super().__init__(conv, relu)
class ConvReLU3d(_FusedModule):
Reported by Bandit.
Line: 30
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
r"""This is a sequential container which calls the Conv3d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, relu):
assert type(conv) == Conv3d and type(relu) == ReLU, \
'Incorrect types for input modules{}{}'.format(
type(conv), type(relu))
super().__init__(conv, relu)
class LinearReLU(_FusedModule):
Reported by Bandit.
Line: 30
Column: 16
r"""This is a sequential container which calls the Conv3d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, relu):
assert type(conv) == Conv3d and type(relu) == ReLU, \
'Incorrect types for input modules{}{}'.format(
type(conv), type(relu))
super().__init__(conv, relu)
class LinearReLU(_FusedModule):
Reported by Pylint.
Line: 30
Column: 41
r"""This is a sequential container which calls the Conv3d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, relu):
assert type(conv) == Conv3d and type(relu) == ReLU, \
'Incorrect types for input modules{}{}'.format(
type(conv), type(relu))
super().__init__(conv, relu)
class LinearReLU(_FusedModule):
Reported by Pylint.
test/test_jit_string.py
41 issues
Line: 1
Column: 1
from test_jit import JitTestCase
from torch.testing._internal.common_utils import run_tests
from typing import List, Tuple
class TestScript(JitTestCase):
def test_str_ops(self):
def test_str_is(s: str) -> Tuple[bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool]:
return s.isupper(), s.islower(), s.isdigit(), s.isspace(), \
Reported by Pylint.
Line: 2
Column: 1
from test_jit import JitTestCase
from torch.testing._internal.common_utils import run_tests
from typing import List, Tuple
class TestScript(JitTestCase):
def test_str_ops(self):
def test_str_is(s: str) -> Tuple[bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool]:
return s.isupper(), s.islower(), s.isdigit(), s.isspace(), \
Reported by Pylint.
Line: 82
Column: 13
with self.assertRaises(Exception):
test_str_center_error("error")
test_ljust("error")
def test_count() -> Tuple[int, int, int, int, int, int, int, int, int, int, int, int]:
return (
"hello".count("h"),
"hello".count("h", 0, 1),
Reported by Pylint.
Line: 50
Column: 9
def test_ljust_fc(s: str, i: int, fc: str) -> str:
return s.ljust(i, fc)
def test_ljust_fc_err(s: str) -> str:
return s.ljust(10, '**')
def test_rjust(s: str, i: int) -> str:
return s.rjust(i)
Reported by Pylint.
Line: 59
Column: 9
def test_rjust_fc(s: str, i: int, fc: str) -> str:
return s.rjust(i, fc)
def test_rjust_fc_err(s: str) -> str:
return s.rjust(10, '**')
def test_zfill(s: str, i: int) -> str:
return s.zfill(i)
Reported by Pylint.
Line: 65
Column: 13
def test_zfill(s: str, i: int) -> str:
return s.zfill(i)
for input in inputs:
self.checkScript(test_str_is, (input,))
self.checkScript(test_str_to, (input,))
self.checkScript(test_str_strip, (input,))
for char_set in ["abc", "123", " ", "\t"]:
self.checkScript(test_str_strip_char_set, (input, char_set))
Reported by Pylint.
Line: 1
Column: 1
from test_jit import JitTestCase
from torch.testing._internal.common_utils import run_tests
from typing import List, Tuple
class TestScript(JitTestCase):
def test_str_ops(self):
def test_str_is(s: str) -> Tuple[bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool]:
return s.isupper(), s.islower(), s.isdigit(), s.isspace(), \
Reported by Pylint.
Line: 2
Column: 1
from test_jit import JitTestCase
from torch.testing._internal.common_utils import run_tests
from typing import List, Tuple
class TestScript(JitTestCase):
def test_str_ops(self):
def test_str_is(s: str) -> Tuple[bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool]:
return s.isupper(), s.islower(), s.isdigit(), s.isspace(), \
Reported by Pylint.
Line: 4
Column: 1
from test_jit import JitTestCase
from torch.testing._internal.common_utils import run_tests
from typing import List, Tuple
class TestScript(JitTestCase):
def test_str_ops(self):
def test_str_is(s: str) -> Tuple[bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool]:
return s.isupper(), s.islower(), s.isdigit(), s.isspace(), \
Reported by Pylint.
Line: 6
Column: 1
from typing import List, Tuple
class TestScript(JitTestCase):
def test_str_ops(self):
def test_str_is(s: str) -> Tuple[bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool]:
return s.isupper(), s.islower(), s.isdigit(), s.isspace(), \
s.isalnum(), s.isalpha(), s.isdecimal(), s.isnumeric(), \
s.isidentifier(), s.istitle(), s.isprintable()
Reported by Pylint.
benchmarks/distributed/rpc/parameter_server/launcher.py
41 issues
Line: 20
Column: 1
trainer_map,
)
import torch
import torch.distributed as c10d
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
from torch.distributed.rpc import TensorPipeRpcBackendOptions
from torch.futures import wait_all
Reported by Pylint.
Line: 21
Column: 1
)
import torch
import torch.distributed as c10d
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
from torch.distributed.rpc import TensorPipeRpcBackendOptions
from torch.futures import wait_all
from torch.utils.data import DataLoader
Reported by Pylint.
Line: 22
Column: 1
import torch
import torch.distributed as c10d
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
from torch.distributed.rpc import TensorPipeRpcBackendOptions
from torch.futures import wait_all
from torch.utils.data import DataLoader
Reported by Pylint.
Line: 23
Column: 1
import torch
import torch.distributed as c10d
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
from torch.distributed.rpc import TensorPipeRpcBackendOptions
from torch.futures import wait_all
from torch.utils.data import DataLoader
Reported by Pylint.
Line: 24
Column: 1
import torch.distributed as c10d
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
from torch.distributed.rpc import TensorPipeRpcBackendOptions
from torch.futures import wait_all
from torch.utils.data import DataLoader
def get_name(rank, args):
Reported by Pylint.
Line: 25
Column: 1
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
from torch.distributed.rpc import TensorPipeRpcBackendOptions
from torch.futures import wait_all
from torch.utils.data import DataLoader
def get_name(rank, args):
r"""
Reported by Pylint.
Line: 26
Column: 1
import torch.multiprocessing as mp
from torch.distributed.rpc import TensorPipeRpcBackendOptions
from torch.futures import wait_all
from torch.utils.data import DataLoader
def get_name(rank, args):
r"""
A function that gets the name for the rank
Reported by Pylint.
Line: 29
Column: 20
from torch.utils.data import DataLoader
def get_name(rank, args):
r"""
A function that gets the name for the rank
argument
Args:
rank (int): process number in the world
Reported by Pylint.
Line: 47
Column: 21
return "master"
def get_server_rank(args, rank):
r"""
A function that gets the server rank for
the rank argument.
Args:
args (parser): benchmark configurations
Reported by Pylint.
Line: 60
Column: 26
return rank // tps + s_offset
def get_cuda_server_rank(args, rank):
r"""
A function that gets the cudaserver rank for
the rank argument.
Args:
args (parser): benchmark configurations
Reported by Pylint.
caffe2/contrib/fakelowp/test/test_layernorm_nnpi_fp16.py
40 issues
Line: 2
Column: 1
import numpy as np
import caffe2.python.fakelowp.init_shared_libs # noqa
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python import workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
from hypothesis import given, settings
from hypothesis import strategies as st
Reported by Pylint.
Line: 3
Column: 1
import numpy as np
import caffe2.python.fakelowp.init_shared_libs # noqa
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python import workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
from hypothesis import given, settings
from hypothesis import strategies as st
Reported by Pylint.
Line: 4
Column: 1
import numpy as np
import caffe2.python.fakelowp.init_shared_libs # noqa
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python import workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
from hypothesis import given, settings
from hypothesis import strategies as st
Reported by Pylint.
Line: 5
Column: 1
import caffe2.python.fakelowp.init_shared_libs # noqa
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python import workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
from hypothesis import given, settings
from hypothesis import strategies as st
import caffe2.python.serialized_test.serialized_test_util as serial
Reported by Pylint.
Line: 6
Column: 1
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python import workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
from hypothesis import given, settings
from hypothesis import strategies as st
import caffe2.python.serialized_test.serialized_test_util as serial
import datetime
Reported by Pylint.
Line: 7
Column: 1
from caffe2.python import core
from caffe2.python import workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
from hypothesis import given, settings
from hypothesis import strategies as st
import caffe2.python.serialized_test.serialized_test_util as serial
import datetime
Reported by Pylint.
Line: 8
Column: 1
from caffe2.python import workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
from hypothesis import given, settings
from hypothesis import strategies as st
import caffe2.python.serialized_test.serialized_test_util as serial
import datetime
core.GlobalInit(["caffe2",
Reported by Pylint.
Line: 9
Column: 1
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
from hypothesis import given, settings
from hypothesis import strategies as st
import caffe2.python.serialized_test.serialized_test_util as serial
import datetime
core.GlobalInit(["caffe2",
"--glow_global_fp16=1",
Reported by Pylint.
Line: 10
Column: 1
from caffe2.python.fakelowp.test_utils import print_test_debug_info
from hypothesis import given, settings
from hypothesis import strategies as st
import caffe2.python.serialized_test.serialized_test_util as serial
import datetime
core.GlobalInit(["caffe2",
"--glow_global_fp16=1",
"--glow_global_fused_scale_offset_fp16=1",
Reported by Pylint.
Line: 2
Column: 1
import numpy as np
import caffe2.python.fakelowp.init_shared_libs # noqa
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python import workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
from hypothesis import given, settings
from hypothesis import strategies as st
Reported by Pylint.
torch/fx/experimental/unification/multipledispatch/dispatcher.py
40 issues
Line: 3
Column: 1
from warnings import warn
import inspect
from .conflict import ordering, ambiguities, super_signature, AmbiguityWarning
from .utils import expand_tuples
from .variadic import Variadic, isvariadic
import itertools as itl
class MDNotImplementedError(NotImplementedError):
Reported by Pylint.
Line: 4
Column: 1
from warnings import warn
import inspect
from .conflict import ordering, ambiguities, super_signature, AmbiguityWarning
from .utils import expand_tuples
from .variadic import Variadic, isvariadic
import itertools as itl
class MDNotImplementedError(NotImplementedError):
Reported by Pylint.
Line: 5
Column: 1
import inspect
from .conflict import ordering, ambiguities, super_signature, AmbiguityWarning
from .utils import expand_tuples
from .variadic import Variadic, isvariadic
import itertools as itl
class MDNotImplementedError(NotImplementedError):
""" A NotImplementedError for multiple dispatch """
Reported by Pylint.
Line: 13
Column: 32
""" A NotImplementedError for multiple dispatch """
def ambiguity_warn(dispatcher, ambiguities):
""" Raise warning when ambiguity is detected
Parameters
----------
dispatcher : Dispatcher
The dispatcher on which the ambiguity was detected
Reported by Pylint.
Line: 37
Column: 22
)
def restart_ordering(on_ambiguity=ambiguity_warn):
"""Deprecated interface to temporarily resume ordering.
"""
warn(
'restart_ordering is deprecated, if you would like to eagerly order'
'the dispatchers, you should call the ``reorder()`` method on each'
Reported by Pylint.
Line: 75
Column: 5
# we're not matching a variadic argument, so move to the next
# element in the signature
sig = next(sigiter)
else:
try:
sig = next(sigiter)
except StopIteration:
assert isvariadic(sig)
yield True
Reported by Pylint.
Line: 241
Column: 9
return self.reorder()
def reorder(self, on_ambiguity=ambiguity_warn):
self._ordering = od = ordering(self.funcs)
amb = ambiguities(self.funcs)
if amb:
on_ambiguity(self, amb)
return od
Reported by Pylint.
Line: 254
Column: 17
except KeyError:
func = self.dispatch(*types)
if not func:
raise NotImplementedError(
'Could not find signature for %s: <%s>' %
(self.name, str_signature(types)))
self._cache[types] = func
try:
return func(*args, **kwargs)
Reported by Pylint.
Line: 270
Column: 13
except MDNotImplementedError:
pass
raise NotImplementedError(
"Matching functions for "
"%s: <%s> found, but none completed successfully" % (
self.name, str_signature(types),),)
def __str__(self):
Reported by Pylint.
Line: 333
Column: 9
def __setstate__(self, d):
self.name = d['name']
self.funcs = d['funcs']
self._ordering = ordering(self.funcs)
self._cache = dict()
@property
def __doc__(self):
docs = ["Multiply dispatched method: %s" % self.name]
Reported by Pylint.
caffe2/python/operator_test/feature_maps_ops_test.py
40 issues
Line: 1
Column: 1
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
import numpy as np
Reported by Pylint.
Line: 10
Column: 1
import numpy as np
class TestFeatureMapsOps(TestCase):
def test_merge_dense_feature_tensors(self):
op = core.CreateOperator(
"MergeDenseFeatureTensors",
[
Reported by Pylint.
Line: 12
Column: 5
class TestFeatureMapsOps(TestCase):
def test_merge_dense_feature_tensors(self):
op = core.CreateOperator(
"MergeDenseFeatureTensors",
[
"in1", "in1_presence",
],
Reported by Pylint.
Line: 12
Column: 5
class TestFeatureMapsOps(TestCase):
def test_merge_dense_feature_tensors(self):
op = core.CreateOperator(
"MergeDenseFeatureTensors",
[
"in1", "in1_presence",
],
Reported by Pylint.
Line: 13
Column: 9
class TestFeatureMapsOps(TestCase):
def test_merge_dense_feature_tensors(self):
op = core.CreateOperator(
"MergeDenseFeatureTensors",
[
"in1", "in1_presence",
],
[
Reported by Pylint.
Line: 49
Column: 5
)
def test_merge_single_scalar_feature_tensors(self):
op = core.CreateOperator(
"MergeSingleScalarFeatureTensors",
[
"in1", "in1_presence",
"in2", "in2_presence",
Reported by Pylint.
Line: 49
Column: 5
)
def test_merge_single_scalar_feature_tensors(self):
op = core.CreateOperator(
"MergeSingleScalarFeatureTensors",
[
"in1", "in1_presence",
"in2", "in2_presence",
Reported by Pylint.
Line: 50
Column: 9
def test_merge_single_scalar_feature_tensors(self):
op = core.CreateOperator(
"MergeSingleScalarFeatureTensors",
[
"in1", "in1_presence",
"in2", "in2_presence",
],
Reported by Pylint.
Line: 96
Column: 5
np.array([11.1, 12.1, 12.2], dtype=np.float)
)
def test_merge_single_scalar_feature_tensors_gradient(self):
op = core.CreateOperator(
"MergeSingleScalarFeatureTensorsGradient",
[
"in1_presence",
"in2_presence",
Reported by Pylint.
Line: 96
Column: 5
np.array([11.1, 12.1, 12.2], dtype=np.float)
)
def test_merge_single_scalar_feature_tensors_gradient(self):
op = core.CreateOperator(
"MergeSingleScalarFeatureTensorsGradient",
[
"in1_presence",
"in2_presence",
Reported by Pylint.