The following issues were found:
caffe2/python/operator_test/order_switch_test.py
16 issues
Line: 4
Column: 1
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
from caffe2.python import core, utils
from hypothesis import given, settings
class OrderSwitchOpsTest(hu.HypothesisTestCase):
Reported by Pylint.
Line: 6
Column: 1
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
from caffe2.python import core, utils
from hypothesis import given, settings
class OrderSwitchOpsTest(hu.HypothesisTestCase):
@given(
X=hu.tensor(min_dim=3, max_dim=5, min_value=1, max_value=5),
Reported by Pylint.
Line: 1
Column: 1
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
from caffe2.python import core, utils
from hypothesis import given, settings
class OrderSwitchOpsTest(hu.HypothesisTestCase):
Reported by Pylint.
Line: 9
Column: 1
from hypothesis import given, settings
class OrderSwitchOpsTest(hu.HypothesisTestCase):
@given(
X=hu.tensor(min_dim=3, max_dim=5, min_value=1, max_value=5),
engine=st.sampled_from(["", "CUDNN"]),
**hu.gcs
)
Reported by Pylint.
Line: 15
Column: 5
engine=st.sampled_from(["", "CUDNN"]),
**hu.gcs
)
@settings(deadline=10000)
def test_nchw2nhwc(self, X, engine, gc, dc):
op = core.CreateOperator("NCHW2NHWC", ["X"], ["Y"], engine=engine)
def nchw2nhwc_ref(X):
return (utils.NCHW2NHWC(X),)
Reported by Pylint.
Line: 15
Column: 5
engine=st.sampled_from(["", "CUDNN"]),
**hu.gcs
)
@settings(deadline=10000)
def test_nchw2nhwc(self, X, engine, gc, dc):
op = core.CreateOperator("NCHW2NHWC", ["X"], ["Y"], engine=engine)
def nchw2nhwc_ref(X):
return (utils.NCHW2NHWC(X),)
Reported by Pylint.
Line: 15
Column: 5
engine=st.sampled_from(["", "CUDNN"]),
**hu.gcs
)
@settings(deadline=10000)
def test_nchw2nhwc(self, X, engine, gc, dc):
op = core.CreateOperator("NCHW2NHWC", ["X"], ["Y"], engine=engine)
def nchw2nhwc_ref(X):
return (utils.NCHW2NHWC(X),)
Reported by Pylint.
Line: 15
Column: 5
engine=st.sampled_from(["", "CUDNN"]),
**hu.gcs
)
@settings(deadline=10000)
def test_nchw2nhwc(self, X, engine, gc, dc):
op = core.CreateOperator("NCHW2NHWC", ["X"], ["Y"], engine=engine)
def nchw2nhwc_ref(X):
return (utils.NCHW2NHWC(X),)
Reported by Pylint.
Line: 17
Column: 9
)
@settings(deadline=10000)
def test_nchw2nhwc(self, X, engine, gc, dc):
op = core.CreateOperator("NCHW2NHWC", ["X"], ["Y"], engine=engine)
def nchw2nhwc_ref(X):
return (utils.NCHW2NHWC(X),)
self.assertReferenceChecks(gc, op, [X], nchw2nhwc_ref)
Reported by Pylint.
Line: 19
Column: 9
def test_nchw2nhwc(self, X, engine, gc, dc):
op = core.CreateOperator("NCHW2NHWC", ["X"], ["Y"], engine=engine)
def nchw2nhwc_ref(X):
return (utils.NCHW2NHWC(X),)
self.assertReferenceChecks(gc, op, [X], nchw2nhwc_ref)
self.assertGradientChecks(gc, op, [X], 0, [0])
self.assertDeviceChecks(dc, op, [X], [0])
Reported by Pylint.
caffe2/python/operator_test/lpnorm_op_test.py
16 issues
Line: 9
Column: 1
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
from hypothesis import given, settings
import hypothesis.strategies as st
class LpnormTest(hu.HypothesisTestCase):
@given(inputs=hu.tensors(n=1,
Reported by Pylint.
Line: 10
Column: 1
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
from hypothesis import given, settings
import hypothesis.strategies as st
class LpnormTest(hu.HypothesisTestCase):
@given(inputs=hu.tensors(n=1,
min_dim=1,
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
from hypothesis import given, settings
Reported by Pylint.
Line: 13
Column: 1
import hypothesis.strategies as st
class LpnormTest(hu.HypothesisTestCase):
@given(inputs=hu.tensors(n=1,
min_dim=1,
max_dim=3,
dtype=np.float32),
**hu.gcs)
Reported by Pylint.
Line: 20
Column: 5
dtype=np.float32),
**hu.gcs)
@settings(deadline=10000)
def test_Lp_Norm(self, inputs, gc, dc):
X = inputs[0]
# avoid kinks by moving away from 0
X += 0.02 * np.sign(X)
X[X == 0.0] += 0.02
self.ws.create_blob("X").feed(X)
Reported by Pylint.
Line: 20
Column: 5
dtype=np.float32),
**hu.gcs)
@settings(deadline=10000)
def test_Lp_Norm(self, inputs, gc, dc):
X = inputs[0]
# avoid kinks by moving away from 0
X += 0.02 * np.sign(X)
X[X == 0.0] += 0.02
self.ws.create_blob("X").feed(X)
Reported by Pylint.
Line: 20
Column: 5
dtype=np.float32),
**hu.gcs)
@settings(deadline=10000)
def test_Lp_Norm(self, inputs, gc, dc):
X = inputs[0]
# avoid kinks by moving away from 0
X += 0.02 * np.sign(X)
X[X == 0.0] += 0.02
self.ws.create_blob("X").feed(X)
Reported by Pylint.
Line: 20
Column: 5
dtype=np.float32),
**hu.gcs)
@settings(deadline=10000)
def test_Lp_Norm(self, inputs, gc, dc):
X = inputs[0]
# avoid kinks by moving away from 0
X += 0.02 * np.sign(X)
X[X == 0.0] += 0.02
self.ws.create_blob("X").feed(X)
Reported by Pylint.
Line: 21
Column: 9
**hu.gcs)
@settings(deadline=10000)
def test_Lp_Norm(self, inputs, gc, dc):
X = inputs[0]
# avoid kinks by moving away from 0
X += 0.02 * np.sign(X)
X[X == 0.0] += 0.02
self.ws.create_blob("X").feed(X)
op = core.CreateOperator(
Reported by Pylint.
Line: 23
Column: 9
def test_Lp_Norm(self, inputs, gc, dc):
X = inputs[0]
# avoid kinks by moving away from 0
X += 0.02 * np.sign(X)
X[X == 0.0] += 0.02
self.ws.create_blob("X").feed(X)
op = core.CreateOperator(
'LpNorm',
['X'],
Reported by Pylint.
caffe2/python/operator_test/square_root_divide_op_test.py
16 issues
Line: 8
Column: 1
from caffe2.python import core
from functools import partial
from hypothesis import strategies as st
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import math
import numpy as np
Reported by Pylint.
Line: 19
Column: 27
def _data_and_scale(
data_min_size=4, data_max_size=10,
examples_min_number=1, examples_max_number=4,
dtype=np.float32, elements=None):
params_ = st.tuples(
st.integers(min_value=examples_min_number,
max_value=examples_max_number),
st.integers(min_value=data_min_size,
max_value=data_max_size),
Reported by Pylint.
Line: 53
Column: 23
return (output, )
def grad(output_grad, ref_outputs, inputs):
return (divide_by_square_root(output_grad, inputs[1])[0],
None)
class TestSquareRootDivide(serial.SerializedTestCase):
Reported by Pylint.
Line: 61
Column: 59
class TestSquareRootDivide(serial.SerializedTestCase):
@serial.given(data_and_scale=_data_and_scale(),
**hu.gcs_cpu_only)
def test_square_root_divide(self, data_and_scale, gc, dc):
self.assertReferenceChecks(
device_option=gc,
op=core.CreateOperator("SquareRootDivide",
["data", "scale"],
["output"]),
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
from functools import partial
from hypothesis import strategies as st
Reported by Pylint.
Line: 7
Column: 1
from caffe2.python import core
from functools import partial
from hypothesis import strategies as st
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import math
Reported by Pylint.
Line: 12
Column: 1
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import math
import numpy as np
def _data_and_scale(
data_min_size=4, data_max_size=10,
Reported by Pylint.
Line: 16
Column: 1
import numpy as np
def _data_and_scale(
data_min_size=4, data_max_size=10,
examples_min_number=1, examples_max_number=4,
dtype=np.float32, elements=None):
params_ = st.tuples(
st.integers(min_value=examples_min_number,
Reported by Pylint.
Line: 39
Column: 1
)
def divide_by_square_root(data, scale):
output = np.copy(data)
num_examples = len(scale)
assert num_examples == data.shape[0]
assert len(data.shape) == 2
Reported by Pylint.
Line: 43
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
output = np.copy(data)
num_examples = len(scale)
assert num_examples == data.shape[0]
assert len(data.shape) == 2
for i in range(0, num_examples):
if scale[i] > 0:
output[i] = np.multiply(data[i], 1 / math.sqrt(scale[i]))
Reported by Bandit.
caffe2/python/operator_test/enforce_finite_op_test.py
16 issues
Line: 6
Column: 1
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 23
Column: 38
**hu.gcs
)
@settings(deadline=10000)
def test_enforce_finite(self, X, gc, dc):
def all_finite_value(X):
if X.size <= 0:
return True
Reported by Pylint.
Line: 23
Column: 42
**hu.gcs
)
@settings(deadline=10000)
def test_enforce_finite(self, X, gc, dc):
def all_finite_value(X):
if X.size <= 0:
return True
Reported by Pylint.
Line: 47
Column: 51
),
**hu.gcs
)
def test_enforce_finite_device_check(self, X, gc, dc):
op = core.CreateOperator(
"EnforceFinite",
["X"],
[],
)
Reported by Pylint.
Line: 1
Column: 1
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
Reported by Pylint.
Line: 13
Column: 1
import caffe2.python.hypothesis_test_util as hu
class TestEnforceFinite(hu.HypothesisTestCase):
@given(
X=hu.tensor(
# allow empty
min_value=0,
elements=hu.floats(allow_nan=True, allow_infinity=True),
Reported by Pylint.
Line: 22
Column: 5
),
**hu.gcs
)
@settings(deadline=10000)
def test_enforce_finite(self, X, gc, dc):
def all_finite_value(X):
if X.size <= 0:
return True
Reported by Pylint.
Line: 22
Column: 5
),
**hu.gcs
)
@settings(deadline=10000)
def test_enforce_finite(self, X, gc, dc):
def all_finite_value(X):
if X.size <= 0:
return True
Reported by Pylint.
Line: 22
Column: 5
),
**hu.gcs
)
@settings(deadline=10000)
def test_enforce_finite(self, X, gc, dc):
def all_finite_value(X):
if X.size <= 0:
return True
Reported by Pylint.
Line: 22
Column: 5
),
**hu.gcs
)
@settings(deadline=10000)
def test_enforce_finite(self, X, gc, dc):
def all_finite_value(X):
if X.size <= 0:
return True
Reported by Pylint.
caffe2/python/operator_test/margin_ranking_criterion_op_test.py
16 issues
Line: 10
Column: 1
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
class TestMarginRankingCriterion(serial.SerializedTestCase):
Reported by Pylint.
Line: 11
Column: 1
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
class TestMarginRankingCriterion(serial.SerializedTestCase):
@given(N=st.integers(min_value=10, max_value=20),
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
Reported by Pylint.
Line: 15
Column: 1
import numpy as np
class TestMarginRankingCriterion(serial.SerializedTestCase):
@given(N=st.integers(min_value=10, max_value=20),
seed=st.integers(min_value=0, max_value=65535),
margin=st.floats(min_value=-0.5, max_value=0.5),
**hu.gcs)
@settings(deadline=10000)
Reported by Pylint.
Line: 21
Column: 5
margin=st.floats(min_value=-0.5, max_value=0.5),
**hu.gcs)
@settings(deadline=10000)
def test_margin_ranking_criterion(self, N, seed, margin, gc, dc):
np.random.seed(seed)
X1 = np.random.randn(N).astype(np.float32)
X2 = np.random.randn(N).astype(np.float32)
Y = np.random.choice([-1, 1], size=N).astype(np.int32)
op = core.CreateOperator(
Reported by Pylint.
Line: 21
Column: 5
margin=st.floats(min_value=-0.5, max_value=0.5),
**hu.gcs)
@settings(deadline=10000)
def test_margin_ranking_criterion(self, N, seed, margin, gc, dc):
np.random.seed(seed)
X1 = np.random.randn(N).astype(np.float32)
X2 = np.random.randn(N).astype(np.float32)
Y = np.random.choice([-1, 1], size=N).astype(np.int32)
op = core.CreateOperator(
Reported by Pylint.
Line: 21
Column: 5
margin=st.floats(min_value=-0.5, max_value=0.5),
**hu.gcs)
@settings(deadline=10000)
def test_margin_ranking_criterion(self, N, seed, margin, gc, dc):
np.random.seed(seed)
X1 = np.random.randn(N).astype(np.float32)
X2 = np.random.randn(N).astype(np.float32)
Y = np.random.choice([-1, 1], size=N).astype(np.int32)
op = core.CreateOperator(
Reported by Pylint.
Line: 21
Column: 5
margin=st.floats(min_value=-0.5, max_value=0.5),
**hu.gcs)
@settings(deadline=10000)
def test_margin_ranking_criterion(self, N, seed, margin, gc, dc):
np.random.seed(seed)
X1 = np.random.randn(N).astype(np.float32)
X2 = np.random.randn(N).astype(np.float32)
Y = np.random.choice([-1, 1], size=N).astype(np.int32)
op = core.CreateOperator(
Reported by Pylint.
Line: 21
Column: 5
margin=st.floats(min_value=-0.5, max_value=0.5),
**hu.gcs)
@settings(deadline=10000)
def test_margin_ranking_criterion(self, N, seed, margin, gc, dc):
np.random.seed(seed)
X1 = np.random.randn(N).astype(np.float32)
X2 = np.random.randn(N).astype(np.float32)
Y = np.random.choice([-1, 1], size=N).astype(np.int32)
op = core.CreateOperator(
Reported by Pylint.
Line: 23
Column: 9
@settings(deadline=10000)
def test_margin_ranking_criterion(self, N, seed, margin, gc, dc):
np.random.seed(seed)
X1 = np.random.randn(N).astype(np.float32)
X2 = np.random.randn(N).astype(np.float32)
Y = np.random.choice([-1, 1], size=N).astype(np.int32)
op = core.CreateOperator(
"MarginRankingCriterion", ["X1", "X2", "Y"], ["loss"],
margin=margin)
Reported by Pylint.
test/distributed/pipeline/sync/skip/test_api.py
16 issues
Line: 9
Column: 1
# LICENSE file in the root directory of this source tree.
import copy
from torch import nn
from torch.distributed.pipeline.sync.skip import Namespace, skippable, stash
def test_namespace_difference():
Reported by Pylint.
Line: 11
Column: 1
from torch import nn
from torch.distributed.pipeline.sync.skip import Namespace, skippable, stash
def test_namespace_difference():
ns1 = Namespace()
ns2 = Namespace()
Reported by Pylint.
Line: 1
Column: 1
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import copy
from torch import nn
Reported by Pylint.
Line: 14
Column: 1
from torch.distributed.pipeline.sync.skip import Namespace, skippable, stash
def test_namespace_difference():
ns1 = Namespace()
ns2 = Namespace()
assert ns1 != ns2
Reported by Pylint.
Line: 17
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
def test_namespace_difference():
ns1 = Namespace()
ns2 = Namespace()
assert ns1 != ns2
def test_namespace_copy():
ns = Namespace()
assert copy.copy(ns) == ns
Reported by Bandit.
Line: 20
Column: 1
assert ns1 != ns2
def test_namespace_copy():
ns = Namespace()
assert copy.copy(ns) == ns
assert copy.copy(ns) is not ns
Reported by Pylint.
Line: 21
Column: 5
def test_namespace_copy():
ns = Namespace()
assert copy.copy(ns) == ns
assert copy.copy(ns) is not ns
def test_skippable_repr():
Reported by Pylint.
Line: 22
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
def test_namespace_copy():
ns = Namespace()
assert copy.copy(ns) == ns
assert copy.copy(ns) is not ns
def test_skippable_repr():
@skippable(stash=["hello"])
Reported by Bandit.
Line: 23
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
def test_namespace_copy():
ns = Namespace()
assert copy.copy(ns) == ns
assert copy.copy(ns) is not ns
def test_skippable_repr():
@skippable(stash=["hello"])
class Hello(nn.Module):
Reported by Bandit.
Line: 26
Column: 1
assert copy.copy(ns) is not ns
def test_skippable_repr():
@skippable(stash=["hello"])
class Hello(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 1, 1)
Reported by Pylint.
torch/multiprocessing/__init__.py
16 issues
Line: 18
Column: 1
"""
import torch
import sys
from .reductions import init_reductions
import multiprocessing
__all__ = ['set_sharing_strategy', 'get_sharing_strategy',
'get_all_sharing_strategies']
Reported by Pylint.
Line: 38
Column: 1
"""Add helper function to spawn N processes and wait for completion of any of
them. This depends `mp.get_context` which was added in Python 3.4."""
from .spawn import spawn, SpawnContext, start_processes, ProcessContext, \
ProcessRaisedException, ProcessExitedException
if sys.platform == 'darwin' or sys.platform == 'win32':
_sharing_strategy = 'file_system'
Reported by Pylint.
Line: 25
Column: 1
'get_all_sharing_strategies']
from multiprocessing import * # noqa: F403
__all__ += multiprocessing.__all__ # type: ignore[attr-defined]
Reported by Pylint.
Line: 33
Column: 1
# This call adds a Linux specific prctl(2) wrapper function to this module.
# See https://github.com/pytorch/pytorch/pull/14391 for more information.
torch._C._multiprocessing_init()
"""Add helper function to spawn N processes and wait for completion of any of
them. This depends `mp.get_context` which was added in Python 3.4."""
from .spawn import spawn, SpawnContext, start_processes, ProcessContext, \
Reported by Pylint.
Line: 33
Column: 1
# This call adds a Linux specific prctl(2) wrapper function to this module.
# See https://github.com/pytorch/pytorch/pull/14391 for more information.
torch._C._multiprocessing_init()
"""Add helper function to spawn N processes and wait for completion of any of
them. This depends `mp.get_context` which was added in Python 3.4."""
from .spawn import spawn, SpawnContext, start_processes, ProcessContext, \
Reported by Pylint.
Line: 36
Column: 1
torch._C._multiprocessing_init()
"""Add helper function to spawn N processes and wait for completion of any of
them. This depends `mp.get_context` which was added in Python 3.4."""
from .spawn import spawn, SpawnContext, start_processes, ProcessContext, \
ProcessRaisedException, ProcessExitedException
Reported by Pylint.
Line: 57
Column: 5
new_strategy (str): Name of the selected strategy. Should be one of
the values returned by :func:`get_all_sharing_strategies()`.
"""
global _sharing_strategy
assert new_strategy in _all_sharing_strategies
_sharing_strategy = new_strategy
def get_sharing_strategy():
Reported by Pylint.
Line: 17
Column: 1
contents, and we recommend referring to very good docs of the original module.
"""
import torch
import sys
from .reductions import init_reductions
import multiprocessing
__all__ = ['set_sharing_strategy', 'get_sharing_strategy',
'get_all_sharing_strategies']
Reported by Pylint.
Line: 19
Column: 1
import torch
import sys
from .reductions import init_reductions
import multiprocessing
__all__ = ['set_sharing_strategy', 'get_sharing_strategy',
'get_all_sharing_strategies']
Reported by Pylint.
Line: 25
Column: 1
'get_all_sharing_strategies']
from multiprocessing import * # noqa: F403
__all__ += multiprocessing.__all__ # type: ignore[attr-defined]
Reported by Pylint.
torch/fx/experimental/param_fetch.py
16 issues
Line: 8
Column: 33
# Matching method matches the attribute name of current version to the attribute name of `target_version`
def default_matching(name: str, target_version: int) -> str:
"""Default matching method
"""
return name
# This dict maps the nn.Module class name to the attribute name list that we want to fetch for lowering.
Reported by Pylint.
Line: 38
Column: 22
if type(mod) in module_fetch_book:
version, param_to_fetch, matching_method = module_fetch_book[type(mod)]
if version < mod._version:
raise RuntimeError(f"Fetcher version {version} try to fetch {torch.typename(mod)} version {mod._version}, "
"please upgrade the module_fetch_book, open an issue and @842974287 "
"or report a bug to AIACC team directly.")
for attr in param_to_fetch:
attrs_for_lowering[attr] = getattr(mod, matching_method(attr, mod._version))
Reported by Pylint.
Line: 39
Column: 104
if type(mod) in module_fetch_book:
version, param_to_fetch, matching_method = module_fetch_book[type(mod)]
if version < mod._version:
raise RuntimeError(f"Fetcher version {version} try to fetch {torch.typename(mod)} version {mod._version}, "
"please upgrade the module_fetch_book, open an issue and @842974287 "
"or report a bug to AIACC team directly.")
for attr in param_to_fetch:
attrs_for_lowering[attr] = getattr(mod, matching_method(attr, mod._version))
else:
Reported by Pylint.
Line: 43
Column: 75
"please upgrade the module_fetch_book, open an issue and @842974287 "
"or report a bug to AIACC team directly.")
for attr in param_to_fetch:
attrs_for_lowering[attr] = getattr(mod, matching_method(attr, mod._version))
else:
raise RuntimeError(f"{torch.typename(mod)} is not in the module_fetch_book yet, "
"please add it to the module_fetch_book, open an issue and @842974287 "
"or report a bug to AIACC team directly.")
return attrs_for_lowering
Reported by Pylint.
Line: 1
Column: 1
from torch.fx.graph_module import GraphModule
from typing import Any, Callable, Dict, List, Tuple, Type
import torch
import torch.nn as nn
# Matching method matches the attribute name of current version to the attribute name of `target_version`
def default_matching(name: str, target_version: int) -> str:
"""Default matching method
Reported by Pylint.
Line: 2
Column: 1
from torch.fx.graph_module import GraphModule
from typing import Any, Callable, Dict, List, Tuple, Type
import torch
import torch.nn as nn
# Matching method matches the attribute name of current version to the attribute name of `target_version`
def default_matching(name: str, target_version: int) -> str:
"""Default matching method
Reported by Pylint.
Line: 7
Column: 1
import torch.nn as nn
# Matching method matches the attribute name of current version to the attribute name of `target_version`
def default_matching(name: str, target_version: int) -> str:
"""Default matching method
"""
return name
Reported by Pylint.
Line: 13
Column: 1
"""
return name
# This dict maps the nn.Module class name to the attribute name list that we want to fetch for lowering.
# The first integer in the tuple is the version number of the nn.Module class when we create the parameter list.
# If there's a version mismatch then it means the parameter names in the book might be mismatched with nn.Module.
module_fetch_book: Dict[Type, Tuple[int, List[str], Callable[[str, int], str]]] = {
torch.nn.modules.linear.Linear: (1, ["weight", "bias"], default_matching),
torch.nn.modules.conv.Conv2d: (
Reported by Pylint.
Line: 14
Column: 1
return name
# This dict maps the nn.Module class name to the attribute name list that we want to fetch for lowering.
# The first integer in the tuple is the version number of the nn.Module class when we create the parameter list.
# If there's a version mismatch then it means the parameter names in the book might be mismatched with nn.Module.
module_fetch_book: Dict[Type, Tuple[int, List[str], Callable[[str, int], str]]] = {
torch.nn.modules.linear.Linear: (1, ["weight", "bias"], default_matching),
torch.nn.modules.conv.Conv2d: (
1, ["weight", "bias", "kernel_size", "stride", "padding", "dilation", "groups", "padding_mode"], default_matching
Reported by Pylint.
Line: 15
Column: 1
# This dict maps the nn.Module class name to the attribute name list that we want to fetch for lowering.
# The first integer in the tuple is the version number of the nn.Module class when we create the parameter list.
# If there's a version mismatch then it means the parameter names in the book might be mismatched with nn.Module.
module_fetch_book: Dict[Type, Tuple[int, List[str], Callable[[str, int], str]]] = {
torch.nn.modules.linear.Linear: (1, ["weight", "bias"], default_matching),
torch.nn.modules.conv.Conv2d: (
1, ["weight", "bias", "kernel_size", "stride", "padding", "dilation", "groups", "padding_mode"], default_matching
),
Reported by Pylint.
torch/utils/data/graph.py
16 issues
Line: 14
Column: 3
def stub_unpickler():
return "STUB"
# TODO(VitalyFedyunin): Make sure it works without dill module installed
def list_connected_datapipes(scan_obj):
f = io.BytesIO()
p = pickle.Pickler(f) # Not going to work for lambdas, but dill infinite loops on typing and can't be used as is
Reported by Pylint.
Line: 20
Column: 22
f = io.BytesIO()
p = pickle.Pickler(f) # Not going to work for lambdas, but dill infinite loops on typing and can't be used as is
def stub_pickler(obj):
return stub_unpickler, ()
captured_connections = []
def reduce_hook(obj):
Reported by Pylint.
Line: 20
Column: 5
f = io.BytesIO()
p = pickle.Pickler(f) # Not going to work for lambdas, but dill infinite loops on typing and can't be used as is
def stub_pickler(obj):
return stub_unpickler, ()
captured_connections = []
def reduce_hook(obj):
Reported by Pylint.
Line: 32
Column: 3
captured_connections.append(obj)
return stub_unpickler, ()
# TODO(VitalyFedyunin): Better do it as `with` context for safety
IterableDataset.set_reduce_ex_hook(reduce_hook)
p.dump(scan_obj)
IterableDataset.set_reduce_ex_hook(None)
return captured_connections
Reported by Pylint.
Line: 1
Column: 1
import io
import pickle
from torch.utils.data import IterableDataset
from typing import Any, Dict
reduce_ex_hook = None
Reported by Pylint.
Line: 2
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b403-import-pickle
import io
import pickle
from torch.utils.data import IterableDataset
from typing import Any, Dict
reduce_ex_hook = None
Reported by Bandit.
Line: 6
Column: 1
from torch.utils.data import IterableDataset
from typing import Any, Dict
reduce_ex_hook = None
def stub_unpickler():
Reported by Pylint.
Line: 8
Column: 1
from typing import Any, Dict
reduce_ex_hook = None
def stub_unpickler():
return "STUB"
Reported by Pylint.
Line: 11
Column: 1
reduce_ex_hook = None
def stub_unpickler():
return "STUB"
# TODO(VitalyFedyunin): Make sure it works without dill module installed
def list_connected_datapipes(scan_obj):
Reported by Pylint.
Line: 15
Column: 1
return "STUB"
# TODO(VitalyFedyunin): Make sure it works without dill module installed
def list_connected_datapipes(scan_obj):
f = io.BytesIO()
p = pickle.Pickler(f) # Not going to work for lambdas, but dill infinite loops on typing and can't be used as is
def stub_pickler(obj):
Reported by Pylint.
torch/nn/modules/pixelshuffle.py
16 issues
Line: 1
Column: 1
from .module import Module
from .. import functional as F
from torch import Tensor
class PixelShuffle(Module):
r"""Rearranges elements in a tensor of shape :math:`(*, C \times r^2, H, W)`
to a tensor of shape :math:`(*, C, H \times r, W \times r)`, where r is an upscale factor.
Reported by Pylint.
Line: 2
Column: 1
from .module import Module
from .. import functional as F
from torch import Tensor
class PixelShuffle(Module):
r"""Rearranges elements in a tensor of shape :math:`(*, C \times r^2, H, W)`
to a tensor of shape :math:`(*, C, H \times r, W \times r)`, where r is an upscale factor.
Reported by Pylint.
Line: 52
Column: 23
super(PixelShuffle, self).__init__()
self.upscale_factor = upscale_factor
def forward(self, input: Tensor) -> Tensor:
return F.pixel_shuffle(input, self.upscale_factor)
def extra_repr(self) -> str:
return 'upscale_factor={}'.format(self.upscale_factor)
Reported by Pylint.
Line: 102
Column: 23
super(PixelUnshuffle, self).__init__()
self.downscale_factor = downscale_factor
def forward(self, input: Tensor) -> Tensor:
return F.pixel_unshuffle(input, self.downscale_factor)
def extra_repr(self) -> str:
return 'downscale_factor={}'.format(self.downscale_factor)
Reported by Pylint.
Line: 1
Column: 1
from .module import Module
from .. import functional as F
from torch import Tensor
class PixelShuffle(Module):
r"""Rearranges elements in a tensor of shape :math:`(*, C \times r^2, H, W)`
to a tensor of shape :math:`(*, C, H \times r, W \times r)`, where r is an upscale factor.
Reported by Pylint.
Line: 4
Column: 1
from .module import Module
from .. import functional as F
from torch import Tensor
class PixelShuffle(Module):
r"""Rearranges elements in a tensor of shape :math:`(*, C \times r^2, H, W)`
to a tensor of shape :math:`(*, C, H \times r, W \times r)`, where r is an upscale factor.
Reported by Pylint.
Line: 15
Column: 1
with a stride of :math:`1/r`.
See the paper:
`Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network`_
by Shi et. al (2016) for more details.
Args:
upscale_factor (int): factor to increase spatial resolution by
Reported by Pylint.
Line: 42
Column: 1
>>> print(output.size())
torch.Size([1, 1, 12, 12])
.. _Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network:
https://arxiv.org/abs/1609.05158
"""
__constants__ = ['upscale_factor']
upscale_factor: int
Reported by Pylint.
Line: 49
Column: 9
upscale_factor: int
def __init__(self, upscale_factor: int) -> None:
super(PixelShuffle, self).__init__()
self.upscale_factor = upscale_factor
def forward(self, input: Tensor) -> Tensor:
return F.pixel_shuffle(input, self.upscale_factor)
Reported by Pylint.
Line: 52
Column: 5
super(PixelShuffle, self).__init__()
self.upscale_factor = upscale_factor
def forward(self, input: Tensor) -> Tensor:
return F.pixel_shuffle(input, self.upscale_factor)
def extra_repr(self) -> str:
return 'upscale_factor={}'.format(self.upscale_factor)
Reported by Pylint.