The following issues were found:
test/distributed/pipeline/sync/skip/test_leak.py
42 issues
Line: 7
Column: 1
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch import nn
from torch.distributed.pipeline.sync import Pipe, is_checkpointing, is_recomputing
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
Reported by Pylint.
Line: 8
Column: 1
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch import nn
from torch.distributed.pipeline.sync import Pipe, is_checkpointing, is_recomputing
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.tracker import current_skip_tracker
Reported by Pylint.
Line: 9
Column: 1
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch import nn
from torch.distributed.pipeline.sync import Pipe, is_checkpointing, is_recomputing
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.tracker import current_skip_tracker
Reported by Pylint.
Line: 11
Column: 1
import torch
from torch import nn
from torch.distributed.pipeline.sync import Pipe, is_checkpointing, is_recomputing
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.tracker import current_skip_tracker
@skippable(stash=["skip"])
Reported by Pylint.
Line: 12
Column: 1
from torch import nn
from torch.distributed.pipeline.sync import Pipe, is_checkpointing, is_recomputing
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.tracker import current_skip_tracker
@skippable(stash=["skip"])
class Stash(nn.Module):
Reported by Pylint.
Line: 13
Column: 1
from torch.distributed.pipeline.sync import Pipe, is_checkpointing, is_recomputing
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.tracker import current_skip_tracker
@skippable(stash=["skip"])
class Stash(nn.Module):
def forward(self, input):
Reported by Pylint.
Line: 18
Column: 23
@skippable(stash=["skip"])
class Stash(nn.Module):
def forward(self, input):
yield stash("skip", input)
return input # noqa: B901
@skippable(pop=["skip"])
Reported by Pylint.
Line: 25
Column: 23
@skippable(pop=["skip"])
class Pop(nn.Module):
def forward(self, input):
skip = yield pop("skip")
return input + skip
@pytest.mark.parametrize("train", [True, False], ids=["train", "eval"])
Reported by Pylint.
Line: 32
Column: 50
@pytest.mark.parametrize("train", [True, False], ids=["train", "eval"])
@pytest.mark.parametrize("checkpoint", ["always", "except_last", "never"])
def test_delete_portal_tensor(train, checkpoint, setup_rpc):
# Without checkpointing:
# +- Stash --+ +--- Pop ----+ - - - layers
# | 2,blue,1 |--| 1,orange,0 | - - - tensor_life and portal function
# +----------+ +------------+
#
Reported by Pylint.
Line: 59
Column: 5
stash_ = Stash()
@stash_.register_forward_hook
def check_portal_tensor_after_stash(*_):
if is_checkpointing():
assert portal_tensor_life_is(2)
elif is_recomputing():
assert portal_tensor_life_is(0)
else:
Reported by Pylint.
caffe2/python/gradient_checker.py
42 issues
Line: 125
Column: 9
input_to_check, step_size=0.0001,
threshold=0.05, print_net=True):
net_results, net_grads, full_net = _get_grad(
net, [], outputs_with_grad, input_values, [input_to_check])
analytic_grad = net_grads[input_to_check]
def GetLoss(new_value):
workspace.blobs[input_to_check] = new_value
Reported by Pylint.
Line: 267
Column: 3
op.device_option.CopyFrom(self._device_option)
if grad_ops is None:
# TODO(jiayq): use the gradient registration instead of the old
# hack.
grad_ops, g_input = getGradientForOp(op)
_input_device_options = input_device_options or \
Reported by Pylint.
Line: 283
Column: 9
# Get the loss and gradient for the original.
grad_name = g_input[input_to_check]
loss, grad = self.GetLossAndGrad(
op, grad_ops, inputs, op.input, input_to_check, grad_name,
outputs_with_grads,
)
grad_estimate = np.zeros_like(inputs[input_to_check])
if grad_estimate.shape != grad.shape:
Reported by Pylint.
Line: 1
Column: 1
## @package gradient_checker
# Module caffe2.python.gradient_checker
import os
import numpy as np
Reported by Pylint.
Line: 15
Column: 1
from caffe2.proto import caffe2_pb2
def getGradientForOp(op):
return core.GradientRegistry.GetGradientForOp(
op, [s + '_grad' for s in op.output])
def _get_grad_blob(grad_map, input_to_check):
Reported by Pylint.
Line: 15
Column: 1
from caffe2.proto import caffe2_pb2
def getGradientForOp(op):
return core.GradientRegistry.GetGradientForOp(
op, [s + '_grad' for s in op.output])
def _get_grad_blob(grad_map, input_to_check):
Reported by Pylint.
Line: 15
Column: 1
from caffe2.proto import caffe2_pb2
def getGradientForOp(op):
return core.GradientRegistry.GetGradientForOp(
op, [s + '_grad' for s in op.output])
def _get_grad_blob(grad_map, input_to_check):
Reported by Pylint.
Line: 29
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
# If grad_blob is not a single blob, it should be a gradient slice.
# To make it comparable with the estimated gradient which is dense,
# we need to first convert grad_blob to dense gradient.
assert isinstance(grad_blob, core.GradientSlice)
dense_grad = 'tmp_dense_grad'
sparse_to_dense_op = core.CreateOperator(
'SparseToDense',
[grad_blob.indices, grad_blob.values, input_to_check],
dense_grad,
Reported by Bandit.
Line: 48
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
workspace.blobs[name] = value
for input_to_check in inputs_with_grads:
assert input_to_check in grad_map, (
'{} has no gradient, cannot check net gradient.'.format(
input_to_check))
assert str(input_to_check) in workspace.blobs
workspace.RunNetOnce(grad_net)
Reported by Bandit.
Line: 51
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
assert input_to_check in grad_map, (
'{} has no gradient, cannot check net gradient.'.format(
input_to_check))
assert str(input_to_check) in workspace.blobs
workspace.RunNetOnce(grad_net)
forward_results = [(output, workspace.blobs[output]) for output in outputs]
grads = {input_to_check: _get_grad_blob(grad_map, input_to_check)
for input_to_check in inputs_with_grads}
Reported by Bandit.
test/jit/test_warn.py
42 issues
Line: 5
Column: 1
import sys
import io
import torch
import warnings
from contextlib import redirect_stderr
from torch.testing import FileCheck
# Make the helper files in test/ importable
Reported by Pylint.
Line: 8
Column: 1
import torch
import warnings
from contextlib import redirect_stderr
from torch.testing import FileCheck
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
Reported by Pylint.
Line: 13
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
Reported by Pylint.
Line: 1
Column: 1
import os
import sys
import io
import torch
import warnings
from contextlib import redirect_stderr
from torch.testing import FileCheck
Reported by Pylint.
Line: 6
Column: 1
import io
import torch
import warnings
from contextlib import redirect_stderr
from torch.testing import FileCheck
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
Reported by Pylint.
Line: 7
Column: 1
import torch
import warnings
from contextlib import redirect_stderr
from torch.testing import FileCheck
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
Reported by Pylint.
Line: 13
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
Reported by Pylint.
Line: 21
Column: 1
"instead.")
class TestWarn(JitTestCase):
def test_warn(self):
@torch.jit.script
def fn():
warnings.warn("I am warning you")
Reported by Pylint.
Line: 22
Column: 5
class TestWarn(JitTestCase):
def test_warn(self):
@torch.jit.script
def fn():
warnings.warn("I am warning you")
f = io.StringIO()
Reported by Pylint.
Line: 22
Column: 5
class TestWarn(JitTestCase):
def test_warn(self):
@torch.jit.script
def fn():
warnings.warn("I am warning you")
f = io.StringIO()
Reported by Pylint.
caffe2/python/operator_test/listwise_l2r_operator_test.py
42 issues
Line: 4
Column: 1
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
from hypothesis import given
Reported by Pylint.
Line: 7
Column: 1
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
from hypothesis import given
class TestListwiseL2rOps(hu.HypothesisTestCase):
def ref_lambda_rank_loss(
self, y, r, use_ndcg_as_loss, use_idcg_normalization, use_exp_gain
Reported by Pylint.
Line: 1
Column: 1
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
from hypothesis import given
Reported by Pylint.
Line: 10
Column: 1
from hypothesis import given
class TestListwiseL2rOps(hu.HypothesisTestCase):
def ref_lambda_rank_loss(
self, y, r, use_ndcg_as_loss, use_idcg_normalization, use_exp_gain
):
n = len(y)
Reported by Pylint.
Line: 11
Column: 5
class TestListwiseL2rOps(hu.HypothesisTestCase):
def ref_lambda_rank_loss(
self, y, r, use_ndcg_as_loss, use_idcg_normalization, use_exp_gain
):
n = len(y)
def get_discounts(v):
Reported by Pylint.
Line: 11
Column: 5
class TestListwiseL2rOps(hu.HypothesisTestCase):
def ref_lambda_rank_loss(
self, y, r, use_ndcg_as_loss, use_idcg_normalization, use_exp_gain
):
n = len(y)
def get_discounts(v):
Reported by Pylint.
Line: 11
Column: 5
class TestListwiseL2rOps(hu.HypothesisTestCase):
def ref_lambda_rank_loss(
self, y, r, use_ndcg_as_loss, use_idcg_normalization, use_exp_gain
):
n = len(y)
def get_discounts(v):
Reported by Pylint.
Line: 11
Column: 5
class TestListwiseL2rOps(hu.HypothesisTestCase):
def ref_lambda_rank_loss(
self, y, r, use_ndcg_as_loss, use_idcg_normalization, use_exp_gain
):
n = len(y)
def get_discounts(v):
Reported by Pylint.
Line: 11
Column: 5
class TestListwiseL2rOps(hu.HypothesisTestCase):
def ref_lambda_rank_loss(
self, y, r, use_ndcg_as_loss, use_idcg_normalization, use_exp_gain
):
n = len(y)
def get_discounts(v):
Reported by Pylint.
Line: 11
Column: 5
class TestListwiseL2rOps(hu.HypothesisTestCase):
def ref_lambda_rank_loss(
self, y, r, use_ndcg_as_loss, use_idcg_normalization, use_exp_gain
):
n = len(y)
def get_discounts(v):
Reported by Pylint.
torch/csrc/deploy/example/examples.py
42 issues
Line: 3
Column: 1
from typing import Tuple, List, Dict
import torch
import torch.nn as nn
from torch import Tensor
class Simple(torch.nn.Module):
def __init__(self, N, M):
Reported by Pylint.
Line: 4
Column: 1
from typing import Tuple, List, Dict
import torch
import torch.nn as nn
from torch import Tensor
class Simple(torch.nn.Module):
def __init__(self, N, M):
Reported by Pylint.
Line: 5
Column: 1
import torch
import torch.nn as nn
from torch import Tensor
class Simple(torch.nn.Module):
def __init__(self, N, M):
super().__init__()
Reported by Pylint.
Line: 13
Column: 23
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(N, M))
def forward(self, input):
output = self.weight + input
return output
def load_library():
Reported by Pylint.
Line: 131
Column: 15
return (input1 * -1, input2 * -1)
def make_prediction(
self, input: List[Tuple[Tensor, Tensor]]
) -> List[Tuple[Tensor, Tensor]]:
return [self.forward(i[0], i[1]) for i in input]
def make_batch(
self, mega_batch: List[Tuple[Tensor, Tensor, int]], goals: Dict[str, str]
Reported by Pylint.
Line: 146
Column: 5
class MultiReturn(torch.nn.Module):
def __init__(self):
super(MultiReturn, self).__init__()
def forward(self, t):
# type: (Tuple[Tensor, Tensor]) -> Tuple[Tuple[Tensor, Tensor], Tuple[Tensor, Tensor]]
a, b = t
Reported by Pylint.
Line: 1
Column: 1
from typing import Tuple, List, Dict
import torch
import torch.nn as nn
from torch import Tensor
class Simple(torch.nn.Module):
def __init__(self, N, M):
Reported by Pylint.
Line: 8
Column: 1
from torch import Tensor
class Simple(torch.nn.Module):
def __init__(self, N, M):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(N, M))
def forward(self, input):
Reported by Pylint.
Line: 8
Column: 1
from torch import Tensor
class Simple(torch.nn.Module):
def __init__(self, N, M):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(N, M))
def forward(self, input):
Reported by Pylint.
Line: 9
Column: 5
class Simple(torch.nn.Module):
def __init__(self, N, M):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(N, M))
def forward(self, input):
output = self.weight + input
Reported by Pylint.
test/cpp_api_parity/utils.py
42 issues
Line: 7
Column: 1
import warnings
import shutil
import torch
import torch.utils.cpp_extension
import torch.testing._internal.common_nn as common_nn
from torch.testing._internal.common_cuda import TEST_CUDA
# Note that this namedtuple is for C++ parity test mechanism's internal use.
Reported by Pylint.
Line: 8
Column: 1
import shutil
import torch
import torch.utils.cpp_extension
import torch.testing._internal.common_nn as common_nn
from torch.testing._internal.common_cuda import TEST_CUDA
# Note that this namedtuple is for C++ parity test mechanism's internal use.
# For guidance on how to add a new C++ parity test, please see
Reported by Pylint.
Line: 9
Column: 1
import torch
import torch.utils.cpp_extension
import torch.testing._internal.common_nn as common_nn
from torch.testing._internal.common_cuda import TEST_CUDA
# Note that this namedtuple is for C++ parity test mechanism's internal use.
# For guidance on how to add a new C++ parity test, please see
# NOTE [How to check NN module / functional API parity between Python and C++ frontends]
Reported by Pylint.
Line: 10
Column: 1
import torch
import torch.utils.cpp_extension
import torch.testing._internal.common_nn as common_nn
from torch.testing._internal.common_cuda import TEST_CUDA
# Note that this namedtuple is for C++ parity test mechanism's internal use.
# For guidance on how to add a new C++ parity test, please see
# NOTE [How to check NN module / functional API parity between Python and C++ frontends]
TorchNNModuleTestParams = namedtuple(
Reported by Pylint.
Line: 212
Column: 31
def add_cpp_forward_args(args):
args_stmts = []
for arg_name, _ in args:
args_stmts.append('auto {} = arg_dict.at("{}")'.format(arg_name, arg_name))
cpp_forward_args_symbols.append(arg_name)
return args_stmts
cpp_forward_input_args_stmts = set_cpp_tensors_requires_grad(move_cpp_tensors_to_device(
add_cpp_forward_args(test_params.arg_dict['input']), device), test_params.arg_dict['input'])
Reported by Pylint.
Line: 226
Column: 37
# Build the list of other arguments needed
cpp_other_args_stmts = []
for arg_name, _ in test_params.arg_dict['other']:
cpp_other_args_stmts.append('auto {} = arg_dict.at("{}")'.format(arg_name, arg_name))
cpp_other_args_stmts = move_cpp_tensors_to_device(cpp_other_args_stmts, device)
cpp_args_construction_stmts = cpp_forward_input_args_stmts + cpp_forward_target_args_stmts + \
cpp_forward_extra_args_stmts + cpp_other_args_stmts
Reported by Pylint.
Line: 285
Column: 58
for i, arg in enumerate(args):
arg_dict[arg_type].append(CppArg(name=arg_type_prefix + str(i), value=arg))
put_args_into_arg_dict('input', 'i', convert_to_list(test_instance._get_input()))
if is_criterion_test(test_instance):
put_args_into_arg_dict('target', 't', convert_to_list(test_instance._get_target()))
if test_instance.extra_args:
put_args_into_arg_dict('extra_args', 'e', convert_to_list(test_instance.extra_args))
Reported by Pylint.
Line: 287
Column: 63
put_args_into_arg_dict('input', 'i', convert_to_list(test_instance._get_input()))
if is_criterion_test(test_instance):
put_args_into_arg_dict('target', 't', convert_to_list(test_instance._get_target()))
if test_instance.extra_args:
put_args_into_arg_dict('extra_args', 'e', convert_to_list(test_instance.extra_args))
cpp_var_map = test_params_dict.get('cpp_var_map', {})
for arg_name, arg_value in cpp_var_map.items():
Reported by Pylint.
Line: 295
Column: 70
for arg_name, arg_value in cpp_var_map.items():
if isinstance(arg_value, str):
if arg_value == '_get_input()':
arg_dict['other'].append(CppArg(name=arg_name, value=test_instance._get_input()))
else:
raise RuntimeError("`{}` has unsupported string value: {}".format(arg_name, arg_value))
elif isinstance(arg_value, torch.Tensor):
arg_dict['other'].append(CppArg(name=arg_name, value=arg_value))
else:
Reported by Pylint.
Line: 355
Column: 16
# Don't block the process if this fails, but show the error message as warning.
try:
shutil.rmtree(folder_path)
except Exception as e:
warnings.warn("Non-blocking folder removal fails with the following error:\n{}".format(str(e)))
Reported by Pylint.
caffe2/python/operator_test/channel_stats_op_test.py
42 issues
Line: 9
Column: 1
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
Reported by Pylint.
Line: 10
Column: 1
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
Reported by Pylint.
Line: 13
Column: 1
import hypothesis.strategies as st
import numpy as np
import unittest
class TestChannelStatsOp(serial.SerializedTestCase):
def channel_stats_nchw_ref(self, X):
dims = X.shape
Reported by Pylint.
Line: 16
Column: 1
import unittest
class TestChannelStatsOp(serial.SerializedTestCase):
def channel_stats_nchw_ref(self, X):
dims = X.shape
N = dims[0]
C = dims[1]
X = X.reshape(N, C, -1)
Reported by Pylint.
Line: 17
Column: 5
class TestChannelStatsOp(serial.SerializedTestCase):
def channel_stats_nchw_ref(self, X):
dims = X.shape
N = dims[0]
C = dims[1]
X = X.reshape(N, C, -1)
sum1 = np.sum(X, axis=(0, 2), keepdims=False)
Reported by Pylint.
Line: 17
Column: 5
class TestChannelStatsOp(serial.SerializedTestCase):
def channel_stats_nchw_ref(self, X):
dims = X.shape
N = dims[0]
C = dims[1]
X = X.reshape(N, C, -1)
sum1 = np.sum(X, axis=(0, 2), keepdims=False)
Reported by Pylint.
Line: 17
Column: 5
class TestChannelStatsOp(serial.SerializedTestCase):
def channel_stats_nchw_ref(self, X):
dims = X.shape
N = dims[0]
C = dims[1]
X = X.reshape(N, C, -1)
sum1 = np.sum(X, axis=(0, 2), keepdims=False)
Reported by Pylint.
Line: 19
Column: 9
class TestChannelStatsOp(serial.SerializedTestCase):
def channel_stats_nchw_ref(self, X):
dims = X.shape
N = dims[0]
C = dims[1]
X = X.reshape(N, C, -1)
sum1 = np.sum(X, axis=(0, 2), keepdims=False)
sum2 = np.sum(X**2, axis=(0, 2), keepdims=False)
return (sum1, sum2)
Reported by Pylint.
Line: 20
Column: 9
def channel_stats_nchw_ref(self, X):
dims = X.shape
N = dims[0]
C = dims[1]
X = X.reshape(N, C, -1)
sum1 = np.sum(X, axis=(0, 2), keepdims=False)
sum2 = np.sum(X**2, axis=(0, 2), keepdims=False)
return (sum1, sum2)
Reported by Pylint.
aten/src/ATen/gen_vulkan_spv.py
42 issues
Line: 22
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b605_start_process_with_a_shell.html
hFilePath, cppFilePath, srcDirPath, glslcPath, tmpDirPath))
cmd = "find " + srcDirPath + " -name \"*.glsl\""
vexs = os.popen(cmd).read().split('\n')
templateSrcPaths = []
for f in vexs:
if len(f) > 1:
templateSrcPaths.append(f)
templateSrcPaths.sort()
Reported by Bandit.
Line: 8
Column: 1
import os
import sys
import subprocess
from tools.codegen.code_template import CodeTemplate
H_NAME = "spv.h"
CPP_NAME = "spv.cpp"
DEFAULT_ENV = {"precision": "highp"}
Reported by Pylint.
Line: 103
Column: 10
return d
def main(argv):
parser = argparse.ArgumentParser(description='')
parser.add_argument(
'-i',
'--glsl-path',
help='',
Reported by Pylint.
Line: 119
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b108_hardcoded_tmp_directory.html
'-t',
'--tmp-dir-path',
required=True,
help='/tmp')
parser.add_argument(
'-o',
'--output-path',
required=True,
help='')
Reported by Bandit.
Line: 1
Column: 1
#!/usr/bin/env python3
import argparse
import array
import os
import sys
import subprocess
from tools.codegen.code_template import CodeTemplate
Reported by Pylint.
Line: 7
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b404-import-subprocess
import array
import os
import sys
import subprocess
from tools.codegen.code_template import CodeTemplate
H_NAME = "spv.h"
CPP_NAME = "spv.cpp"
DEFAULT_ENV = {"precision": "highp"}
Reported by Bandit.
Line: 14
Column: 1
CPP_NAME = "spv.cpp"
DEFAULT_ENV = {"precision": "highp"}
def getName(filePath):
return os.path.basename(filePath).replace("/", "_").replace(".", "_")
def genCppH(hFilePath, cppFilePath, srcDirPath, glslcPath, tmpDirPath, env):
print("hFilePath:{} cppFilePath:{} srcDirPath:{} glslcPath:{} tmpDirPath:{}".format(
hFilePath, cppFilePath, srcDirPath, glslcPath, tmpDirPath))
Reported by Pylint.
Line: 14
Column: 1
CPP_NAME = "spv.cpp"
DEFAULT_ENV = {"precision": "highp"}
def getName(filePath):
return os.path.basename(filePath).replace("/", "_").replace(".", "_")
def genCppH(hFilePath, cppFilePath, srcDirPath, glslcPath, tmpDirPath, env):
print("hFilePath:{} cppFilePath:{} srcDirPath:{} glslcPath:{} tmpDirPath:{}".format(
hFilePath, cppFilePath, srcDirPath, glslcPath, tmpDirPath))
Reported by Pylint.
Line: 14
Column: 1
CPP_NAME = "spv.cpp"
DEFAULT_ENV = {"precision": "highp"}
def getName(filePath):
return os.path.basename(filePath).replace("/", "_").replace(".", "_")
def genCppH(hFilePath, cppFilePath, srcDirPath, glslcPath, tmpDirPath, env):
print("hFilePath:{} cppFilePath:{} srcDirPath:{} glslcPath:{} tmpDirPath:{}".format(
hFilePath, cppFilePath, srcDirPath, glslcPath, tmpDirPath))
Reported by Pylint.
Line: 17
Column: 1
def getName(filePath):
return os.path.basename(filePath).replace("/", "_").replace(".", "_")
def genCppH(hFilePath, cppFilePath, srcDirPath, glslcPath, tmpDirPath, env):
print("hFilePath:{} cppFilePath:{} srcDirPath:{} glslcPath:{} tmpDirPath:{}".format(
hFilePath, cppFilePath, srcDirPath, glslcPath, tmpDirPath))
cmd = "find " + srcDirPath + " -name \"*.glsl\""
vexs = os.popen(cmd).read().split('\n')
Reported by Pylint.
torch/utils/benchmark/utils/fuzzer.py
42 issues
Line: 162
Column: 17
def dtype_size(dtype):
if dtype == torch.bool:
return 1
if dtype.is_floating_point or dtype.is_complex:
return int(torch.finfo(dtype).bits / 8)
return int(torch.iinfo(dtype).bits / 8)
Reported by Pylint.
Line: 165
Column: 20
if dtype == torch.bool:
return 1
if dtype.is_floating_point or dtype.is_complex:
return int(torch.finfo(dtype).bits / 8)
return int(torch.iinfo(dtype).bits / 8)
def prod(values, base=1):
"""np.prod can overflow, so for sizes the product should be done in Python.
Reported by Pylint.
Line: 166
Column: 16
return 1
if dtype.is_floating_point or dtype.is_complex:
return int(torch.finfo(dtype).bits / 8)
return int(torch.iinfo(dtype).bits / 8)
def prod(values, base=1):
"""np.prod can overflow, so for sizes the product should be done in Python.
Reported by Pylint.
Line: 191
Column: 15
max_allocation_bytes: Optional[int] = None,
dim_parameter: Optional[str] = None,
roll_parameter: Optional[str] = None,
dtype=torch.float32,
cuda=False,
tensor_constructor: Optional[Callable] = None
):
"""
Args:
Reported by Pylint.
Line: 263
Column: 20
@staticmethod
def default_tensor_constructor(size, dtype, **kwargs):
if dtype.is_floating_point or dtype.is_complex:
return torch.rand(size=size, dtype=dtype, device="cpu")
else:
return torch.randint(1, 127, size=size, dtype=dtype, device="cpu")
def _make_tensor(self, params, state):
size, steps, allocation_size = self._get_size_and_steps(params)
Reported by Pylint.
Line: 265
Column: 20
if dtype.is_floating_point or dtype.is_complex:
return torch.rand(size=size, dtype=dtype, device="cpu")
else:
return torch.randint(1, 127, size=size, dtype=dtype, device="cpu")
def _make_tensor(self, params, state):
size, steps, allocation_size = self._get_size_and_steps(params)
constructor = (
self._tensor_constructor or
Reported by Pylint.
Line: 372
Column: 20
ops will create reproducible Tensors.
"""
if seed is None:
seed = np.random.RandomState().randint(0, 2**63)
self._seed = seed
self._parameters = Fuzzer._unpack(parameters, FuzzedParameter)
self._tensors = Fuzzer._unpack(tensors, FuzzedTensor)
self._constraints = constraints or ()
Reported by Pylint.
Line: 394
Column: 17
))
def take(self, n):
state = np.random.RandomState(self._seed)
torch.manual_seed(state.randint(low=0, high=2 ** 63))
for _ in range(n):
params = self._generate(state)
tensors = {}
tensor_properties = {}
Reported by Pylint.
Line: 190
Column: 9
max_elements: Optional[int] = None,
max_allocation_bytes: Optional[int] = None,
dim_parameter: Optional[str] = None,
roll_parameter: Optional[str] = None,
dtype=torch.float32,
cuda=False,
tensor_constructor: Optional[Callable] = None
):
"""
Reported by Pylint.
Line: 261
Column: 1
return self._name
@staticmethod
def default_tensor_constructor(size, dtype, **kwargs):
if dtype.is_floating_point or dtype.is_complex:
return torch.rand(size=size, dtype=dtype, device="cpu")
else:
return torch.randint(1, 127, size=size, dtype=dtype, device="cpu")
Reported by Pylint.
test/distributed/elastic/rendezvous/api_test.py
41 issues
Line: 10
Column: 1
from typing import Any, Dict, SupportsInt, Tuple, cast
from unittest import TestCase
from torch.distributed import Store
from torch.distributed.elastic.rendezvous import (
RendezvousHandler,
RendezvousHandlerRegistry,
RendezvousParameters,
)
Reported by Pylint.
Line: 11
Column: 1
from unittest import TestCase
from torch.distributed import Store
from torch.distributed.elastic.rendezvous import (
RendezvousHandler,
RendezvousHandlerRegistry,
RendezvousParameters,
)
Reported by Pylint.
Line: 237
Column: 32
def test_register_raises_error_if_called_twice_with_different_creators(self) -> None:
self._registry.register("dummy_backend", self._create_handler)
other_create_handler = lambda p: _DummyRendezvousHandler(p) # noqa: E731
with self.assertRaisesRegex(
ValueError,
r"^The rendezvous backend 'dummy_backend' cannot be registered with "
rf"'{other_create_handler}' as it is already registered with '{self._create_handler}'.$",
Reported by Pylint.
Line: 1
Column: 1
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, SupportsInt, Tuple, cast
from unittest import TestCase
Reported by Pylint.
Line: 18
Column: 1
)
class RendezvousParametersTest(TestCase):
def setUp(self) -> None:
self._backend = "dummy_backend"
self._endpoint = "dummy_endpoint"
self._run_id = "dummy_run_id"
self._min_nodes = 3
Reported by Pylint.
Line: 37
Column: 5
**self._kwargs,
)
def test_init_initializes_params(self) -> None:
self._kwargs["dummy_param"] = "x"
params = self._create_params()
self.assertEqual(params.backend, self._backend)
Reported by Pylint.
Line: 50
Column: 5
self.assertEqual(params.get("dummy_param"), "x")
def test_init_initializes_params_if_min_nodes_equals_to_1(self) -> None:
self._min_nodes = 1
params = self._create_params()
self.assertEqual(params.min_nodes, self._min_nodes)
Reported by Pylint.
Line: 58
Column: 5
self.assertEqual(params.min_nodes, self._min_nodes)
self.assertEqual(params.max_nodes, self._max_nodes)
def test_init_initializes_params_if_min_and_max_nodes_are_equal(self) -> None:
self._max_nodes = 3
params = self._create_params()
self.assertEqual(params.min_nodes, self._min_nodes)
Reported by Pylint.
Line: 66
Column: 5
self.assertEqual(params.min_nodes, self._min_nodes)
self.assertEqual(params.max_nodes, self._max_nodes)
def test_init_raises_error_if_backend_is_none_or_empty(self) -> None:
for backend in [None, ""]:
with self.subTest(backend=backend):
self._backend = backend # type: ignore[assignment]
with self.assertRaisesRegex(
Reported by Pylint.
Line: 77
Column: 5
):
self._create_params()
def test_init_raises_error_if_min_nodes_is_less_than_1(self) -> None:
for min_nodes in [0, -1, -5]:
with self.subTest(min_nodes=min_nodes):
self._min_nodes = min_nodes
with self.assertRaisesRegex(
Reported by Pylint.