The following issues were found:
tools/linter/clang_format_all.py
14 issues
Line: 17
Column: 1
import sys
from typing import List, Set
from .clang_format_utils import get_and_check_clang_format, CLANG_FORMAT_PATH
# Allowlist of directories to check. All files that in that directory
# (recursively) will be checked.
# If you edit this, please edit the allowlist in clang_format_ci.sh as well.
CLANG_FORMAT_ALLOWLIST = [
Reported by Pylint.
Line: 39
Column: 9
Returns the set of allowlist cpp source files.
"""
matches = []
for dir in CLANG_FORMAT_ALLOWLIST:
for root, dirnames, filenames in os.walk(dir):
for filename in filenames:
if CPP_FILE_REGEX.match(filename):
matches.append(os.path.join(root, filename))
return set(matches)
Reported by Pylint.
Line: 40
Column: 19
"""
matches = []
for dir in CLANG_FORMAT_ALLOWLIST:
for root, dirnames, filenames in os.walk(dir):
for filename in filenames:
if CPP_FILE_REGEX.match(filename):
matches.append(os.path.join(root, filename))
return set(matches)
Reported by Pylint.
Line: 72
Column: 5
"""
Checks if a file is formatted correctly and returns True if so.
"""
ok = True
# -style=file picks up the closest .clang-format
cmd = "{} -style=file {}".format(CLANG_FORMAT_PATH, filename)
async with semaphore:
proc = await asyncio.create_subprocess_shell(cmd, stdout=asyncio.subprocess.PIPE)
Reported by Pylint.
Line: 86
Column: 13
with open(filename) as orig:
orig_contents = orig.read()
if formatted_contents != orig_contents:
ok = False
if verbose:
print("{} is not formatted correctly".format(filename))
return ok
Reported by Pylint.
Line: 112
Column: 5
if not diff:
args.append("-i")
ok = True
# Semaphore to bound the number of subprocesses that can be created at once to format files.
semaphore = asyncio.Semaphore(max_processes)
# Format files in parallel.
Reported by Pylint.
Line: 119
Column: 1
# Format files in parallel.
if diff:
for f in asyncio.as_completed([file_clang_formatted_correctly(f, semaphore, verbose) for f in get_allowlisted_files()]):
ok &= await f
if ok:
print("All files formatted correctly")
else:
Reported by Pylint.
Line: 119
Column: 13
# Format files in parallel.
if diff:
for f in asyncio.as_completed([file_clang_formatted_correctly(f, semaphore, verbose) for f in get_allowlisted_files()]):
ok &= await f
if ok:
print("All files formatted correctly")
else:
Reported by Pylint.
Line: 120
Column: 13
# Format files in parallel.
if diff:
for f in asyncio.as_completed([file_clang_formatted_correctly(f, semaphore, verbose) for f in get_allowlisted_files()]):
ok &= await f
if ok:
print("All files formatted correctly")
else:
print("Some files not formatted correctly")
Reported by Pylint.
Line: 127
Column: 1
else:
print("Some files not formatted correctly")
else:
await asyncio.gather(*[run_clang_format_on_file(f, semaphore, verbose) for f in get_allowlisted_files()])
return ok
def parse_args(args: List[str]) -> argparse.Namespace:
"""
Reported by Pylint.
torch/_sources.py
14 issues
Line: 7
Column: 1
from textwrap import dedent
from typing import Any, Optional, Tuple, List, NamedTuple
from torch._C import ErrorReport
from torch._C._jit_tree_views import SourceRangeFactory
def get_source_lines_and_file(
obj: Any,
error_msg: Optional[str] = None,
) -> Tuple[List[str], int, Optional[str]]:
Reported by Pylint.
Line: 1
Column: 1
import ast
import functools
import inspect
from textwrap import dedent
from typing import Any, Optional, Tuple, List, NamedTuple
from torch._C import ErrorReport
from torch._C._jit_tree_views import SourceRangeFactory
def get_source_lines_and_file(
Reported by Pylint.
Line: 22
Column: 5
try:
filename = inspect.getsourcefile(obj)
sourcelines, file_lineno = inspect.getsourcelines(obj)
except OSError as e:
msg = (f"Can't get source for {obj}. TorchScript requires source access in "
"order to carry out compilation, make sure original .py files are "
"available.")
if error_msg:
msg += '\n' + error_msg
Reported by Pylint.
Line: 51
Column: 12
return text[text.startswith(prefix) and len(prefix):]
# Find the line and line number containing the function definition
for i, l in enumerate(sourcelines):
if l.lstrip().startswith("def"):
idx = i
break
fn_def = sourcelines[idx]
Reported by Pylint.
Line: 71
Column: 1
# Thin wrapper around SourceRangeFactory to store extra metadata
# about the function-to-be-compiled.
class SourceContext(SourceRangeFactory):
def __init__(self, source, filename, file_lineno, leading_whitespace_len, uses_true_division=True):
super(SourceContext, self).__init__(source, filename, file_lineno, leading_whitespace_len)
self.uses_true_division = uses_true_division
self.filename = filename
Reported by Pylint.
Line: 71
Column: 1
# Thin wrapper around SourceRangeFactory to store extra metadata
# about the function-to-be-compiled.
class SourceContext(SourceRangeFactory):
def __init__(self, source, filename, file_lineno, leading_whitespace_len, uses_true_division=True):
super(SourceContext, self).__init__(source, filename, file_lineno, leading_whitespace_len)
self.uses_true_division = uses_true_division
self.filename = filename
Reported by Pylint.
Line: 72
Column: 5
# Thin wrapper around SourceRangeFactory to store extra metadata
# about the function-to-be-compiled.
class SourceContext(SourceRangeFactory):
def __init__(self, source, filename, file_lineno, leading_whitespace_len, uses_true_division=True):
super(SourceContext, self).__init__(source, filename, file_lineno, leading_whitespace_len)
self.uses_true_division = uses_true_division
self.filename = filename
Reported by Pylint.
Line: 72
Column: 1
# Thin wrapper around SourceRangeFactory to store extra metadata
# about the function-to-be-compiled.
class SourceContext(SourceRangeFactory):
def __init__(self, source, filename, file_lineno, leading_whitespace_len, uses_true_division=True):
super(SourceContext, self).__init__(source, filename, file_lineno, leading_whitespace_len)
self.uses_true_division = uses_true_division
self.filename = filename
Reported by Pylint.
Line: 73
Column: 9
# about the function-to-be-compiled.
class SourceContext(SourceRangeFactory):
def __init__(self, source, filename, file_lineno, leading_whitespace_len, uses_true_division=True):
super(SourceContext, self).__init__(source, filename, file_lineno, leading_whitespace_len)
self.uses_true_division = uses_true_division
self.filename = filename
@functools.lru_cache(maxsize=None)
Reported by Pylint.
Line: 79
Column: 1
@functools.lru_cache(maxsize=None)
def make_source_context(*args):
return SourceContext(*args)
def fake_range():
return SourceContext('', None, 0, 0).make_raw_range(0, 1)
Reported by Pylint.
torch/ao/sparsity/sparsifier/weight_norm_sparsifier.py
14 issues
Line: 6
Column: 1
import torch
import torch.nn.functional as F
from .base_sparsifier import BaseSparsifier
def _flat_idx_to_2d(idx, shape):
rows = idx // shape[1]
cols = idx % shape[1]
return rows, cols
Reported by Pylint.
Line: 33
Column: 25
# TODO: Add support for multiple parametrizations for the same weight
mask = layer.parametrizations.weight[0].mask
if sparsity_level <= 0:
mask.data = torch.ones(layer.weight.shape, device=layer.weight.device)
elif sparsity_level >= 1.0:
mask.data = torch.zeros(layer.weight.shape, device=layer.weight.device)
else:
ww = layer.weight * layer.weight
ww_reshaped = ww.reshape(1, *ww.shape)
Reported by Pylint.
Line: 35
Column: 25
if sparsity_level <= 0:
mask.data = torch.ones(layer.weight.shape, device=layer.weight.device)
elif sparsity_level >= 1.0:
mask.data = torch.zeros(layer.weight.shape, device=layer.weight.device)
else:
ww = layer.weight * layer.weight
ww_reshaped = ww.reshape(1, *ww.shape)
ww_pool = F.avg_pool2d(ww_reshaped, kernel_size=sparse_block_shape,
stride=sparse_block_shape, ceil_mode=True)
Reported by Pylint.
Line: 42
Column: 29
ww_pool = F.avg_pool2d(ww_reshaped, kernel_size=sparse_block_shape,
stride=sparse_block_shape, ceil_mode=True)
ww_pool_flat = ww_pool.flatten()
_, sorted_idx = torch.sort(ww_pool_flat)
threshold_idx = int(round(sparsity_level * len(sorted_idx)))
sorted_idx = sorted_idx[:threshold_idx]
rows, cols = _flat_idx_to_2d(sorted_idx, ww_pool.shape[1:])
rows *= sparse_block_shape[0]
cols *= sparse_block_shape[1]
Reported by Pylint.
Line: 49
Column: 24
rows *= sparse_block_shape[0]
cols *= sparse_block_shape[1]
new_mask = torch.ones(ww.shape, device=layer.weight.device)
for row, col in zip(rows, cols):
new_mask[row:row + sparse_block_shape[0],
col:col + sparse_block_shape[1]] = 0
mask.data *= new_mask
Reported by Pylint.
Line: 26
Column: 1
}
super().__init__(defaults=defaults)
def update_mask(self, layer, sparsity_level, sparse_block_shape,
zeros_per_block, **kwargs):
if zeros_per_block != reduce((lambda x, y: x * y), sparse_block_shape):
raise NotImplementedError('Partial block sparsity is not yet there')
# TODO: Add support for multiple parametrizations for the same weight
mask = layer.parametrizations.weight[0].mask
Reported by Pylint.
Line: 30
Column: 3
zeros_per_block, **kwargs):
if zeros_per_block != reduce((lambda x, y: x * y), sparse_block_shape):
raise NotImplementedError('Partial block sparsity is not yet there')
# TODO: Add support for multiple parametrizations for the same weight
mask = layer.parametrizations.weight[0].mask
if sparsity_level <= 0:
mask.data = torch.ones(layer.weight.shape, device=layer.weight.device)
elif sparsity_level >= 1.0:
mask.data = torch.zeros(layer.weight.shape, device=layer.weight.device)
Reported by Pylint.
Line: 1
Column: 1
from functools import reduce
import torch
import torch.nn.functional as F
from .base_sparsifier import BaseSparsifier
def _flat_idx_to_2d(idx, shape):
rows = idx // shape[1]
Reported by Pylint.
Line: 13
Column: 1
cols = idx % shape[1]
return rows, cols
class WeightNormSparsifier(BaseSparsifier):
def __init__(self,
sparsity_level=0.5, sparse_block_shape=(1, 4),
zeros_per_block=None):
if zeros_per_block is None:
zeros_per_block = reduce((lambda x, y: x * y), sparse_block_shape)
Reported by Pylint.
Line: 13
Column: 1
cols = idx % shape[1]
return rows, cols
class WeightNormSparsifier(BaseSparsifier):
def __init__(self,
sparsity_level=0.5, sparse_block_shape=(1, 4),
zeros_per_block=None):
if zeros_per_block is None:
zeros_per_block = reduce((lambda x, y: x * y), sparse_block_shape)
Reported by Pylint.
torch/csrc/jit/serialization/unpickler.cpp
14 issues
Line: 748
Column: 5
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
AT_ASSERT(sz > buffer_remaining_);
const size_t from_old_buf = buffer_remaining_;
if (from_old_buf != 0) {
memcpy(dest, buffer_.data() + buffer_pos_, from_old_buf);
}
const size_t needed = sz - from_old_buf;
// Full read into the buffer. The calls here all explicitly
// assume that one buffer will be enough for any sz.
AT_ASSERT(sz <= buffer_.size());
Reported by FlawFinder.
Line: 758
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
if (buffer_remaining_ < needed) {
AT_ERROR("Unexpected end of pickler archive.");
}
memcpy(dest + from_old_buf, buffer_.data(), needed);
buffer_pos_ = needed; // assignment (0'ed from read)
buffer_remaining_ -= needed;
}
// Read a number of bytes from the input stream
Reported by FlawFinder.
Line: 180
Column: 23
CWE codes:
120
20
double Unpickler::readFloat() {
AT_ASSERT(sizeof(double) == 8);
double big_endian = read<double>();
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
double little_endian;
// Pickle floats are big endian, so reverse the bytes
auto big_endian_ptr = reinterpret_cast<const char*>(&big_endian);
Reported by FlawFinder.
Line: 202
Column: 22
CWE codes:
120
20
"Expected PROTO opcode at the start"
" of pickle archive, found ",
int(static_cast<uint8_t>(opcode)));
uint8_t protocol = read<uint8_t>();
TORCH_CHECK(
protocol == 2,
"Only Pickle protocol 2 is supported, found protocol = ",
protocol);
Reported by FlawFinder.
Line: 264
Column: 24
CWE codes:
120
20
stack_.emplace_back(empty_tuple_);
} break;
case PickleOpCode::BINPUT: {
size_t memo_id = read<uint8_t>();
setInput(memo_id);
} break;
case PickleOpCode::LONG_BINPUT: {
TORCH_CHECK(
std::numeric_limits<size_t>::max() >=
Reported by FlawFinder.
Line: 273
Column: 24
CWE codes:
120
20
std::numeric_limits<uint32_t>::max(),
"Found a LONG_BINPUT opcode, but size_t on this system is "
"not big enough to decode it");
size_t memo_id = read<uint32_t>();
setInput(memo_id);
} break;
case PickleOpCode::MARK: {
// Mark location of the container ivalue in the stack
marks_.push_back(stack_.size());
Reported by FlawFinder.
Line: 290
Column: 23
CWE codes:
120
20
stack_.emplace_back(IValue());
} break;
case PickleOpCode::BININT1: {
uint8_t value = read<uint8_t>();
stack_.emplace_back(int64_t(value));
} break;
case PickleOpCode::BININT2: {
uint16_t value = read<uint16_t>();
stack_.emplace_back(int64_t(value));
Reported by FlawFinder.
Line: 294
Column: 24
CWE codes:
120
20
stack_.emplace_back(int64_t(value));
} break;
case PickleOpCode::BININT2: {
uint16_t value = read<uint16_t>();
stack_.emplace_back(int64_t(value));
} break;
case PickleOpCode::BININT: {
int32_t value = read<int32_t>();
stack_.emplace_back(int64_t(value));
Reported by FlawFinder.
Line: 298
Column: 23
CWE codes:
120
20
stack_.emplace_back(int64_t(value));
} break;
case PickleOpCode::BININT: {
int32_t value = read<int32_t>();
stack_.emplace_back(int64_t(value));
} break;
case PickleOpCode::LONG1: {
// Only read LONG1s with 8 as the length
uint8_t length = read<uint8_t>();
Reported by FlawFinder.
Line: 303
Column: 24
CWE codes:
120
20
} break;
case PickleOpCode::LONG1: {
// Only read LONG1s with 8 as the length
uint8_t length = read<uint8_t>();
TORCH_CHECK(length == 8, "Expected length to be 8, got ", int(length));
stack_.emplace_back(int64_t(read<int64_t>()));
} break;
case PickleOpCode::BINUNICODE: {
uint32_t length = read<uint32_t>();
Reported by FlawFinder.
benchmarks/operator_benchmark/pt/chunk_test.py
14 issues
Line: 2
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for Chunk operator"""
# Configs for PT Chunk operator
chunk_short_configs = op_bench.config_list(
Reported by Pylint.
Line: 9
Column: 23
# Configs for PT Chunk operator
chunk_short_configs = op_bench.config_list(
attr_names=["M", "N", "chunks"],
attrs=[
[8, 8, 2],
[256, 512, 2],
[512, 512, 2],
Reported by Pylint.
Line: 22
Column: 23
tags=["short"],
)
chunks_long_configs = op_bench.cross_product_configs(
M=[128, 1024],
N=[128, 1024],
chunks=[2, 4],
device=['cpu', 'cuda'],
tags=['long']
Reported by Pylint.
Line: 31
Column: 22
)
class ChunkBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, chunks, device):
self.inputs = {
"input_one": torch.rand(M, N, device=device),
"chunks": chunks
}
Reported by Pylint.
Line: 43
Column: 1
return torch.chunk(input_one, chunks)
op_bench.generate_pt_test(chunk_short_configs + chunks_long_configs,
ChunkBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 5
Column: 1
import torch
"""Microbenchmarks for Chunk operator"""
# Configs for PT Chunk operator
chunk_short_configs = op_bench.config_list(
attr_names=["M", "N", "chunks"],
Reported by Pylint.
Line: 33
Column: 9
class ChunkBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, chunks, device):
self.inputs = {
"input_one": torch.rand(M, N, device=device),
"chunks": chunks
}
self.set_module_name("chunk")
Reported by Pylint.
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for Chunk operator"""
# Configs for PT Chunk operator
chunk_short_configs = op_bench.config_list(
Reported by Pylint.
Line: 31
Column: 1
)
class ChunkBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, chunks, device):
self.inputs = {
"input_one": torch.rand(M, N, device=device),
"chunks": chunks
}
Reported by Pylint.
Line: 32
Column: 5
class ChunkBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, chunks, device):
self.inputs = {
"input_one": torch.rand(M, N, device=device),
"chunks": chunks
}
self.set_module_name("chunk")
Reported by Pylint.
benchmarks/operator_benchmark/pt/fill_test.py
14 issues
Line: 2
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmark for Fill_ operator."""
fill_short_configs = op_bench.config_list(
attr_names=["N"],
attrs=[
[1],
Reported by Pylint.
Line: 6
Column: 22
"""Microbenchmark for Fill_ operator."""
fill_short_configs = op_bench.config_list(
attr_names=["N"],
attrs=[
[1],
[1024],
[2048],
Reported by Pylint.
Line: 20
Column: 21
tags=["short"],
)
fill_long_configs = op_bench.cross_product_configs(
N=[10, 1000],
device=torch.testing.get_all_device_types(),
dtype=[torch.bool, torch.int8, torch.uint8, torch.int16, torch.int32,
torch.int64, torch.half, torch.float, torch.double],
tags=["long"]
Reported by Pylint.
Line: 29
Column: 22
)
class Fill_Benchmark(op_bench.TorchBenchmarkBase):
def init(self, N, device, dtype):
self.inputs = {
"input_one": torch.zeros(N, device=device).type(dtype)
}
self.set_module_name("fill_")
Reported by Pylint.
Line: 40
Column: 1
return input_one.fill_(10)
op_bench.generate_pt_test(fill_short_configs + fill_long_configs,
Fill_Benchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 4
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmark for Fill_ operator."""
fill_short_configs = op_bench.config_list(
attr_names=["N"],
attrs=[
[1],
Reported by Pylint.
Line: 31
Column: 9
class Fill_Benchmark(op_bench.TorchBenchmarkBase):
def init(self, N, device, dtype):
self.inputs = {
"input_one": torch.zeros(N, device=device).type(dtype)
}
self.set_module_name("fill_")
def forward(self, input_one):
Reported by Pylint.
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmark for Fill_ operator."""
fill_short_configs = op_bench.config_list(
attr_names=["N"],
attrs=[
[1],
Reported by Pylint.
Line: 29
Column: 1
)
class Fill_Benchmark(op_bench.TorchBenchmarkBase):
def init(self, N, device, dtype):
self.inputs = {
"input_one": torch.zeros(N, device=device).type(dtype)
}
self.set_module_name("fill_")
Reported by Pylint.
Line: 29
Column: 1
)
class Fill_Benchmark(op_bench.TorchBenchmarkBase):
def init(self, N, device, dtype):
self.inputs = {
"input_one": torch.zeros(N, device=device).type(dtype)
}
self.set_module_name("fill_")
Reported by Pylint.
benchmarks/operator_benchmark/pt/layernorm_test.py
14 issues
Line: 3
Column: 1
import operator_benchmark as op_bench
import torch
import torch.nn.functional as F
"""Microbenchmarks for layernorm operator."""
layernorm_configs_short = op_bench.cross_product_configs(
Reported by Pylint.
Line: 4
Column: 1
import operator_benchmark as op_bench
import torch
import torch.nn.functional as F
"""Microbenchmarks for layernorm operator."""
layernorm_configs_short = op_bench.cross_product_configs(
Reported by Pylint.
Line: 9
Column: 27
"""Microbenchmarks for layernorm operator."""
layernorm_configs_short = op_bench.cross_product_configs(
dims=(
(1, 8, 16),
(8, 8, 16),
(32, 8, 16),
(64, 128, 56, 56),
Reported by Pylint.
Line: 20
Column: 26
)
class LayerNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dims):
input = (torch.rand(*dims) - 0.5) * 256
self.inputs = {
"input": input,
"weight": torch.rand(*input.size()[1:], dtype=torch.float),
Reported by Pylint.
Line: 35
Column: 1
input, input.size()[1:], weight=weight, bias=bias, eps=eps)
op_bench.generate_pt_test(layernorm_configs_short, LayerNormBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 7
Column: 1
import torch.nn.functional as F
"""Microbenchmarks for layernorm operator."""
layernorm_configs_short = op_bench.cross_product_configs(
dims=(
(1, 8, 16),
(8, 8, 16),
Reported by Pylint.
Line: 22
Column: 9
class LayerNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dims):
input = (torch.rand(*dims) - 0.5) * 256
self.inputs = {
"input": input,
"weight": torch.rand(*input.size()[1:], dtype=torch.float),
"bias": torch.rand(*input.size()[1:], dtype=torch.float),
"eps": 1e-5
Reported by Pylint.
Line: 23
Column: 9
class LayerNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dims):
input = (torch.rand(*dims) - 0.5) * 256
self.inputs = {
"input": input,
"weight": torch.rand(*input.size()[1:], dtype=torch.float),
"bias": torch.rand(*input.size()[1:], dtype=torch.float),
"eps": 1e-5
}
Reported by Pylint.
Line: 30
Column: 23
"eps": 1e-5
}
def forward(self, input, weight, bias, eps: float):
return F.layer_norm(
input, input.size()[1:], weight=weight, bias=bias, eps=eps)
op_bench.generate_pt_test(layernorm_configs_short, LayerNormBenchmark)
Reported by Pylint.
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
import torch.nn.functional as F
"""Microbenchmarks for layernorm operator."""
layernorm_configs_short = op_bench.cross_product_configs(
Reported by Pylint.
caffe2/contrib/warpctc/ctc_ops_test.py
14 issues
Line: 1
Column: 1
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, dyndep, test_util
Reported by Pylint.
Line: 14
Column: 1
workspace.GlobalInit(["python"])
def softmax(w):
maxes = np.amax(w, axis=-1, keepdims=True)
e = np.exp(w - maxes)
dist = e / np.sum(e, axis=-1, keepdims=True)
return dist
Reported by Pylint.
Line: 14
Column: 1
workspace.GlobalInit(["python"])
def softmax(w):
maxes = np.amax(w, axis=-1, keepdims=True)
e = np.exp(w - maxes)
dist = e / np.sum(e, axis=-1, keepdims=True)
return dist
Reported by Pylint.
Line: 16
Column: 5
def softmax(w):
maxes = np.amax(w, axis=-1, keepdims=True)
e = np.exp(w - maxes)
dist = e / np.sum(e, axis=-1, keepdims=True)
return dist
class CTCOpsTest(test_util.TestCase):
Reported by Pylint.
Line: 21
Column: 1
return dist
class CTCOpsTest(test_util.TestCase):
def verify_cost(self, device_option, is_test, skip_input_lengths=False):
alphabet_size = 5
N = 1
T = 2
Reported by Pylint.
Line: 22
Column: 5
class CTCOpsTest(test_util.TestCase):
def verify_cost(self, device_option, is_test, skip_input_lengths=False):
alphabet_size = 5
N = 1
T = 2
inputs = np.asarray(
Reported by Pylint.
Line: 22
Column: 5
class CTCOpsTest(test_util.TestCase):
def verify_cost(self, device_option, is_test, skip_input_lengths=False):
alphabet_size = 5
N = 1
T = 2
inputs = np.asarray(
Reported by Pylint.
Line: 24
Column: 9
class CTCOpsTest(test_util.TestCase):
def verify_cost(self, device_option, is_test, skip_input_lengths=False):
alphabet_size = 5
N = 1
T = 2
inputs = np.asarray(
[
[[0.1, 0.6, 0.1, 0.1, 0.1]],
Reported by Pylint.
Line: 25
Column: 9
def verify_cost(self, device_option, is_test, skip_input_lengths=False):
alphabet_size = 5
N = 1
T = 2
inputs = np.asarray(
[
[[0.1, 0.6, 0.1, 0.1, 0.1]],
[[0.1, 0.1, 0.6, 0.1, 0.1]],
Reported by Pylint.
Line: 66
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
if not is_test:
# Make sure inputs_grad was added by AddGradientOperators and
# it is equal to the inputs_grad_to_be_copied blob returned by CTCop
assert np.array_equal(
self.ws.blobs["inputs_grad"].fetch(),
self.ws.blobs["inputs_grad_to_be_copied"].fetch()
)
def test_ctc_cost_cpu(self):
Reported by Bandit.
caffe2/experiments/python/sparse_funhash_op_test.py
14 issues
Line: 22
Column: 1
import numpy as np
from scipy.sparse import coo_matrix
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core
Reported by Pylint.
Line: 24
Column: 1
import numpy as np
from scipy.sparse import coo_matrix
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 25
Column: 1
from scipy.sparse import coo_matrix
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 40
Column: 26
sparsity=st.floats(min_value=0.1, max_value=1.0),
**hu.gcs)
def test_funhash(self, n_out, n_in, n_data, n_weight, n_alpha, sparsity,
gc, dc):
A = np.random.rand(n_data, n_in)
A[A > sparsity] = 0
A_coo = coo_matrix(A)
val, key, seg = A_coo.data, A_coo.col, A_coo.row
Reported by Pylint.
Line: 1
Column: 1
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
Reported by Pylint.
Line: 31
Column: 1
import caffe2.python.hypothesis_test_util as hu
class TestFunHash(hu.HypothesisTestCase):
@given(n_out=st.integers(min_value=5, max_value=20),
n_in=st.integers(min_value=10, max_value=20),
n_data=st.integers(min_value=2, max_value=8),
n_weight=st.integers(min_value=8, max_value=15),
n_alpha=st.integers(min_value=3, max_value=8),
Reported by Pylint.
Line: 39
Column: 5
n_alpha=st.integers(min_value=3, max_value=8),
sparsity=st.floats(min_value=0.1, max_value=1.0),
**hu.gcs)
def test_funhash(self, n_out, n_in, n_data, n_weight, n_alpha, sparsity,
gc, dc):
A = np.random.rand(n_data, n_in)
A[A > sparsity] = 0
A_coo = coo_matrix(A)
val, key, seg = A_coo.data, A_coo.col, A_coo.row
Reported by Pylint.
Line: 39
Column: 5
n_alpha=st.integers(min_value=3, max_value=8),
sparsity=st.floats(min_value=0.1, max_value=1.0),
**hu.gcs)
def test_funhash(self, n_out, n_in, n_data, n_weight, n_alpha, sparsity,
gc, dc):
A = np.random.rand(n_data, n_in)
A[A > sparsity] = 0
A_coo = coo_matrix(A)
val, key, seg = A_coo.data, A_coo.col, A_coo.row
Reported by Pylint.
Line: 39
Column: 5
n_alpha=st.integers(min_value=3, max_value=8),
sparsity=st.floats(min_value=0.1, max_value=1.0),
**hu.gcs)
def test_funhash(self, n_out, n_in, n_data, n_weight, n_alpha, sparsity,
gc, dc):
A = np.random.rand(n_data, n_in)
A[A > sparsity] = 0
A_coo = coo_matrix(A)
val, key, seg = A_coo.data, A_coo.col, A_coo.row
Reported by Pylint.
Line: 39
Column: 5
n_alpha=st.integers(min_value=3, max_value=8),
sparsity=st.floats(min_value=0.1, max_value=1.0),
**hu.gcs)
def test_funhash(self, n_out, n_in, n_data, n_weight, n_alpha, sparsity,
gc, dc):
A = np.random.rand(n_data, n_in)
A[A > sparsity] = 0
A_coo = coo_matrix(A)
val, key, seg = A_coo.data, A_coo.col, A_coo.row
Reported by Pylint.
benchmarks/distributed/rpc/parameter_server/metrics/ProcessedMetricsPrinter.py
14 issues
Line: 3
Column: 1
import statistics
import pandas as pd
from tabulate import tabulate
class ProcessedMetricsPrinter:
def print_data_frame(self, name, processed_metrics):
Reported by Pylint.
Line: 4
Column: 1
import statistics
import pandas as pd
from tabulate import tabulate
class ProcessedMetricsPrinter:
def print_data_frame(self, name, processed_metrics):
Reported by Pylint.
Line: 1
Column: 1
import statistics
import pandas as pd
from tabulate import tabulate
class ProcessedMetricsPrinter:
def print_data_frame(self, name, processed_metrics):
Reported by Pylint.
Line: 1
Column: 1
import statistics
import pandas as pd
from tabulate import tabulate
class ProcessedMetricsPrinter:
def print_data_frame(self, name, processed_metrics):
Reported by Pylint.
Line: 7
Column: 1
from tabulate import tabulate
class ProcessedMetricsPrinter:
def print_data_frame(self, name, processed_metrics):
print(f"metrics for {name}")
data_frame = self.get_data_frame(processed_metrics)
print(tabulate(data_frame, showindex=False, headers=data_frame.columns, tablefmt="grid"))
Reported by Pylint.
Line: 9
Column: 5
class ProcessedMetricsPrinter:
def print_data_frame(self, name, processed_metrics):
print(f"metrics for {name}")
data_frame = self.get_data_frame(processed_metrics)
print(tabulate(data_frame, showindex=False, headers=data_frame.columns, tablefmt="grid"))
def combine_processed_metrics(self, processed_metrics_list):
Reported by Pylint.
Line: 14
Column: 5
data_frame = self.get_data_frame(processed_metrics)
print(tabulate(data_frame, showindex=False, headers=data_frame.columns, tablefmt="grid"))
def combine_processed_metrics(self, processed_metrics_list):
r"""
A method that merges the value arrays of the keys in the dictionary
of processed metrics.
Args:
Reported by Pylint.
Line: 54
Column: 5
processed_metric_totals[metric_name] += values
return processed_metric_totals
def get_data_frame(self, processed_metrics):
df = pd.DataFrame(
columns=['name', 'min', 'max', 'mean', 'variance', 'stdev']
)
for metric_name in sorted(processed_metrics.keys()):
values = processed_metrics[metric_name]
Reported by Pylint.
Line: 54
Column: 5
processed_metric_totals[metric_name] += values
return processed_metric_totals
def get_data_frame(self, processed_metrics):
df = pd.DataFrame(
columns=['name', 'min', 'max', 'mean', 'variance', 'stdev']
)
for metric_name in sorted(processed_metrics.keys()):
values = processed_metrics[metric_name]
Reported by Pylint.
Line: 55
Column: 9
return processed_metric_totals
def get_data_frame(self, processed_metrics):
df = pd.DataFrame(
columns=['name', 'min', 'max', 'mean', 'variance', 'stdev']
)
for metric_name in sorted(processed_metrics.keys()):
values = processed_metrics[metric_name]
row = {
Reported by Pylint.