The following issues were found:
test/test_jit_legacy.py
8 issues
Line: 3
Column: 1
import sys
sys.argv.append("--jit_executor=legacy")
from test_jit import * # noqa: F403
if __name__ == '__main__':
run_tests()
import test_jit_py3
suite = unittest.findTestCases(test_jit_py3)
unittest.TextTestRunner().run(suite)
Reported by Pylint.
Line: 6
Column: 5
from test_jit import * # noqa: F403
if __name__ == '__main__':
run_tests()
import test_jit_py3
suite = unittest.findTestCases(test_jit_py3)
unittest.TextTestRunner().run(suite)
Reported by Pylint.
Line: 7
Column: 5
if __name__ == '__main__':
run_tests()
import test_jit_py3
suite = unittest.findTestCases(test_jit_py3)
unittest.TextTestRunner().run(suite)
Reported by Pylint.
Line: 8
Column: 13
if __name__ == '__main__':
run_tests()
import test_jit_py3
suite = unittest.findTestCases(test_jit_py3)
unittest.TextTestRunner().run(suite)
Reported by Pylint.
Line: 9
Column: 5
run_tests()
import test_jit_py3
suite = unittest.findTestCases(test_jit_py3)
unittest.TextTestRunner().run(suite)
Reported by Pylint.
Line: 3
Column: 1
import sys
sys.argv.append("--jit_executor=legacy")
from test_jit import * # noqa: F403
if __name__ == '__main__':
run_tests()
import test_jit_py3
suite = unittest.findTestCases(test_jit_py3)
unittest.TextTestRunner().run(suite)
Reported by Pylint.
Line: 1
Column: 1
import sys
sys.argv.append("--jit_executor=legacy")
from test_jit import * # noqa: F403
if __name__ == '__main__':
run_tests()
import test_jit_py3
suite = unittest.findTestCases(test_jit_py3)
unittest.TextTestRunner().run(suite)
Reported by Pylint.
Line: 3
Column: 1
import sys
sys.argv.append("--jit_executor=legacy")
from test_jit import * # noqa: F403
if __name__ == '__main__':
run_tests()
import test_jit_py3
suite = unittest.findTestCases(test_jit_py3)
unittest.TextTestRunner().run(suite)
Reported by Pylint.
.github/scripts/lint_native_functions.py
8 issues
Line: 17
Column: 1
the YAML, not to be prescriptive about it.
'''
import ruamel.yaml # type: ignore[import]
import difflib
import sys
from pathlib import Path
from io import StringIO
Reported by Pylint.
Line: 18
Column: 1
'''
import ruamel.yaml # type: ignore[import]
import difflib
import sys
from pathlib import Path
from io import StringIO
def fn(base: str) -> str:
Reported by Pylint.
Line: 19
Column: 1
import ruamel.yaml # type: ignore[import]
import difflib
import sys
from pathlib import Path
from io import StringIO
def fn(base: str) -> str:
return str(base / Path("aten/src/ATen/native/native_functions.yaml"))
Reported by Pylint.
Line: 20
Column: 1
import ruamel.yaml # type: ignore[import]
import difflib
import sys
from pathlib import Path
from io import StringIO
def fn(base: str) -> str:
return str(base / Path("aten/src/ATen/native/native_functions.yaml"))
Reported by Pylint.
Line: 21
Column: 1
import difflib
import sys
from pathlib import Path
from io import StringIO
def fn(base: str) -> str:
return str(base / Path("aten/src/ATen/native/native_functions.yaml"))
with open(Path(__file__).parent.parent.parent / fn('.'), "r") as f:
Reported by Pylint.
Line: 23
Column: 1
from pathlib import Path
from io import StringIO
def fn(base: str) -> str:
return str(base / Path("aten/src/ATen/native/native_functions.yaml"))
with open(Path(__file__).parent.parent.parent / fn('.'), "r") as f:
contents = f.read()
Reported by Pylint.
Line: 23
Column: 1
from pathlib import Path
from io import StringIO
def fn(base: str) -> str:
return str(base / Path("aten/src/ATen/native/native_functions.yaml"))
with open(Path(__file__).parent.parent.parent / fn('.'), "r") as f:
contents = f.read()
Reported by Pylint.
Line: 50
Column: 1
native_functions.yaml failed lint; please apply the diff below to fix lint.
If you think this is in error, please see .github/scripts/lint_native_functions.py
""", file=sys.stderr)
sys.stdout.writelines(difflib.unified_diff(contents.splitlines(True), new_contents.splitlines(True), fn('a'), fn('b')))
sys.exit(1)
Reported by Pylint.
c10/util/llvmMathExtras.h
8 issues
Line: 285
Column: 23
CWE codes:
119
120
Suggestion:
Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length
/// Macro compressed bit reversal table for 256 bits.
///
/// http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
static const unsigned char BitReverseTable256[256] = {
#define R2(n) n, n + 2 * 64, n + 1 * 64, n + 3 * 64
#define R4(n) R2(n), R2(n + 2 * 16), R2(n + 1 * 16), R2(n + 3 * 16)
#define R6(n) R4(n), R4(n + 2 * 4), R4(n + 1 * 4), R4(n + 3 * 4)
R6(0),
R6(2),
Reported by FlawFinder.
Line: 301
Column: 12
CWE codes:
119
120
Suggestion:
Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length
/// Reverse the bits in \p Val.
template <typename T>
T reverseBits(T Val) {
unsigned char in[sizeof(Val)];
unsigned char out[sizeof(Val)];
std::memcpy(in, &Val, sizeof(Val));
for (unsigned i = 0; i < sizeof(Val); ++i)
out[(sizeof(Val) - i) - 1] = BitReverseTable256[in[i]];
std::memcpy(&Val, out, sizeof(Val));
Reported by FlawFinder.
Line: 302
Column: 12
CWE codes:
119
120
Suggestion:
Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length
template <typename T>
T reverseBits(T Val) {
unsigned char in[sizeof(Val)];
unsigned char out[sizeof(Val)];
std::memcpy(in, &Val, sizeof(Val));
for (unsigned i = 0; i < sizeof(Val); ++i)
out[(sizeof(Val) - i) - 1] = BitReverseTable256[in[i]];
std::memcpy(&Val, out, sizeof(Val));
return Val;
Reported by FlawFinder.
Line: 303
Column: 8
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
T reverseBits(T Val) {
unsigned char in[sizeof(Val)];
unsigned char out[sizeof(Val)];
std::memcpy(in, &Val, sizeof(Val));
for (unsigned i = 0; i < sizeof(Val); ++i)
out[(sizeof(Val) - i) - 1] = BitReverseTable256[in[i]];
std::memcpy(&Val, out, sizeof(Val));
return Val;
}
Reported by FlawFinder.
Line: 612
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
inline double BitsToDouble(uint64_t Bits) {
double D;
static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
memcpy(&D, &Bits, sizeof(Bits));
return D;
}
/// This function takes a 32-bit integer and returns the bit equivalent float.
inline float BitsToFloat(uint32_t Bits) {
Reported by FlawFinder.
Line: 621
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
// TODO: Use bit_cast once C++20 becomes available.
float F;
static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
memcpy(&F, &Bits, sizeof(Bits));
return F;
}
/// This function takes a double and returns the bit equivalent 64-bit integer.
/// Note that copying doubles around changes the bits of NaNs on some hosts,
Reported by FlawFinder.
Line: 631
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
inline uint64_t DoubleToBits(double Double) {
uint64_t Bits;
static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
memcpy(&Bits, &Double, sizeof(Double));
return Bits;
}
/// This function takes a float and returns the bit equivalent 32-bit integer.
/// Note that copying floats around changes the bits of NaNs on some hosts,
Reported by FlawFinder.
Line: 641
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
inline uint32_t FloatToBits(float Float) {
uint32_t Bits;
static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
memcpy(&Bits, &Float, sizeof(Float));
return Bits;
}
/// A and B are either alignments or offsets. Return the minimum alignment that
/// may be assumed after adding the two together.
Reported by FlawFinder.
caffe2/python/modifier_context.py
8 issues
Line: 66
Column: 24
self._context_class().current().push_modifiers(self._modifiers)
return self
def __exit__(self, type, value, traceback):
self._context_class().current().pop_modifiers()
Reported by Pylint.
Line: 1
Column: 1
# @package modifier_context
# Module caffe2.python.modifier_context
DEFAULT_MODIFIER = 'DEFAULT'
Reported by Pylint.
Line: 12
Column: 1
DEFAULT_MODIFIER = 'DEFAULT'
class ModifierContext(object):
"""
provide context to allow param_info to have different modifiers
"""
def __init__(self):
Reported by Pylint.
Line: 23
Column: 13
def _rebuild_modifiers(self):
self._modifiers = {}
for m in self._modifiers_list:
self._modifiers.update(m)
def _has_modifier(self, name):
return name in self._modifiers
Reported by Pylint.
Line: 32
Column: 5
def _get_modifier(self, name):
return self._modifiers.get(name)
def push_modifiers(self, modifiers):
# modifier override is allowed
self._modifiers_list.append(modifiers)
self._modifiers.update(modifiers)
def pop_modifiers(self):
Reported by Pylint.
Line: 37
Column: 5
self._modifiers_list.append(modifiers)
self._modifiers.update(modifiers)
def pop_modifiers(self):
assert len(self._modifiers_list) > 0
self._modifiers_list.pop()
self._rebuild_modifiers()
Reported by Pylint.
Line: 38
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
self._modifiers.update(modifiers)
def pop_modifiers(self):
assert len(self._modifiers_list) > 0
self._modifiers_list.pop()
self._rebuild_modifiers()
class UseModifierBase(object):
Reported by Bandit.
Line: 43
Column: 1
self._rebuild_modifiers()
class UseModifierBase(object):
'''
context class to allow setting the current context.
Example usage with layer:
modifiers = {'modifier1': modifier1, 'modifier2': modifier2}
with Modifiers(modifiers):
Reported by Pylint.
benchmarks/fastrnns/__init__.py
8 issues
Line: 1
Column: 1
from .cells import * # noqa: F403
from .factory import * # noqa: F403
# (output, next_state) = cell(input, state)
seqLength = 100
numLayers = 2
inputSize = 512
hiddenSize = 512
miniBatch = 64
Reported by Pylint.
Line: 2
Column: 1
from .cells import * # noqa: F403
from .factory import * # noqa: F403
# (output, next_state) = cell(input, state)
seqLength = 100
numLayers = 2
inputSize = 512
hiddenSize = 512
miniBatch = 64
Reported by Pylint.
Line: 1
Column: 1
from .cells import * # noqa: F403
from .factory import * # noqa: F403
# (output, next_state) = cell(input, state)
seqLength = 100
numLayers = 2
inputSize = 512
hiddenSize = 512
miniBatch = 64
Reported by Pylint.
Line: 5
Column: 1
from .factory import * # noqa: F403
# (output, next_state) = cell(input, state)
seqLength = 100
numLayers = 2
inputSize = 512
hiddenSize = 512
miniBatch = 64
Reported by Pylint.
Line: 6
Column: 1
# (output, next_state) = cell(input, state)
seqLength = 100
numLayers = 2
inputSize = 512
hiddenSize = 512
miniBatch = 64
Reported by Pylint.
Line: 7
Column: 1
# (output, next_state) = cell(input, state)
seqLength = 100
numLayers = 2
inputSize = 512
hiddenSize = 512
miniBatch = 64
Reported by Pylint.
Line: 8
Column: 1
seqLength = 100
numLayers = 2
inputSize = 512
hiddenSize = 512
miniBatch = 64
Reported by Pylint.
Line: 9
Column: 1
numLayers = 2
inputSize = 512
hiddenSize = 512
miniBatch = 64
Reported by Pylint.
caffe2/operators/lengths_reducer_ops.h
8 issues
Line: 311
Column: 7
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
// implement the functionality index_select(core, 1, ind_slice)
auto num_of_elements = ranks[idx] * factor_j[idx] * ranks[idx + 1];
for (int i = 0; i < bs; i++) {
memcpy(
tgt_slice[i].data(),
core + ind_slice[i] * num_of_elements,
num_of_elements * sizeof(T));
}
return true;
Reported by FlawFinder.
Line: 386
Column: 16
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
Z_ptr.data(),
&context_);
for (int b = 0; b < bs; b++) {
std::memcpy(Y_ptr[b], Z_ptr[b], (emb_size * max_rank) * sizeof(T));
}
rows *= factor_j[i];
}
// save the intermediate output for backward path
// shape for the core
Reported by FlawFinder.
Line: 397
Column: 16
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
auto* core_data = Output(i + 1, shape, at::dtype<T>());
T* out_core = core_data->template mutable_data<T>();
for (int b = 0; b < bs; b++) {
std::memcpy(
out_core + b * rows * ranks[i + 1],
Y_ptr[b],
rows * ranks[i + 1] * sizeof(T));
}
}
Reported by FlawFinder.
Line: 417
Column: 9
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
for (int i = 0; i <= bs; i++) {
while ((length_idx < segments) && (i == cum_lengths[length_idx])) {
// store the tmp_sum into output
memcpy(
&out_data[length_idx * emb_size],
tmp_sum.data(),
emb_size * sizeof(T));
length_idx++;
fill(tmp_sum.begin(), tmp_sum.end(), 0.0f);
Reported by FlawFinder.
Line: 553
Column: 5
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
vector<vector<int64_t>> index_slice(bs, vector<int64_t>(3, 0));
for (int64_t b = 0; b < bs; b++) {
memcpy(index_slice[b].data(), index_out_data + b * 3, 3 * sizeof(int64_t));
}
vector<const T*> A_ptr(bs);
vector<T*> B_ptr(bs);
vector<T*> C_ptr(bs);
Reported by FlawFinder.
Line: 570
Column: 7
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
for (int64_t start = data_index;
data_index < start + lengths_data[range_index];
++data_index) {
memcpy(
core2_out_grad[data_index].data(),
dY_data + range_index * emb_size,
emb_size * sizeof(T));
}
}
Reported by FlawFinder.
Line: 616
Column: 5
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
for (int i = 0; i < num_of_elements; i++) {
dCore2_data[index_slice[b][2] * num_of_elements + i] += C_ptr[b][i];
}
memcpy(
core2_slice[b].data(),
core2_data + index_slice[b][2] * num_of_elements,
sizeof(T) * num_of_elements);
}
Reported by FlawFinder.
Line: 683
Column: 5
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
for (int i = 0; i < num_of_elements; i++) {
dCore1_data[index_slice[b][1] * num_of_elements + i] += C_ptr[b][i];
}
memcpy(
core1_slice[b].data(),
core1_data + index_slice[b][1] * num_of_elements,
sizeof(T) * num_of_elements);
}
Reported by FlawFinder.
caffe2/python/benchmarks/sparse_lengths_sum_nbit_benchmark.py
8 issues
Line: 6
Column: 1
import argparse
import datetime
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
def benchmark_sparse_lengths_sum(
Reported by Pylint.
Line: 8
Column: 1
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
def benchmark_sparse_lengths_sum(
categorical_limit,
embedding_size,
Reported by Pylint.
Line: 1
Column: 1
import argparse
import datetime
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
Reported by Pylint.
Line: 11
Column: 1
from caffe2.python import core, workspace
def benchmark_sparse_lengths_sum(
categorical_limit,
embedding_size,
average_len,
batch_size,
iterations,
Reported by Pylint.
Line: 11
Column: 1
from caffe2.python import core, workspace
def benchmark_sparse_lengths_sum(
categorical_limit,
embedding_size,
average_len,
batch_size,
iterations,
Reported by Pylint.
Line: 28
Column: 5
data *= 17.01
init_net = core.Net("init_net")
op = core.CreateOperator(
"FloatToFused" + str(bit_rate) + "BitRowwiseQuantized", "X", "X_q"
)
init_net.Proto().op.extend([op])
workspace.FeedBlob("X", data)
Reported by Pylint.
Line: 38
Column: 5
# In order to produce truly random lengths and indices, we will embed a
# Python operator in the net to generate them.
def f(_, outputs):
lengths = np.random.randint(
int(average_len * 0.75), int(average_len * 1.25), batch_size
).astype(np.int32)
indices = np.random.randint(0, categorical_limit, np.sum(lengths)).astype(
np.int64
Reported by Pylint.
Line: 58
Column: 5
"huge_blob", np.random.randn(l3_cache_size).astype(np.float32)
)
net.Scale("huge_blob", "huge_blob_2x", value=2.0)
op = core.CreateOperator(
"SparseLengthsSumFused" + str(bit_rate) + "BitRowwise",
["X_q", "indices", "lengths"],
"Y",
)
net.Proto().op.extend([op])
Reported by Pylint.
benchmarks/distributed/rpc/parameter_server/trainer/__init__.py
8 issues
Line: 1
Column: 1
from .criterions import cel
from .ddp_models import basic_ddp_model
from .hook_states import BasicHookState
from .hooks import allreduce_hook, hybrid_hook, rpc_hook, sparse_rpc_hook
from .iteration_steps import basic_iteration_step
from .preprocess_data import preprocess_dummy_data
from .trainer import DdpTrainer
criterion_map = {
Reported by Pylint.
Line: 2
Column: 1
from .criterions import cel
from .ddp_models import basic_ddp_model
from .hook_states import BasicHookState
from .hooks import allreduce_hook, hybrid_hook, rpc_hook, sparse_rpc_hook
from .iteration_steps import basic_iteration_step
from .preprocess_data import preprocess_dummy_data
from .trainer import DdpTrainer
criterion_map = {
Reported by Pylint.
Line: 3
Column: 1
from .criterions import cel
from .ddp_models import basic_ddp_model
from .hook_states import BasicHookState
from .hooks import allreduce_hook, hybrid_hook, rpc_hook, sparse_rpc_hook
from .iteration_steps import basic_iteration_step
from .preprocess_data import preprocess_dummy_data
from .trainer import DdpTrainer
criterion_map = {
Reported by Pylint.
Line: 4
Column: 1
from .criterions import cel
from .ddp_models import basic_ddp_model
from .hook_states import BasicHookState
from .hooks import allreduce_hook, hybrid_hook, rpc_hook, sparse_rpc_hook
from .iteration_steps import basic_iteration_step
from .preprocess_data import preprocess_dummy_data
from .trainer import DdpTrainer
criterion_map = {
Reported by Pylint.
Line: 5
Column: 1
from .ddp_models import basic_ddp_model
from .hook_states import BasicHookState
from .hooks import allreduce_hook, hybrid_hook, rpc_hook, sparse_rpc_hook
from .iteration_steps import basic_iteration_step
from .preprocess_data import preprocess_dummy_data
from .trainer import DdpTrainer
criterion_map = {
"cel": cel
Reported by Pylint.
Line: 6
Column: 1
from .hook_states import BasicHookState
from .hooks import allreduce_hook, hybrid_hook, rpc_hook, sparse_rpc_hook
from .iteration_steps import basic_iteration_step
from .preprocess_data import preprocess_dummy_data
from .trainer import DdpTrainer
criterion_map = {
"cel": cel
}
Reported by Pylint.
Line: 7
Column: 1
from .hooks import allreduce_hook, hybrid_hook, rpc_hook, sparse_rpc_hook
from .iteration_steps import basic_iteration_step
from .preprocess_data import preprocess_dummy_data
from .trainer import DdpTrainer
criterion_map = {
"cel": cel
}
Reported by Pylint.
Line: 1
Column: 1
from .criterions import cel
from .ddp_models import basic_ddp_model
from .hook_states import BasicHookState
from .hooks import allreduce_hook, hybrid_hook, rpc_hook, sparse_rpc_hook
from .iteration_steps import basic_iteration_step
from .preprocess_data import preprocess_dummy_data
from .trainer import DdpTrainer
criterion_map = {
Reported by Pylint.
caffe2/python/layers/blob_weighted_sum.py
8 issues
Line: 71
Column: 13
def add_ops(self, net):
net.WeightedSum(
[x for pair in zip(self.blobs, self.weights) for x in pair],
self.output_schema(),
grad_on_w=True,
)
Reported by Pylint.
Line: 1
Column: 1
## @package BlobWeightedSum
# Module caffe2.python.layers.blob_weighted_sum
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
Reported by Pylint.
Line: 17
Column: 5
This layer implements the weighted sum:
weighted element-wise sum of input blobs.
"""
def __init__(
self,
model,
input_record,
init_weights=None,
weight_optim=None,
Reported by Pylint.
Line: 26
Column: 9
name='blob_weighted_sum',
**kwargs
):
super(BlobWeightedSum, self).__init__(model, name, input_record, **kwargs)
self.blobs = self.input_record.field_blobs()
self.num_weights = len(self.blobs)
assert self.num_weights > 1, (
Reported by Pylint.
Line: 31
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
self.blobs = self.input_record.field_blobs()
self.num_weights = len(self.blobs)
assert self.num_weights > 1, (
"BlobWeightedSum expects more than one input blobs"
)
assert len(input_record.field_types()[0].shape) > 0, (
"BlobWeightedSum expects limited dimensions of the input tensor"
Reported by Bandit.
Line: 35
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
"BlobWeightedSum expects more than one input blobs"
)
assert len(input_record.field_types()[0].shape) > 0, (
"BlobWeightedSum expects limited dimensions of the input tensor"
)
assert all(
input_record.field_types()[0].shape == input_record.field_types()[i].shape
Reported by Bandit.
Line: 39
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
"BlobWeightedSum expects limited dimensions of the input tensor"
)
assert all(
input_record.field_types()[0].shape == input_record.field_types()[i].shape
for i in range(1, self.num_weights)
), "Shape of input blobs should be the same shape {}".format(
input_record.field_types()[0].shape
)
Reported by Bandit.
Line: 47
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
)
if init_weights:
assert self.num_weights == len(init_weights), (
"the size of init_weights should be the same as input blobs, "
"expects {}, got {}".format(self.num_weights, len(init_weights))
)
else:
init_weights = [1.0] * self.num_weights
Reported by Bandit.
benchmarks/distributed/rpc/parameter_server/metrics/MetricBase.py
8 issues
Line: 1
Column: 1
from abc import ABC, abstractmethod
class MetricBase(ABC):
def __init__(self, name):
self.name = name
self.start = None
self.end = None
Reported by Pylint.
Line: 1
Column: 1
from abc import ABC, abstractmethod
class MetricBase(ABC):
def __init__(self, name):
self.name = name
self.start = None
self.end = None
Reported by Pylint.
Line: 4
Column: 1
from abc import ABC, abstractmethod
class MetricBase(ABC):
def __init__(self, name):
self.name = name
self.start = None
self.end = None
Reported by Pylint.
Line: 11
Column: 5
self.end = None
@abstractmethod
def record_start(self):
return
@abstractmethod
def record_end(self):
return
Reported by Pylint.
Line: 15
Column: 5
return
@abstractmethod
def record_end(self):
return
@abstractmethod
def elapsed_time(self):
return
Reported by Pylint.
Line: 19
Column: 5
return
@abstractmethod
def elapsed_time(self):
return
def get_name(self):
return self.name
Reported by Pylint.
Line: 22
Column: 5
def elapsed_time(self):
return
def get_name(self):
return self.name
def get_end(self):
return self.end
Reported by Pylint.
Line: 25
Column: 5
def get_name(self):
return self.name
def get_end(self):
return self.end
Reported by Pylint.