The following issues were found:
caffe2/python/modeling/get_entry_from_blobs.py
9 issues
Line: 40
Column: 5
in blob[i1]
"""
def __init__(self, blobs, logging_frequency, i1=0, i2=0):
self._blobs = blobs
self._logging_frequency = logging_frequency
self._i1 = i1
self._i2 = i2
self._field_name_suffix = '_{0}_{1}'.format(i1, i2) if i2 >= 0 \
Reported by Pylint.
Line: 48
Column: 5
self._field_name_suffix = '_{0}_{1}'.format(i1, i2) if i2 >= 0 \
else '_{0}_all'.format(i1)
def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None,
modify_output_record=False):
i1, i2 = [self._i1, self._i2]
if i1 < 0:
raise ValueError('index is out of range')
Reported by Pylint.
Line: 1
Column: 1
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
Reported by Pylint.
Line: 48
Column: 5
self._field_name_suffix = '_{0}_{1}'.format(i1, i2) if i2 >= 0 \
else '_{0}_all'.format(i1)
def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None,
modify_output_record=False):
i1, i2 = [self._i1, self._i2]
if i1 < 0:
raise ValueError('index is out of range')
Reported by Pylint.
Line: 51
Column: 9
def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None,
modify_output_record=False):
i1, i2 = [self._i1, self._i2]
if i1 < 0:
raise ValueError('index is out of range')
for blob_name in self._blobs:
blob = core.BlobReference(blob_name)
Reported by Pylint.
Line: 51
Column: 13
def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None,
modify_output_record=False):
i1, i2 = [self._i1, self._i2]
if i1 < 0:
raise ValueError('index is out of range')
for blob_name in self._blobs:
blob = core.BlobReference(blob_name)
Reported by Pylint.
Line: 57
Column: 1
for blob_name in self._blobs:
blob = core.BlobReference(blob_name)
assert net.BlobIsDefined(blob), 'blob {} is not defined in net {} whose proto is {}'.format(blob, net.Name(), net.Proto())
blob_i1 = net.Slice([blob], starts=[i1, 0], ends=[i1 + 1, -1])
if self._i2 == -1:
blob_i1_i2 = net.Copy([blob_i1],
[net.NextScopedBlob(prefix=blob + '_{0}_all'.format(i1))])
Reported by Pylint.
Line: 57
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
for blob_name in self._blobs:
blob = core.BlobReference(blob_name)
assert net.BlobIsDefined(blob), 'blob {} is not defined in net {} whose proto is {}'.format(blob, net.Name(), net.Proto())
blob_i1 = net.Slice([blob], starts=[i1, 0], ends=[i1 + 1, -1])
if self._i2 == -1:
blob_i1_i2 = net.Copy([blob_i1],
[net.NextScopedBlob(prefix=blob + '_{0}_all'.format(i1))])
Reported by Bandit.
Line: 82
Column: 5
else:
net.AppendOutputRecordField(output_field_name, output_scalar)
def field_name_suffix(self):
return self._field_name_suffix
Reported by Pylint.
caffe2/python/modeling/compute_histogram_for_blobs.py
9 issues
Line: 26
Column: 5
accumulate: boolean to output accumulate or per-batch histogram
"""
def __init__(self, blobs, logging_frequency, num_buckets=30,
lower_bound=0.0, upper_bound=1.0, accumulate=False):
self._blobs = blobs
self._logging_frequency = logging_frequency
self._accumulate = accumulate
if self._accumulate:
Reported by Pylint.
Line: 42
Column: 5
self._lower_bound = float(lower_bound)
self._upper_bound = float(upper_bound)
def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None,
modify_output_record=False):
for blob_name in self._blobs:
blob = core.BlobReference(blob_name)
assert net.BlobIsDefined(blob), 'blob {} is not defined in net {} whose proto is {}'.format(blob, net.Name(), net.Proto())
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core, schema
from caffe2.python.modeling.net_modifier import NetModifier
import numpy as np
Reported by Pylint.
Line: 26
Column: 5
accumulate: boolean to output accumulate or per-batch histogram
"""
def __init__(self, blobs, logging_frequency, num_buckets=30,
lower_bound=0.0, upper_bound=1.0, accumulate=False):
self._blobs = blobs
self._logging_frequency = logging_frequency
self._accumulate = accumulate
if self._accumulate:
Reported by Pylint.
Line: 37
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
self._field_name_suffix = '_curr_normalized_hist'
self._num_buckets = int(num_buckets)
assert self._num_buckets > 0, (
"num_buckets need to be greater than 0, got {}".format(num_buckets))
self._lower_bound = float(lower_bound)
self._upper_bound = float(upper_bound)
def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None,
Reported by Bandit.
Line: 42
Column: 5
self._lower_bound = float(lower_bound)
self._upper_bound = float(upper_bound)
def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None,
modify_output_record=False):
for blob_name in self._blobs:
blob = core.BlobReference(blob_name)
assert net.BlobIsDefined(blob), 'blob {} is not defined in net {} whose proto is {}'.format(blob, net.Name(), net.Proto())
Reported by Pylint.
Line: 46
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
modify_output_record=False):
for blob_name in self._blobs:
blob = core.BlobReference(blob_name)
assert net.BlobIsDefined(blob), 'blob {} is not defined in net {} whose proto is {}'.format(blob, net.Name(), net.Proto())
blob_float = net.Cast(blob, net.NextScopedBlob(prefix=blob +
'_float'), to=core.DataType.FLOAT)
curr_hist, acc_hist = net.AccumulateHistogram(
[blob_float],
Reported by Bandit.
Line: 46
Column: 1
modify_output_record=False):
for blob_name in self._blobs:
blob = core.BlobReference(blob_name)
assert net.BlobIsDefined(blob), 'blob {} is not defined in net {} whose proto is {}'.format(blob, net.Name(), net.Proto())
blob_float = net.Cast(blob, net.NextScopedBlob(prefix=blob +
'_float'), to=core.DataType.FLOAT)
curr_hist, acc_hist = net.AccumulateHistogram(
[blob_float],
Reported by Pylint.
Line: 91
Column: 5
output_field_name,
output_scalar)
def field_name_suffix(self):
return self._field_name_suffix
Reported by Pylint.
benchmarks/operator_benchmark/benchmark_runner.py
9 issues
Line: 3
Column: 1
import argparse
import torch
import benchmark_core
import benchmark_utils
"""Performance microbenchmarks's main binary.
Reported by Pylint.
Line: 5
Column: 1
import torch
import benchmark_core
import benchmark_utils
"""Performance microbenchmarks's main binary.
This is the main function for running performance microbenchmark tests.
Reported by Pylint.
Line: 6
Column: 1
import torch
import benchmark_core
import benchmark_utils
"""Performance microbenchmarks's main binary.
This is the main function for running performance microbenchmark tests.
It also registers existing benchmark tests via Python module imports.
Reported by Pylint.
Line: 8
Column: 1
import benchmark_core
import benchmark_utils
"""Performance microbenchmarks's main binary.
This is the main function for running performance microbenchmark tests.
It also registers existing benchmark tests via Python module imports.
"""
parser = argparse.ArgumentParser(
Reported by Pylint.
Line: 1
Column: 1
import argparse
import torch
import benchmark_core
import benchmark_utils
"""Performance microbenchmarks's main binary.
Reported by Pylint.
Line: 18
Column: 1
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
def parse_args():
parser.add_argument(
'--tag_filter',
help='tag_filter can be used to run the shapes which matches the tag. (all is used to run all the shapes)',
default='short')
Reported by Pylint.
Line: 21
Column: 1
def parse_args():
parser.add_argument(
'--tag_filter',
help='tag_filter can be used to run the shapes which matches the tag. (all is used to run all the shapes)',
default='short')
# This option is used to filter test cases to run.
parser.add_argument(
'--operators',
Reported by Pylint.
Line: 58
Column: 1
parser.add_argument(
"--num_runs",
help="Run each test for num_runs. Each run executes an operator for number of <--iterations>",
type=int,
default=1,
)
parser.add_argument(
Reported by Pylint.
Line: 148
Column: 1
return args
def main():
args = parse_args()
benchmark_core.BenchmarkRunner(args).run()
if __name__ == "__main__":
Reported by Pylint.
binaries/make_mnist_db.cc
9 issues
Line: 93
Column: 3
CWE codes:
119
120
Suggestion:
Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length
std::vector<char> pixels(rows * cols);
int count = 0;
const int kMaxKeyLength = 11;
char key_cstr[kMaxKeyLength];
TensorProtos protos;
TensorProto* data = protos.add_protos();
TensorProto* label = protos.add_protos();
data->set_data_type(TensorProto::BYTE);
Reported by FlawFinder.
Line: 64
Column: 14
CWE codes:
120
20
uint32_t rows;
uint32_t cols;
image_file.read(reinterpret_cast<char*>(&magic), 4);
magic = swap_endian(magic);
if (magic == 529205256) {
LOG(FATAL) <<
"It seems that you forgot to unzip the mnist dataset. You should "
"first unzip them using e.g. gunzip on Linux.";
Reported by FlawFinder.
Line: 72
Column: 14
CWE codes:
120
20
"first unzip them using e.g. gunzip on Linux.";
}
CAFFE_ENFORCE_EQ(magic, 2051, "Incorrect image file magic.");
label_file.read(reinterpret_cast<char*>(&magic), 4);
magic = swap_endian(magic);
CAFFE_ENFORCE_EQ(magic, 2049, "Incorrect label file magic.");
image_file.read(reinterpret_cast<char*>(&num_items), 4);
num_items = swap_endian(num_items);
label_file.read(reinterpret_cast<char*>(&num_labels), 4);
Reported by FlawFinder.
Line: 75
Column: 14
CWE codes:
120
20
label_file.read(reinterpret_cast<char*>(&magic), 4);
magic = swap_endian(magic);
CAFFE_ENFORCE_EQ(magic, 2049, "Incorrect label file magic.");
image_file.read(reinterpret_cast<char*>(&num_items), 4);
num_items = swap_endian(num_items);
label_file.read(reinterpret_cast<char*>(&num_labels), 4);
num_labels = swap_endian(num_labels);
CAFFE_ENFORCE_EQ(num_items, num_labels);
image_file.read(reinterpret_cast<char*>(&rows), 4);
Reported by FlawFinder.
Line: 77
Column: 14
CWE codes:
120
20
CAFFE_ENFORCE_EQ(magic, 2049, "Incorrect label file magic.");
image_file.read(reinterpret_cast<char*>(&num_items), 4);
num_items = swap_endian(num_items);
label_file.read(reinterpret_cast<char*>(&num_labels), 4);
num_labels = swap_endian(num_labels);
CAFFE_ENFORCE_EQ(num_items, num_labels);
image_file.read(reinterpret_cast<char*>(&rows), 4);
rows = swap_endian(rows);
image_file.read(reinterpret_cast<char*>(&cols), 4);
Reported by FlawFinder.
Line: 80
Column: 14
CWE codes:
120
20
label_file.read(reinterpret_cast<char*>(&num_labels), 4);
num_labels = swap_endian(num_labels);
CAFFE_ENFORCE_EQ(num_items, num_labels);
image_file.read(reinterpret_cast<char*>(&rows), 4);
rows = swap_endian(rows);
image_file.read(reinterpret_cast<char*>(&cols), 4);
cols = swap_endian(cols);
// leveldb
Reported by FlawFinder.
Line: 82
Column: 14
CWE codes:
120
20
CAFFE_ENFORCE_EQ(num_items, num_labels);
image_file.read(reinterpret_cast<char*>(&rows), 4);
rows = swap_endian(rows);
image_file.read(reinterpret_cast<char*>(&cols), 4);
cols = swap_endian(cols);
// leveldb
std::unique_ptr<db::DB> mnist_db(db::CreateDB(FLAGS_db, db_path, db::NEW));
std::unique_ptr<db::Transaction> transaction(mnist_db->NewTransaction());
Reported by FlawFinder.
Line: 114
Column: 16
CWE codes:
120
20
LOG(INFO) << "A total of " << num_items << " items.";
LOG(INFO) << "Rows: " << rows << " Cols: " << cols;
for (int item_id = 0; item_id < num_items; ++item_id) {
image_file.read(pixels.data(), rows * cols);
label_file.read(&label_value, 1);
for (int i = 0; i < rows * cols; ++i) {
data->set_byte_data(pixels.data(), rows * cols);
}
label->set_int32_data(0, static_cast<int>(label_value));
Reported by FlawFinder.
Line: 115
Column: 16
CWE codes:
120
20
LOG(INFO) << "Rows: " << rows << " Cols: " << cols;
for (int item_id = 0; item_id < num_items; ++item_id) {
image_file.read(pixels.data(), rows * cols);
label_file.read(&label_value, 1);
for (int i = 0; i < rows * cols; ++i) {
data->set_byte_data(pixels.data(), rows * cols);
}
label->set_int32_data(0, static_cast<int>(label_value));
snprintf(key_cstr, kMaxKeyLength, "%08d", item_id);
Reported by FlawFinder.
caffe2/python/layers/tags.py
9 issues
Line: 31
Column: 3
class Tags(object):
# TODO(amalevich): Tags might need to live in their own contexts, add this
# split later
EXCLUDE_FROM_TRAIN = 'exclude_from_train'
EXCLUDE_FROM_EVAL = 'exclude_from_eval'
EXCLUDE_FROM_PREDICTION = 'exclude_from_prediction'
EXCLUDE_FROM_ACCUMULATE_PRED = 'exclude_from_accumulate_pred'
Reported by Pylint.
Line: 70
Column: 5
"""
Valid tag prefixes for distributed training framework.
"""
"""
Used to pass on info to the 'extra_info' field in the net
Proto. Typically to provide info for distributed training.
"""
EXTRA_INFO = 'extra_info:'
"""
Reported by Pylint.
Line: 103
Column: 24
TagContext.current().add_tags(self.tags)
return self
def __exit__(self, type, value, traceback):
TagContext.current().remove_tags(self.tags)
def __call__(self, func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
Reported by Pylint.
Line: 1
Column: 1
## @package tags
# Module caffe2.python.layers.tags
import functools
Reported by Pylint.
Line: 22
Column: 5
# Tags is expected to be list to keep order of adding/removing things
self.tags = tags or []
def add_tags(self, tags):
self.tags.extend(tags)
def remove_tags(self, tags):
assert self.tags[-len(tags):] == tags
self.tags = self.tags[:-len(tags)]
Reported by Pylint.
Line: 25
Column: 5
def add_tags(self, tags):
self.tags.extend(tags)
def remove_tags(self, tags):
assert self.tags[-len(tags):] == tags
self.tags = self.tags[:-len(tags)]
class Tags(object):
Reported by Pylint.
Line: 26
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
self.tags.extend(tags)
def remove_tags(self, tags):
assert self.tags[-len(tags):] == tags
self.tags = self.tags[:-len(tags)]
class Tags(object):
# TODO(amalevich): Tags might need to live in their own contexts, add this
Reported by Bandit.
Line: 30
Column: 1
self.tags = self.tags[:-len(tags)]
class Tags(object):
# TODO(amalevich): Tags might need to live in their own contexts, add this
# split later
EXCLUDE_FROM_TRAIN = 'exclude_from_train'
EXCLUDE_FROM_EVAL = 'exclude_from_eval'
EXCLUDE_FROM_PREDICTION = 'exclude_from_prediction'
Reported by Pylint.
Line: 30
Column: 1
self.tags = self.tags[:-len(tags)]
class Tags(object):
# TODO(amalevich): Tags might need to live in their own contexts, add this
# split later
EXCLUDE_FROM_TRAIN = 'exclude_from_train'
EXCLUDE_FROM_EVAL = 'exclude_from_eval'
EXCLUDE_FROM_PREDICTION = 'exclude_from_prediction'
Reported by Pylint.
torch/utils/tensorboard/__init__.py
9 issues
Line: 1
Column: 1
import tensorboard
from setuptools import distutils
LooseVersion = distutils.version.LooseVersion
if not hasattr(tensorboard, '__version__') or LooseVersion(tensorboard.__version__) < LooseVersion('1.15'):
raise ImportError('TensorBoard logging requires TensorBoard version 1.15 or above')
del distutils
Reported by Pylint.
Line: 13
Column: 1
del LooseVersion
del tensorboard
from .writer import FileWriter, SummaryWriter # noqa: F401
from tensorboard.summary.writer.record_writer import RecordWriter # noqa: F401
Reported by Pylint.
Line: 14
Column: 1
del tensorboard
from .writer import FileWriter, SummaryWriter # noqa: F401
from tensorboard.summary.writer.record_writer import RecordWriter # noqa: F401
Reported by Pylint.
Line: 1
Column: 1
import tensorboard
from setuptools import distutils
LooseVersion = distutils.version.LooseVersion
if not hasattr(tensorboard, '__version__') or LooseVersion(tensorboard.__version__) < LooseVersion('1.15'):
raise ImportError('TensorBoard logging requires TensorBoard version 1.15 or above')
del distutils
Reported by Pylint.
Line: 2
Column: 1
import tensorboard
from setuptools import distutils
LooseVersion = distutils.version.LooseVersion
if not hasattr(tensorboard, '__version__') or LooseVersion(tensorboard.__version__) < LooseVersion('1.15'):
raise ImportError('TensorBoard logging requires TensorBoard version 1.15 or above')
del distutils
Reported by Pylint.
Line: 6
Column: 1
LooseVersion = distutils.version.LooseVersion
if not hasattr(tensorboard, '__version__') or LooseVersion(tensorboard.__version__) < LooseVersion('1.15'):
raise ImportError('TensorBoard logging requires TensorBoard version 1.15 or above')
del distutils
del LooseVersion
del tensorboard
Reported by Pylint.
Line: 13
Column: 1
del LooseVersion
del tensorboard
from .writer import FileWriter, SummaryWriter # noqa: F401
from tensorboard.summary.writer.record_writer import RecordWriter # noqa: F401
Reported by Pylint.
Line: 14
Column: 1
del tensorboard
from .writer import FileWriter, SummaryWriter # noqa: F401
from tensorboard.summary.writer.record_writer import RecordWriter # noqa: F401
Reported by Pylint.
Line: 14
Column: 1
del tensorboard
from .writer import FileWriter, SummaryWriter # noqa: F401
from tensorboard.summary.writer.record_writer import RecordWriter # noqa: F401
Reported by Pylint.
torch/utils/_cpp_extension_versioner.py
9 issues
Line: 1
Column: 1
import collections
Entry = collections.namedtuple('Entry', 'version, hash')
def update_hash(seed, value):
# Good old boost::hash_combine
# https://www.boost.org/doc/libs/1_35_0/doc/html/boost/hash_combine_id241013.html
Reported by Pylint.
Line: 7
Column: 1
Entry = collections.namedtuple('Entry', 'version, hash')
def update_hash(seed, value):
# Good old boost::hash_combine
# https://www.boost.org/doc/libs/1_35_0/doc/html/boost/hash_combine_id241013.html
return seed ^ (hash(value) + 0x9e3779b9 + (seed << 6) + (seed >> 2))
Reported by Pylint.
Line: 13
Column: 1
return seed ^ (hash(value) + 0x9e3779b9 + (seed << 6) + (seed >> 2))
def hash_source_files(hash_value, source_files):
for filename in source_files:
with open(filename) as file:
hash_value = update_hash(hash_value, file.read())
return hash_value
Reported by Pylint.
Line: 20
Column: 1
return hash_value
def hash_build_arguments(hash_value, build_arguments):
for group in build_arguments:
if group:
for argument in group:
hash_value = update_hash(hash_value, argument)
return hash_value
Reported by Pylint.
Line: 28
Column: 1
return hash_value
class ExtensionVersioner(object):
def __init__(self):
self.entries = {}
def get_version(self, name):
entry = self.entries.get(name)
Reported by Pylint.
Line: 28
Column: 1
return hash_value
class ExtensionVersioner(object):
def __init__(self):
self.entries = {}
def get_version(self, name):
entry = self.entries.get(name)
Reported by Pylint.
Line: 32
Column: 5
def __init__(self):
self.entries = {}
def get_version(self, name):
entry = self.entries.get(name)
return None if entry is None else entry.version
def bump_version_if_changed(self,
name,
Reported by Pylint.
Line: 36
Column: 5
entry = self.entries.get(name)
return None if entry is None else entry.version
def bump_version_if_changed(self,
name,
source_files,
build_arguments,
build_directory,
with_cuda,
Reported by Pylint.
Line: 36
Column: 5
entry = self.entries.get(name)
return None if entry is None else entry.version
def bump_version_if_changed(self,
name,
source_files,
build_arguments,
build_directory,
with_cuda,
Reported by Pylint.
torch/utils/data/_utils/pin_memory.py
9 issues
Line: 13
Column: 1
import torch
from torch._six import string_classes
from . import MP_STATUS_CHECK_INTERVAL
from torch._utils import ExceptionWrapper
def _pin_memory_loop(in_queue, out_queue, device_id, done_event):
# This setting is thread local, and prevents the copy in pin_memory from
Reported by Pylint.
Line: 20
Column: 5
def _pin_memory_loop(in_queue, out_queue, device_id, done_event):
# This setting is thread local, and prevents the copy in pin_memory from
# consuming all CPU cores.
torch.set_num_threads(1)
torch.cuda.set_device(device_id)
# See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on the
# logic of this function.
Reported by Pylint.
Line: 35
Column: 20
if not done_event.is_set() and not isinstance(data, ExceptionWrapper):
try:
data = pin_memory(data)
except Exception:
data = ExceptionWrapper(
where="in pin memory thread for device {}".format(device_id))
r = (idx, data)
while not done_event.is_set():
try:
Reported by Pylint.
Line: 14
Column: 1
import torch
from torch._six import string_classes
from . import MP_STATUS_CHECK_INTERVAL
from torch._utils import ExceptionWrapper
def _pin_memory_loop(in_queue, out_queue, device_id, done_event):
# This setting is thread local, and prevents the copy in pin_memory from
# consuming all CPU cores.
Reported by Pylint.
Line: 28
Column: 13
# logic of this function.
while not done_event.is_set():
try:
r = in_queue.get(timeout=MP_STATUS_CHECK_INTERVAL)
except queue.Empty:
continue
idx, data = r
if not done_event.is_set() and not isinstance(data, ExceptionWrapper):
try:
Reported by Pylint.
Line: 38
Column: 13
except Exception:
data = ExceptionWrapper(
where="in pin memory thread for device {}".format(device_id))
r = (idx, data)
while not done_event.is_set():
try:
out_queue.put(r, timeout=MP_STATUS_CHECK_INTERVAL)
break
except queue.Full:
Reported by Pylint.
Line: 48
Column: 1
del r # save memory
def pin_memory(data):
if isinstance(data, torch.Tensor):
return data.pin_memory()
elif isinstance(data, string_classes):
return data
elif isinstance(data, collections.abc.Mapping):
Reported by Pylint.
Line: 48
Column: 1
del r # save memory
def pin_memory(data):
if isinstance(data, torch.Tensor):
return data.pin_memory()
elif isinstance(data, string_classes):
return data
elif isinstance(data, collections.abc.Mapping):
Reported by Pylint.
Line: 49
Column: 5
def pin_memory(data):
if isinstance(data, torch.Tensor):
return data.pin_memory()
elif isinstance(data, string_classes):
return data
elif isinstance(data, collections.abc.Mapping):
return {k: pin_memory(sample) for k, sample in data.items()}
Reported by Pylint.
torch/utils/data/_utils/fetch.py
9 issues
Line: 44
Column: 5
class _MapDatasetFetcher(_BaseDatasetFetcher):
def __init__(self, dataset, auto_collation, collate_fn, drop_last):
super(_MapDatasetFetcher, self).__init__(dataset, auto_collation, collate_fn, drop_last)
def fetch(self, possibly_batched_index):
if self.auto_collation:
data = [self.dataset[idx] for idx in possibly_batched_index]
Reported by Pylint.
Line: 7
Column: 1
"""
class _BaseDatasetFetcher(object):
def __init__(self, dataset, auto_collation, collate_fn, drop_last):
self.dataset = dataset
self.auto_collation = auto_collation
self.collate_fn = collate_fn
self.drop_last = drop_last
Reported by Pylint.
Line: 7
Column: 1
"""
class _BaseDatasetFetcher(object):
def __init__(self, dataset, auto_collation, collate_fn, drop_last):
self.dataset = dataset
self.auto_collation = auto_collation
self.collate_fn = collate_fn
self.drop_last = drop_last
Reported by Pylint.
Line: 14
Column: 5
self.collate_fn = collate_fn
self.drop_last = drop_last
def fetch(self, possibly_batched_index):
raise NotImplementedError()
class _IterableDatasetFetcher(_BaseDatasetFetcher):
def __init__(self, dataset, auto_collation, collate_fn, drop_last):
Reported by Pylint.
Line: 18
Column: 1
raise NotImplementedError()
class _IterableDatasetFetcher(_BaseDatasetFetcher):
def __init__(self, dataset, auto_collation, collate_fn, drop_last):
super(_IterableDatasetFetcher, self).__init__(dataset, auto_collation, collate_fn, drop_last)
self.dataset_iter = iter(dataset)
self.ended = False
Reported by Pylint.
Line: 20
Column: 9
class _IterableDatasetFetcher(_BaseDatasetFetcher):
def __init__(self, dataset, auto_collation, collate_fn, drop_last):
super(_IterableDatasetFetcher, self).__init__(dataset, auto_collation, collate_fn, drop_last)
self.dataset_iter = iter(dataset)
self.ended = False
def fetch(self, possibly_batched_index):
if self.ended:
Reported by Pylint.
Line: 20
Column: 1
class _IterableDatasetFetcher(_BaseDatasetFetcher):
def __init__(self, dataset, auto_collation, collate_fn, drop_last):
super(_IterableDatasetFetcher, self).__init__(dataset, auto_collation, collate_fn, drop_last)
self.dataset_iter = iter(dataset)
self.ended = False
def fetch(self, possibly_batched_index):
if self.ended:
Reported by Pylint.
Line: 43
Column: 1
return self.collate_fn(data)
class _MapDatasetFetcher(_BaseDatasetFetcher):
def __init__(self, dataset, auto_collation, collate_fn, drop_last):
super(_MapDatasetFetcher, self).__init__(dataset, auto_collation, collate_fn, drop_last)
def fetch(self, possibly_batched_index):
if self.auto_collation:
Reported by Pylint.
Line: 45
Column: 9
class _MapDatasetFetcher(_BaseDatasetFetcher):
def __init__(self, dataset, auto_collation, collate_fn, drop_last):
super(_MapDatasetFetcher, self).__init__(dataset, auto_collation, collate_fn, drop_last)
def fetch(self, possibly_batched_index):
if self.auto_collation:
data = [self.dataset[idx] for idx in possibly_batched_index]
else:
Reported by Pylint.
torch/utils/data/_utils/__init__.py
9 issues
Line: 46
Column: 5
def _set_python_exit_flag():
global python_exit_status
python_exit_status = True
atexit.register(_set_python_exit_flag)
Reported by Pylint.
Line: 52
Column: 1
atexit.register(_set_python_exit_flag)
from . import worker, signal_handling, pin_memory, collate, fetch
Reported by Pylint.
Line: 52
Column: 1
atexit.register(_set_python_exit_flag)
from . import worker, signal_handling, pin_memory, collate, fetch
Reported by Pylint.
Line: 52
Column: 1
atexit.register(_set_python_exit_flag)
from . import worker, signal_handling, pin_memory, collate, fetch
Reported by Pylint.
Line: 52
Column: 1
atexit.register(_set_python_exit_flag)
from . import worker, signal_handling, pin_memory, collate, fetch
Reported by Pylint.
Line: 52
Column: 1
atexit.register(_set_python_exit_flag)
from . import worker, signal_handling, pin_memory, collate, fetch
Reported by Pylint.
Line: 27
Column: 1
sender is alive to prevent hanging."""
python_exit_status = False
r"""Whether Python is shutting down. This flag is guaranteed to be set before
the Python core library resources are freed, but Python may already be exiting
for some time when this is set.
Hook to set this flag is `_set_python_exit_flag`, and is inspired by a similar
Reported by Pylint.
Line: 46
Column: 5
def _set_python_exit_flag():
global python_exit_status
python_exit_status = True
atexit.register(_set_python_exit_flag)
Reported by Pylint.
Line: 52
Column: 1
atexit.register(_set_python_exit_flag)
from . import worker, signal_handling, pin_memory, collate, fetch
Reported by Pylint.