The following issues were found:
aten/src/ATen/cpu/vec/vec512/vec512_int.h
8 issues
Line: 106
Column: 10
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
for (auto i = 0; i < size(); ++i) {
tmp_values[i] = 0;
}
std::memcpy(tmp_values, ptr, count * sizeof(int64_t));
return loadu(tmp_values);
}
void store(void* ptr, int count = size()) const {
if (count == size()) {
// ptr need not to be aligned here. See
Reported by FlawFinder.
Line: 117
Column: 12
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
} else if (count > 0) {
__at_align__ int64_t tmp_values[size()];
_mm512_storeu_si512(reinterpret_cast<__m512i*>(tmp_values), values);
std::memcpy(ptr, tmp_values, count * sizeof(int64_t));
}
}
const int64_t& operator[](int idx) const = delete;
int64_t& operator[](int idx) = delete;
Vectorized<int64_t> abs() const {
Reported by FlawFinder.
Line: 259
Column: 10
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
for (auto i = 0; i < size(); ++i) {
tmp_values[i] = 0;
}
std::memcpy(tmp_values, ptr, count * sizeof(int32_t));
return loadu(tmp_values);
}
void store(void* ptr, int count = size()) const {
if (count == size()) {
// ptr need not to be aligned here. See
Reported by FlawFinder.
Line: 270
Column: 12
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
} else if (count > 0) {
__at_align__ int32_t tmp_values[size()];
_mm512_storeu_si512(reinterpret_cast<__m512i*>(tmp_values), values);
std::memcpy(ptr, tmp_values, count * sizeof(int32_t));
}
}
void dump() const {
for (size_t i = 0; i < size(); ++i) {
std::cout << (int)((value_type*)&values)[i] << " ";
Reported by FlawFinder.
Line: 497
Column: 10
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
for (auto i = 0; i < size(); ++i) {
tmp_values[i] = 0;
}
std::memcpy(tmp_values, ptr, count * sizeof(int16_t));
return loadu(tmp_values);
}
void store(void* ptr, int count = size()) const {
if (count == size()) {
// ptr need not to be aligned here. See
Reported by FlawFinder.
Line: 508
Column: 12
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
} else if (count > 0) {
__at_align__ int16_t tmp_values[size()];
_mm512_storeu_si512(reinterpret_cast<__m512i*>(tmp_values), values);
std::memcpy(ptr, tmp_values, count * sizeof(int16_t));
}
}
const int16_t& operator[](int idx) const = delete;
int16_t& operator[](int idx) = delete;
Vectorized<int16_t> abs() const {
Reported by FlawFinder.
Line: 773
Column: 10
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
for (size_t i = 0; i < size(); ++i) {
tmp_values[i] = 0;
}
std::memcpy(tmp_values, ptr, count * sizeof(int8_t));
return loadu(tmp_values);
}
void store(void* ptr, int count = size()) const {
if (count == size()) {
// ptr need not to be aligned here. See
Reported by FlawFinder.
Line: 784
Column: 12
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
} else if (count > 0) {
__at_align__ int8_t tmp_values[size()];
_mm512_storeu_si512(reinterpret_cast<__m512i*>(tmp_values), values);
std::memcpy(ptr, tmp_values, count * sizeof(int8_t));
}
}
const int8_t& operator[](int idx) const = delete;
int8_t& operator[](int idx) = delete;
Vectorized<int8_t> abs() const {
Reported by FlawFinder.
caffe2/python/layers/batch_normalization.py
8 issues
Line: 1
Column: 1
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
import numpy as np
Reported by Pylint.
Line: 12
Column: 1
import numpy as np
class BatchNormalization(ModelLayer):
def __init__(
self,
model,
input_record,
name='batch_normalization',
Reported by Pylint.
Line: 12
Column: 1
import numpy as np
class BatchNormalization(ModelLayer):
def __init__(
self,
model,
input_record,
name='batch_normalization',
Reported by Pylint.
Line: 13
Column: 5
class BatchNormalization(ModelLayer):
def __init__(
self,
model,
input_record,
name='batch_normalization',
scale_optim=None,
Reported by Pylint.
Line: 25
Column: 9
scale_init_value=1.0,
**kwargs
):
super(BatchNormalization, self).__init__(
model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
self.input_shape = input_record.field_type().shape
Reported by Pylint.
Line: 28
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
super(BatchNormalization, self).__init__(
model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
self.input_shape = input_record.field_type().shape
if len(self.input_shape) == 3:
if order == "NCHW":
Reported by Bandit.
Line: 40
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
else:
raise ValueError("Please specify a correct order")
else:
assert len(self.input_shape) == 1, (
"This layer supports only 4D or 2D tensors")
input_dims = self.input_shape[0]
self.output_schema = schema.Scalar(
(np.float32, self.input_shape),
Reported by Bandit.
Line: 60
Column: 9
shape=[input_dims],
initializer=('ConstantFill', {'value': 0.0}),
optimizer=bias_optim)
self.rm = self.create_param(param_name='running_mean',
shape=[input_dims],
initializer=('ConstantFill', {'value': 0.0}),
optimizer=model.NoOptim)
self.riv = self.create_param(param_name='running_inv_var',
shape=[input_dims],
Reported by Pylint.
benchmarks/operator_benchmark/benchmark_all_test.py
7 issues
Line: 2
Column: 1
import operator_benchmark as op_bench
from pt import ( # noqa: F401
unary_test,
)
import benchmark_all_other_test # noqa: F401
import benchmark_all_quantized_test # noqa: F401
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 5
Column: 1
from pt import ( # noqa: F401
unary_test,
)
import benchmark_all_other_test # noqa: F401
import benchmark_all_quantized_test # noqa: F401
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 6
Column: 1
unary_test,
)
import benchmark_all_other_test # noqa: F401
import benchmark_all_quantized_test # noqa: F401
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 2
Column: 1
import operator_benchmark as op_bench
from pt import ( # noqa: F401
unary_test,
)
import benchmark_all_other_test # noqa: F401
import benchmark_all_quantized_test # noqa: F401
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 5
Column: 1
from pt import ( # noqa: F401
unary_test,
)
import benchmark_all_other_test # noqa: F401
import benchmark_all_quantized_test # noqa: F401
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 6
Column: 1
unary_test,
)
import benchmark_all_other_test # noqa: F401
import benchmark_all_quantized_test # noqa: F401
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 1
Column: 1
import operator_benchmark as op_bench
from pt import ( # noqa: F401
unary_test,
)
import benchmark_all_other_test # noqa: F401
import benchmark_all_quantized_test # noqa: F401
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
benchmarks/profiler_benchmark/resnet_memory_profiler.py
7 issues
Line: 1
Column: 1
import torch
import torchvision.models as models
import torch.autograd.profiler as profiler
for with_cuda in [False, True]:
model = models.resnet18()
inputs = torch.randn(5, 3, 224, 224)
sort_key = "self_cpu_memory_usage"
Reported by Pylint.
Line: 2
Column: 1
import torch
import torchvision.models as models
import torch.autograd.profiler as profiler
for with_cuda in [False, True]:
model = models.resnet18()
inputs = torch.randn(5, 3, 224, 224)
sort_key = "self_cpu_memory_usage"
Reported by Pylint.
Line: 4
Column: 1
import torch
import torchvision.models as models
import torch.autograd.profiler as profiler
for with_cuda in [False, True]:
model = models.resnet18()
inputs = torch.randn(5, 3, 224, 224)
sort_key = "self_cpu_memory_usage"
Reported by Pylint.
Line: 1
Column: 1
import torch
import torchvision.models as models
import torch.autograd.profiler as profiler
for with_cuda in [False, True]:
model = models.resnet18()
inputs = torch.randn(5, 3, 224, 224)
sort_key = "self_cpu_memory_usage"
Reported by Pylint.
Line: 4
Column: 1
import torch
import torchvision.models as models
import torch.autograd.profiler as profiler
for with_cuda in [False, True]:
model = models.resnet18()
inputs = torch.randn(5, 3, 224, 224)
sort_key = "self_cpu_memory_usage"
Reported by Pylint.
Line: 9
Column: 5
for with_cuda in [False, True]:
model = models.resnet18()
inputs = torch.randn(5, 3, 224, 224)
sort_key = "self_cpu_memory_usage"
if with_cuda and torch.cuda.is_available():
model = model.cuda()
inputs = inputs.cuda()
sort_key = "self_cuda_memory_usage"
print("Profiling CUDA Resnet model")
Reported by Pylint.
Line: 13
Column: 9
if with_cuda and torch.cuda.is_available():
model = model.cuda()
inputs = inputs.cuda()
sort_key = "self_cuda_memory_usage"
print("Profiling CUDA Resnet model")
else:
print("Profiling CPU Resnet model")
with profiler.profile(profile_memory=True, record_shapes=True) as prof:
Reported by Pylint.
android/pytorch_android/src/androidTest/java/org/pytorch/PytorchInstrumentedTests.java
7 issues
Line: 25
}
try (InputStream is = appContext.getAssets().open(assetName)) {
try (OutputStream os = new FileOutputStream(file)) {
byte[] buffer = new byte[4 * 1024];
int read;
while ((read = is.read(buffer)) != -1) {
os.write(buffer, 0, read);
}
Reported by PMD.
Line: 18
@Override
protected String assetFilePath(String assetName) throws IOException {
final Context appContext = InstrumentationRegistry.getInstrumentation().getTargetContext();
File file = new File(appContext.getFilesDir(), assetName);
if (file.exists() && file.length() > 0) {
return file.getAbsolutePath();
}
Reported by PMD.
Line: 19
@Override
protected String assetFilePath(String assetName) throws IOException {
final Context appContext = InstrumentationRegistry.getInstrumentation().getTargetContext();
File file = new File(appContext.getFilesDir(), assetName);
if (file.exists() && file.length() > 0) {
return file.getAbsolutePath();
}
try (InputStream is = appContext.getAssets().open(assetName)) {
Reported by PMD.
Line: 24
return file.getAbsolutePath();
}
try (InputStream is = appContext.getAssets().open(assetName)) {
try (OutputStream os = new FileOutputStream(file)) {
byte[] buffer = new byte[4 * 1024];
int read;
while ((read = is.read(buffer)) != -1) {
os.write(buffer, 0, read);
Reported by PMD.
Line: 28
try (OutputStream os = new FileOutputStream(file)) {
byte[] buffer = new byte[4 * 1024];
int read;
while ((read = is.read(buffer)) != -1) {
os.write(buffer, 0, read);
}
os.flush();
}
return file.getAbsolutePath();
Reported by PMD.
Line: 34
os.flush();
}
return file.getAbsolutePath();
} catch (IOException e) {
throw e;
}
}
}
Reported by PMD.
Line: 28
try (OutputStream os = new FileOutputStream(file)) {
byte[] buffer = new byte[4 * 1024];
int read;
while ((read = is.read(buffer)) != -1) {
os.write(buffer, 0, read);
}
os.flush();
}
return file.getAbsolutePath();
Reported by PMD.
benchmarks/distributed/rpc/parameter_server/metrics/CPUMetric.py
7 issues
Line: 3
Column: 1
import time
from .MetricBase import MetricBase
class CPUMetric(MetricBase):
def __init__(self, name: str):
self.name = name
self.start = None
Reported by Pylint.
Line: 1
Column: 1
import time
from .MetricBase import MetricBase
class CPUMetric(MetricBase):
def __init__(self, name: str):
self.name = name
self.start = None
Reported by Pylint.
Line: 1
Column: 1
import time
from .MetricBase import MetricBase
class CPUMetric(MetricBase):
def __init__(self, name: str):
self.name = name
self.start = None
Reported by Pylint.
Line: 6
Column: 1
from .MetricBase import MetricBase
class CPUMetric(MetricBase):
def __init__(self, name: str):
self.name = name
self.start = None
self.end = None
Reported by Pylint.
Line: 12
Column: 5
self.start = None
self.end = None
def record_start(self):
self.start = time.time()
def record_end(self):
self.end = time.time()
Reported by Pylint.
Line: 15
Column: 5
def record_start(self):
self.start = time.time()
def record_end(self):
self.end = time.time()
def elapsed_time(self):
if self.start is None:
raise RuntimeError("start is None")
Reported by Pylint.
Line: 18
Column: 5
def record_end(self):
self.end = time.time()
def elapsed_time(self):
if self.start is None:
raise RuntimeError("start is None")
if self.end is None:
raise RuntimeError("end is None")
return self.end - self.start
Reported by Pylint.
benchmarks/distributed/pipeline/benchmark_dataset.py
7 issues
Line: 1
Column: 1
import torch
from torch.utils.data import Dataset
def collate_sentences_lm(samples):
if len(samples) == 0:
return {}
Reported by Pylint.
Line: 2
Column: 1
import torch
from torch.utils.data import Dataset
def collate_sentences_lm(samples):
if len(samples) == 0:
return {}
Reported by Pylint.
Line: 10
Column: 5
if len(samples) == 0:
return {}
id = torch.LongTensor([s["id"] for s in samples])
src_tokens = torch.stack([s["source"] for s in samples], 0)
tgt_tokens = torch.stack([s["target"] for s in samples], 0)
ntokens = len(samples) * len(samples[0]["target"])
src_lengths = torch.LongTensor([len(samples[0]["source"])] * len(samples))
Reported by Pylint.
Line: 14
Column: 5
src_tokens = torch.stack([s["source"] for s in samples], 0)
tgt_tokens = torch.stack([s["target"] for s in samples], 0)
ntokens = len(samples) * len(samples[0]["target"])
src_lengths = torch.LongTensor([len(samples[0]["source"])] * len(samples))
batch = {
"id": id,
"nsentences": len(samples),
"ntokens": ntokens,
Reported by Pylint.
Line: 1
Column: 1
import torch
from torch.utils.data import Dataset
def collate_sentences_lm(samples):
if len(samples) == 0:
return {}
Reported by Pylint.
Line: 5
Column: 1
from torch.utils.data import Dataset
def collate_sentences_lm(samples):
if len(samples) == 0:
return {}
id = torch.LongTensor([s["id"] for s in samples])
Reported by Pylint.
Line: 10
Column: 5
if len(samples) == 0:
return {}
id = torch.LongTensor([s["id"] for s in samples])
src_tokens = torch.stack([s["source"] for s in samples], 0)
tgt_tokens = torch.stack([s["target"] for s in samples], 0)
ntokens = len(samples) * len(samples[0]["target"])
src_lengths = torch.LongTensor([len(samples[0]["source"])] * len(samples))
Reported by Pylint.
caffe2/python/layers/dropout.py
7 issues
Line: 1
Column: 1
# Module caffe2.python.layers.dropout
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
Reported by Pylint.
Line: 11
Column: 1
from caffe2.python.layers.layers import ModelLayer
class Dropout(ModelLayer):
def __init__(
self,
model,
input_record,
Reported by Pylint.
Line: 13
Column: 5
class Dropout(ModelLayer):
def __init__(
self,
model,
input_record,
name='dropout',
ratio=0.5,
Reported by Pylint.
Line: 22
Column: 9
dropout_for_eval=False,
**kwargs):
super(Dropout, self).__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
assert (ratio >= 0 and ratio < 1.0), \
"Expected 0 <= ratio < 1, but got ratio of %s" % ratio
self.output_schema = input_record.clone_schema()
Reported by Pylint.
Line: 23
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
**kwargs):
super(Dropout, self).__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
assert (ratio >= 0 and ratio < 1.0), \
"Expected 0 <= ratio < 1, but got ratio of %s" % ratio
self.output_schema = input_record.clone_schema()
self.output_schema.set_value(self.get_next_blob_reference('output'))
Reported by Bandit.
Line: 24
Column: 17
super(Dropout, self).__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
assert (ratio >= 0 and ratio < 1.0), \
"Expected 0 <= ratio < 1, but got ratio of %s" % ratio
self.output_schema = input_record.clone_schema()
self.output_schema.set_value(self.get_next_blob_reference('output'))
self.dropout_for_eval = dropout_for_eval
Reported by Pylint.
Line: 24
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
super(Dropout, self).__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
assert (ratio >= 0 and ratio < 1.0), \
"Expected 0 <= ratio < 1, but got ratio of %s" % ratio
self.output_schema = input_record.clone_schema()
self.output_schema.set_value(self.get_next_blob_reference('output'))
self.dropout_for_eval = dropout_for_eval
Reported by Bandit.
aten/src/ATen/test/MaybeOwned_test.cpp
7 issues
Line: 156
TYPED_TEST_CASE(MaybeOwnedTest, MaybeOwnedTypes);
TYPED_TEST(MaybeOwnedTest, SimpleDereferencingString) {
assertBorrow(this->borrowed, this->borrowFrom);
assertOwn(this->owned, this->ownCopy);
assertOwn(this->owned2, this->ownCopy2);
}
Reported by Cppcheck.
Line: 55
Column: 6
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
T getSampleValue2();
template <typename T>
bool equal(const T&, const T&);
template <typename T>
void assertBorrow(const c10::MaybeOwned<T>&, const T&);
template <typename T>
Reported by FlawFinder.
Line: 75
Column: 6
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
}
template<>
bool equal(const c10::intrusive_ptr<MyString>& lhs, const c10::intrusive_ptr<MyString>& rhs) {
if (!lhs || !rhs) {
return !lhs && !rhs;
}
return *lhs == *rhs;
}
Reported by FlawFinder.
Line: 115
Column: 6
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
}
template<>
bool equal(const Tensor& lhs, const Tensor& rhs) {
if (!lhs.defined() || !rhs.defined()) {
return !lhs.defined() && !rhs.defined();
}
return at::native::cpu_equal(lhs, rhs);
}
Reported by FlawFinder.
Line: 193
Column: 15
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
// Need a different value.
this->owned = c10::MaybeOwned<TypeParam>::owned(c10::in_place, getSampleValue2<TypeParam>());
EXPECT_TRUE(equal(*std::move(this->borrowed), getSampleValue<TypeParam>()));
EXPECT_TRUE(equal(*std::move(this->owned), getSampleValue2<TypeParam>()));
// Borrowed is unaffected.
assertBorrow(this->borrowed, this->borrowFrom);
Reported by FlawFinder.
Line: 194
Column: 15
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
this->owned = c10::MaybeOwned<TypeParam>::owned(c10::in_place, getSampleValue2<TypeParam>());
EXPECT_TRUE(equal(*std::move(this->borrowed), getSampleValue<TypeParam>()));
EXPECT_TRUE(equal(*std::move(this->owned), getSampleValue2<TypeParam>()));
// Borrowed is unaffected.
assertBorrow(this->borrowed, this->borrowFrom);
// Owned is a null c10::intrusive_ptr / empty Tensor.
Reported by FlawFinder.
Line: 200
Column: 15
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
assertBorrow(this->borrowed, this->borrowFrom);
// Owned is a null c10::intrusive_ptr / empty Tensor.
EXPECT_TRUE(equal(*this->owned, TypeParam()));
}
TYPED_TEST(MaybeOwnedTest, MoveConstructor) {
auto movedBorrowed(std::move(this->borrowed));
auto movedOwned(std::move(this->owned));
Reported by FlawFinder.
caffe2/python/layers/gather_record.py
7 issues
Line: 60
Column: 3
if lengths_blob is None:
lengths_blob = record.lengths()
else:
# TODO(kittipat): This is a hacky solution until LengthsSum for int
# is implemented
lengths_float = net.Cast(
record.lengths(),
net.NextScopedBlob(str(record.lengths()) + '_float'),
to=core.DataType.FLOAT,
Reported by Pylint.
Line: 76
Column: 58
net.NextScopedBlob(str(record.lengths()) + "_nested"),
to=core.DataType.INT32,
)
self._dispatch(net, record._items, lengths_blob, output_record._items)
def _dispatch(self, net, record, lengths_blob, output_record):
if isinstance(record, schema.Scalar):
self._gather_scalar(net, record, lengths_blob, output_record)
elif isinstance(record, schema.Struct):
Reported by Pylint.
Line: 76
Column: 29
net.NextScopedBlob(str(record.lengths()) + "_nested"),
to=core.DataType.INT32,
)
self._dispatch(net, record._items, lengths_blob, output_record._items)
def _dispatch(self, net, record, lengths_blob, output_record):
if isinstance(record, schema.Scalar):
self._gather_scalar(net, record, lengths_blob, output_record)
elif isinstance(record, schema.Struct):
Reported by Pylint.
Line: 1
Column: 1
## @package gather_record
# Module caffe2.python.layers.gather_record
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
Reported by Pylint.
Line: 33
Column: 9
"""
def __init__(self, model, input_record, name='gather_record', **kwargs):
super(GatherRecord, self).__init__(model, name, input_record, **kwargs)
assert 'indices' in input_record
assert 'record' in input_record
self.output_schema = schema.NewRecord(
Reported by Pylint.
Line: 35
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
def __init__(self, model, input_record, name='gather_record', **kwargs):
super(GatherRecord, self).__init__(model, name, input_record, **kwargs)
assert 'indices' in input_record
assert 'record' in input_record
self.output_schema = schema.NewRecord(
model.net, input_record.record.clone_schema())
Reported by Bandit.
Line: 36
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
super(GatherRecord, self).__init__(model, name, input_record, **kwargs)
assert 'indices' in input_record
assert 'record' in input_record
self.output_schema = schema.NewRecord(
model.net, input_record.record.clone_schema())
self._indices = self.input_record.indices()
Reported by Bandit.