The following issues were found:
torch/nn/_reduction.py
8 issues
Line: 18
Column: 3
elif reduction == 'sum':
ret = 2
else:
ret = -1 # TODO: remove once JIT exceptions support control flow
raise ValueError("{} is not a valid value for reduction".format(reduction))
return ret
# In order to support previous versions, accept boolean size_average and reduce
# and convert them into the new constants for now
Reported by Pylint.
Line: 1
Column: 1
from typing import Optional
import warnings
# NB: Keep this file in sync with enums in aten/src/ATen/core/Reduction.h
def get_enum(reduction: str) -> int:
if reduction == 'none':
ret = 0
Reported by Pylint.
Line: 7
Column: 1
# NB: Keep this file in sync with enums in aten/src/ATen/core/Reduction.h
def get_enum(reduction: str) -> int:
if reduction == 'none':
ret = 0
elif reduction == 'mean':
ret = 1
elif reduction == 'elementwise_mean':
Reported by Pylint.
Line: 13
Column: 1
elif reduction == 'mean':
ret = 1
elif reduction == 'elementwise_mean':
warnings.warn("reduction='elementwise_mean' is deprecated, please use reduction='mean' instead.")
ret = 1
elif reduction == 'sum':
ret = 2
else:
ret = -1 # TODO: remove once JIT exceptions support control flow
Reported by Pylint.
Line: 27
Column: 1
# We use these functions in torch/legacy as well, in which case we'll silence the warning
def legacy_get_string(size_average: Optional[bool], reduce: Optional[bool], emit_warning: bool = True) -> str:
warning = "size_average and reduce args will be deprecated, please use reduction='{}' instead."
if size_average is None:
size_average = True
if reduce is None:
Reported by Pylint.
Line: 27
Column: 1
# We use these functions in torch/legacy as well, in which case we'll silence the warning
def legacy_get_string(size_average: Optional[bool], reduce: Optional[bool], emit_warning: bool = True) -> str:
warning = "size_average and reduce args will be deprecated, please use reduction='{}' instead."
if size_average is None:
size_average = True
if reduce is None:
Reported by Pylint.
Line: 46
Column: 1
return ret
def legacy_get_enum(size_average: Optional[bool], reduce: Optional[bool], emit_warning: bool = True) -> int:
return get_enum(legacy_get_string(size_average, reduce, emit_warning))
Reported by Pylint.
Line: 46
Column: 1
return ret
def legacy_get_enum(size_average: Optional[bool], reduce: Optional[bool], emit_warning: bool = True) -> int:
return get_enum(legacy_get_string(size_average, reduce, emit_warning))
Reported by Pylint.
caffe2/python/test/python_protobuf_test.py
8 issues
Line: 1
Column: 1
# make sure we use cpp implementation of protobuf
import os
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp"
# then import protobuf
from caffe2.proto import caffe2_pb2, metanet_pb2
Reported by Pylint.
Line: 9
Column: 1
import os
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp"
# then import protobuf
from caffe2.proto import caffe2_pb2, metanet_pb2
import unittest
class TestCrossProtoCalls(unittest.TestCase):
Reported by Pylint.
Line: 11
Column: 1
# then import protobuf
from caffe2.proto import caffe2_pb2, metanet_pb2
import unittest
class TestCrossProtoCalls(unittest.TestCase):
def testSimple(self):
net = caffe2_pb2.NetDef()
Reported by Pylint.
Line: 11
Column: 1
# then import protobuf
from caffe2.proto import caffe2_pb2, metanet_pb2
import unittest
class TestCrossProtoCalls(unittest.TestCase):
def testSimple(self):
net = caffe2_pb2.NetDef()
Reported by Pylint.
Line: 14
Column: 1
import unittest
class TestCrossProtoCalls(unittest.TestCase):
def testSimple(self):
net = caffe2_pb2.NetDef()
meta = metanet_pb2.MetaNetDef()
# if metanet_pb2 wasn't initialized properly the following fails with a
# cryptic message: "Parameter to MergeFrom() must be instance of same
Reported by Pylint.
Line: 15
Column: 5
class TestCrossProtoCalls(unittest.TestCase):
def testSimple(self):
net = caffe2_pb2.NetDef()
meta = metanet_pb2.MetaNetDef()
# if metanet_pb2 wasn't initialized properly the following fails with a
# cryptic message: "Parameter to MergeFrom() must be instance of same
# class: expected caffe2.NetDef got caffe2.NetDef."
Reported by Pylint.
Line: 15
Column: 5
class TestCrossProtoCalls(unittest.TestCase):
def testSimple(self):
net = caffe2_pb2.NetDef()
meta = metanet_pb2.MetaNetDef()
# if metanet_pb2 wasn't initialized properly the following fails with a
# cryptic message: "Parameter to MergeFrom() must be instance of same
# class: expected caffe2.NetDef got caffe2.NetDef."
Reported by Pylint.
Line: 15
Column: 5
class TestCrossProtoCalls(unittest.TestCase):
def testSimple(self):
net = caffe2_pb2.NetDef()
meta = metanet_pb2.MetaNetDef()
# if metanet_pb2 wasn't initialized properly the following fails with a
# cryptic message: "Parameter to MergeFrom() must be instance of same
# class: expected caffe2.NetDef got caffe2.NetDef."
Reported by Pylint.
caffe2/python/operator_test/lengths_tile_op_test.py
8 issues
Line: 9
Column: 1
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
class TestLengthsTileOp(serial.SerializedTestCase):
Reported by Pylint.
Line: 24
Column: 45
)
),
**hu.gcs)
def test_lengths_tile(self, inputs, gc, dc):
data, lengths = inputs
def lengths_tile_op(data, lengths):
return [np.concatenate([
[d] * l for d, l in zip(data, lengths)
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
Reported by Pylint.
Line: 13
Column: 1
import numpy as np
class TestLengthsTileOp(serial.SerializedTestCase):
@serial.given(
inputs=st.integers(min_value=1, max_value=20).flatmap(
lambda size: st.tuples(
hu.arrays([size], dtype=np.float32),
Reported by Pylint.
Line: 24
Column: 5
)
),
**hu.gcs)
def test_lengths_tile(self, inputs, gc, dc):
data, lengths = inputs
def lengths_tile_op(data, lengths):
return [np.concatenate([
[d] * l for d, l in zip(data, lengths)
Reported by Pylint.
Line: 24
Column: 5
)
),
**hu.gcs)
def test_lengths_tile(self, inputs, gc, dc):
data, lengths = inputs
def lengths_tile_op(data, lengths):
return [np.concatenate([
[d] * l for d, l in zip(data, lengths)
Reported by Pylint.
Line: 24
Column: 5
)
),
**hu.gcs)
def test_lengths_tile(self, inputs, gc, dc):
data, lengths = inputs
def lengths_tile_op(data, lengths):
return [np.concatenate([
[d] * l for d, l in zip(data, lengths)
Reported by Pylint.
Line: 32
Column: 9
[d] * l for d, l in zip(data, lengths)
])]
op = core.CreateOperator(
"LengthsTile",
["data", "lengths"],
["output"],
)
Reported by Pylint.
test/jit/_imported_class_test/foo.py
8 issues
Line: 1
Column: 1
import torch
from . import bar
# This file contains definitions of script classes.
# They are used by test_jit.py to test ScriptClass imports
@torch.jit.script # noqa: B903
class FooSameName(object):
def __init__(self, x):
Reported by Pylint.
Line: 2
Column: 1
import torch
from . import bar
# This file contains definitions of script classes.
# They are used by test_jit.py to test ScriptClass imports
@torch.jit.script # noqa: B903
class FooSameName(object):
def __init__(self, x):
Reported by Pylint.
Line: 1
Column: 1
import torch
from . import bar
# This file contains definitions of script classes.
# They are used by test_jit.py to test ScriptClass imports
@torch.jit.script # noqa: B903
class FooSameName(object):
def __init__(self, x):
Reported by Pylint.
Line: 1
Column: 1
import torch
from . import bar
# This file contains definitions of script classes.
# They are used by test_jit.py to test ScriptClass imports
@torch.jit.script # noqa: B903
class FooSameName(object):
def __init__(self, x):
Reported by Pylint.
Line: 8
Column: 1
@torch.jit.script # noqa: B903
class FooSameName(object):
def __init__(self, x):
self.x = x
self.nested = bar.FooSameName(x)
Reported by Pylint.
Line: 8
Column: 1
@torch.jit.script # noqa: B903
class FooSameName(object):
def __init__(self, x):
self.x = x
self.nested = bar.FooSameName(x)
Reported by Pylint.
Line: 8
Column: 1
@torch.jit.script # noqa: B903
class FooSameName(object):
def __init__(self, x):
self.x = x
self.nested = bar.FooSameName(x)
Reported by Pylint.
Line: 10
Column: 9
@torch.jit.script # noqa: B903
class FooSameName(object):
def __init__(self, x):
self.x = x
self.nested = bar.FooSameName(x)
Reported by Pylint.
test/distributed/pipeline/sync/test_pipeline.py
8 issues
Line: 7
Column: 1
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from torch.distributed.pipeline.sync.pipeline import _clock_cycles
def test_clock_cycles():
assert list(_clock_cycles(1, 1)) == [[(0, 0)]]
assert list(_clock_cycles(1, 3)) == [[(0, 0)], [(0, 1)], [(0, 2)]]
Reported by Pylint.
Line: 1
Column: 1
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from torch.distributed.pipeline.sync.pipeline import _clock_cycles
Reported by Pylint.
Line: 10
Column: 1
from torch.distributed.pipeline.sync.pipeline import _clock_cycles
def test_clock_cycles():
assert list(_clock_cycles(1, 1)) == [[(0, 0)]]
assert list(_clock_cycles(1, 3)) == [[(0, 0)], [(0, 1)], [(0, 2)]]
assert list(_clock_cycles(3, 1)) == [[(0, 0)], [(1, 0)], [(2, 0)]]
assert list(_clock_cycles(3, 3)) == [
Reported by Pylint.
Line: 11
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
def test_clock_cycles():
assert list(_clock_cycles(1, 1)) == [[(0, 0)]]
assert list(_clock_cycles(1, 3)) == [[(0, 0)], [(0, 1)], [(0, 2)]]
assert list(_clock_cycles(3, 1)) == [[(0, 0)], [(1, 0)], [(2, 0)]]
assert list(_clock_cycles(3, 3)) == [
[(0, 0)],
Reported by Bandit.
Line: 12
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
def test_clock_cycles():
assert list(_clock_cycles(1, 1)) == [[(0, 0)]]
assert list(_clock_cycles(1, 3)) == [[(0, 0)], [(0, 1)], [(0, 2)]]
assert list(_clock_cycles(3, 1)) == [[(0, 0)], [(1, 0)], [(2, 0)]]
assert list(_clock_cycles(3, 3)) == [
[(0, 0)],
[(1, 0), (0, 1)],
Reported by Bandit.
Line: 13
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
def test_clock_cycles():
assert list(_clock_cycles(1, 1)) == [[(0, 0)]]
assert list(_clock_cycles(1, 3)) == [[(0, 0)], [(0, 1)], [(0, 2)]]
assert list(_clock_cycles(3, 1)) == [[(0, 0)], [(1, 0)], [(2, 0)]]
assert list(_clock_cycles(3, 3)) == [
[(0, 0)],
[(1, 0), (0, 1)],
[(2, 0), (1, 1), (0, 2)],
Reported by Bandit.
Line: 15
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
assert list(_clock_cycles(1, 3)) == [[(0, 0)], [(0, 1)], [(0, 2)]]
assert list(_clock_cycles(3, 1)) == [[(0, 0)], [(1, 0)], [(2, 0)]]
assert list(_clock_cycles(3, 3)) == [
[(0, 0)],
[(1, 0), (0, 1)],
[(2, 0), (1, 1), (0, 2)],
[(2, 1), (1, 2)],
[(2, 2)],
Reported by Bandit.
Line: 23
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
[(2, 2)],
]
assert list(_clock_cycles(4, 2)) == [
[(0, 0)],
[(1, 0), (0, 1)],
[(2, 0), (1, 1)],
[(3, 0), (2, 1)],
[(3, 1)],
Reported by Bandit.
test/distributed/rpc/cuda/test_tensorpipe_agent.py
8 issues
Line: 5
Column: 1
import sys
import torch.distributed as dist
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
Reported by Pylint.
Line: 11
Column: 1
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed.rpc.tensorpipe_rpc_agent_test_fixture import (
TensorPipeRpcAgentTestFixture,
)
from torch.testing._internal.distributed.rpc_utils import (
GENERIC_CUDA_TESTS,
Reported by Pylint.
Line: 12
Column: 1
sys.exit(0)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed.rpc.tensorpipe_rpc_agent_test_fixture import (
TensorPipeRpcAgentTestFixture,
)
from torch.testing._internal.distributed.rpc_utils import (
GENERIC_CUDA_TESTS,
TENSORPIPE_CUDA_TESTS,
Reported by Pylint.
Line: 15
Column: 1
from torch.testing._internal.distributed.rpc.tensorpipe_rpc_agent_test_fixture import (
TensorPipeRpcAgentTestFixture,
)
from torch.testing._internal.distributed.rpc_utils import (
GENERIC_CUDA_TESTS,
TENSORPIPE_CUDA_TESTS,
MultiProcess,
generate_tests,
)
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/env python3
import sys
import torch.distributed as dist
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
Reported by Pylint.
Line: 11
Column: 1
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed.rpc.tensorpipe_rpc_agent_test_fixture import (
TensorPipeRpcAgentTestFixture,
)
from torch.testing._internal.distributed.rpc_utils import (
GENERIC_CUDA_TESTS,
Reported by Pylint.
Line: 12
Column: 1
sys.exit(0)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed.rpc.tensorpipe_rpc_agent_test_fixture import (
TensorPipeRpcAgentTestFixture,
)
from torch.testing._internal.distributed.rpc_utils import (
GENERIC_CUDA_TESTS,
TENSORPIPE_CUDA_TESTS,
Reported by Pylint.
Line: 15
Column: 1
from torch.testing._internal.distributed.rpc.tensorpipe_rpc_agent_test_fixture import (
TensorPipeRpcAgentTestFixture,
)
from torch.testing._internal.distributed.rpc_utils import (
GENERIC_CUDA_TESTS,
TENSORPIPE_CUDA_TESTS,
MultiProcess,
generate_tests,
)
Reported by Pylint.
test/cpp/jit/test_lite_interpreter.cpp
8 issues
Line: 49
ASSERT_TRUE(resd.equal(refd));
}
TEST(LiteInterpreterTest, CheckAttrAccess) {
Module m("m");
m.register_attribute("mobile_optimized", BoolType::get(), true);
std::stringstream ss;
m._save_for_mobile(ss);
Reported by Cppcheck.
Line: 114
Column: 17
CWE codes:
807
20
Suggestion:
Check environment variables carefully before using them
}
TEST(LiteInterpreterTest, Conv) {
auto s = std::getenv("PYTORCH_TEST_WITH_TSAN");
if (s && strcmp(s, "1") == 0)
return;
std::vector<torch::jit::IValue> inputs;
Reported by FlawFinder.
Line: 1178
Column: 17
CWE codes:
807
20
Suggestion:
Check environment variables carefully before using them
}
TEST(LiteInterpreterTest, DefaultArgsConv) {
auto s = std::getenv("PYTORCH_TEST_WITH_TSAN");
if (s && strcmp(s, "1") == 0)
return;
std::vector<torch::jit::IValue> inputs;
Reported by FlawFinder.
Line: 46
Column: 20
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
auto resd = res.toTensor();
auto refd = ref.toTensor();
ASSERT_TRUE(resd.equal(refd));
}
TEST(LiteInterpreterTest, CheckAttrAccess) {
Module m("m");
m.register_attribute("mobile_optimized", BoolType::get(), true);
Reported by FlawFinder.
Line: 629
Column: 46
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
const std::vector<IValue>& actual_result_list,
const std::vector<Tensor>& expect_result_list) {
AT_ASSERT(actual_result_list.size() == expect_result_list.size());
AT_ASSERT(actual_result_list[0].toTensor().equal(expect_result_list[0]));
AT_ASSERT(
actual_result_list[1].toTensor().dim() == expect_result_list[1].dim());
AT_ASSERT(actual_result_list[2].toTensor().equal(expect_result_list[2]));
}
Reported by FlawFinder.
Line: 632
Column: 46
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
AT_ASSERT(actual_result_list[0].toTensor().equal(expect_result_list[0]));
AT_ASSERT(
actual_result_list[1].toTensor().dim() == expect_result_list[1].dim());
AT_ASSERT(actual_result_list[2].toTensor().equal(expect_result_list[2]));
}
void runAndCheckTorchScriptModel(
std::stringstream& input_model_stream,
const std::vector<IValue>& input_data,
Reported by FlawFinder.
Line: 1205
Column: 20
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
}
auto output = res.toTensor();
AT_ASSERT(outputref.dim() == output.dim());
AT_ASSERT(output.equal(outputref));
}
namespace {
void testLiteModuleCompareResultTensors(
Module& m,
Reported by FlawFinder.
Line: 1224
Column: 20
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
}
auto output = res.toTensor();
AT_ASSERT(outputref.dim() == output.dim());
AT_ASSERT(output.equal(outputref));
}
void testDefaultArgsPinv(int num_args) {
Module m("m");
if (num_args == 1) {
Reported by FlawFinder.
caffe2/python/operator_test/blobs_queue_db_test.py
8 issues
Line: 8
Column: 1
import numpy as np
import caffe2.proto.caffe2_pb2 as caffe2_pb2
from caffe2.python import core, workspace, timeout_guard, test_util
class BlobsQueueDBTest(test_util.TestCase):
def test_create_blobs_queue_db_string(self):
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
import caffe2.proto.caffe2_pb2 as caffe2_pb2
from caffe2.python import core, workspace, timeout_guard, test_util
Reported by Pylint.
Line: 12
Column: 1
from caffe2.python import core, workspace, timeout_guard, test_util
class BlobsQueueDBTest(test_util.TestCase):
def test_create_blobs_queue_db_string(self):
def add_blobs(queue, num_samples):
blob = core.BlobReference("blob")
status = core.BlobReference("blob_status")
for i in range(num_samples):
Reported by Pylint.
Line: 13
Column: 5
class BlobsQueueDBTest(test_util.TestCase):
def test_create_blobs_queue_db_string(self):
def add_blobs(queue, num_samples):
blob = core.BlobReference("blob")
status = core.BlobReference("blob_status")
for i in range(num_samples):
self._add_blob_to_queue(
Reported by Pylint.
Line: 23
Column: 5
)
self._test_create_blobs_queue_db(add_blobs)
def test_create_blobs_queue_db_tensor(self):
def add_blobs(queue, num_samples):
blob = core.BlobReference("blob")
status = core.BlobReference("blob_status")
for i in range(num_samples):
data = self._create_test_tensor_protos(i)
Reported by Pylint.
Line: 75
Column: 5
self.assertEqual(1, item)
workspace.RunNetOnce(close_net)
def _add_blob_to_queue(self, queue, data, blob, status):
workspace.FeedBlob(blob, data)
op = core.CreateOperator(
"SafeEnqueueBlobs",
[queue, blob],
[blob, status],
Reported by Pylint.
Line: 77
Column: 9
def _add_blob_to_queue(self, queue, data, blob, status):
workspace.FeedBlob(blob, data)
op = core.CreateOperator(
"SafeEnqueueBlobs",
[queue, blob],
[blob, status],
)
workspace.RunOperatorOnce(op)
Reported by Pylint.
Line: 84
Column: 5
)
workspace.RunOperatorOnce(op)
def _create_test_tensor_protos(self, idx):
item = caffe2_pb2.TensorProtos()
data = item.protos.add()
data.data_type = core.DataType.STRING
data.string_data.append("foo{}".format(idx).encode('utf-8'))
label = item.protos.add()
Reported by Pylint.
caffe2/serialize/inline_container.cc
8 issues
Line: 88
Column: 5
CWE codes:
119
120
Suggestion:
Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length
constexpr size_t kMagicValueLength = 8;
if (size > kMagicValueLength) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
char buf[kMagicValueLength];
read(0, buf, kMagicValueLength);
valid("checking magic number");
AT_ASSERTM(
memcmp("PYTORCH1", buf, kMagicValueLength) != 0,
"File is an unsupported archive format from the preview release.");
Reported by FlawFinder.
Line: 220
Column: 3
CWE codes:
119
120
Suggestion:
Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length
mz_uint num_files = mz_zip_reader_get_num_files(ar_.get());
std::vector<std::string> out;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
char buf[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE];
for (size_t i = 0; i < num_files; i++) {
mz_zip_reader_get_filename(ar_.get(), i, buf, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE);
if (strncmp(
buf,
archive_name_plus_slash_.data(),
Reported by FlawFinder.
Line: 332
Column: 18
CWE codes:
362
CAFFE_THROW("invalid file name: ", file_name);
}
if (!writer_func_) {
file_stream_.open(
file_name,
std::ofstream::out | std::ofstream::trunc | std::ofstream::binary);
valid("opening archive ", file_name.c_str());
TORCH_CHECK(file_stream_, "File ", file_name, " cannot be opened.");
writer_func_ = [this](const void* buf, size_t nbytes) -> size_t {
Reported by FlawFinder.
Line: 28
Column: 16
CWE codes:
120
20
size_t istream_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) {
auto self = static_cast<PyTorchStreamReader*>(pOpaque);
return self->read(file_ofs, static_cast<char*>(pBuf), n);
}
static std::string basename(const std::string& name) {
size_t start = 0;
for(size_t i = 0; i < name.size(); ++i) {
Reported by FlawFinder.
Line: 52
Column: 29
CWE codes:
120
20
return name.substr(start, end - start);
}
size_t PyTorchStreamReader::read(uint64_t pos, char* buf, size_t n) {
return in_->read(pos, buf, n, "reading file");
}
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
PyTorchStreamReader::PyTorchStreamReader(const std::string& file_name)
Reported by FlawFinder.
Line: 53
Column: 15
CWE codes:
120
20
}
size_t PyTorchStreamReader::read(uint64_t pos, char* buf, size_t n) {
return in_->read(pos, buf, n, "reading file");
}
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
PyTorchStreamReader::PyTorchStreamReader(const std::string& file_name)
: ar_(std::make_unique<mz_zip_archive>()),
Reported by FlawFinder.
Line: 89
Column: 5
CWE codes:
120
20
if (size > kMagicValueLength) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
char buf[kMagicValueLength];
read(0, buf, kMagicValueLength);
valid("checking magic number");
AT_ASSERTM(
memcmp("PYTORCH1", buf, kMagicValueLength) != 0,
"File is an unsupported archive format from the preview release.");
}
Reported by FlawFinder.
Line: 276
Column: 8
CWE codes:
120
20
valid("retrieving file meta-data for ", name.c_str());
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
uint8_t local_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
in_->read(
stat.m_local_header_ofs,
local_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE,
"reading file header");
size_t filename_len = read_le_16(local_header + MZ_ZIP_LDH_FILENAME_LEN_OFS);
Reported by FlawFinder.
test/jit/test_ignorable_args.py
8 issues
Line: 3
Column: 1
import os
import sys
from torch._C import parse_ir
from torch.testing import FileCheck
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
Reported by Pylint.
Line: 4
Column: 1
import os
import sys
from torch._C import parse_ir
from torch.testing import FileCheck
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
Reported by Pylint.
Line: 9
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
Reported by Pylint.
Line: 1
Column: 1
import os
import sys
from torch._C import parse_ir
from torch.testing import FileCheck
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
Reported by Pylint.
Line: 9
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
Reported by Pylint.
Line: 17
Column: 1
"instead.")
# Tests that Python slice class is supported in TorchScript
class TestIgnorableArgs(JitTestCase):
def test_slice_ignorable_args_for_slice(self):
graph_str = """graph():
%13 : int = prim::Constant[value=0]()
%10 : bool = prim::Constant[value=0]()
%8 : NoneType = prim::Constant()
Reported by Pylint.
Line: 17
Column: 1
"instead.")
# Tests that Python slice class is supported in TorchScript
class TestIgnorableArgs(JitTestCase):
def test_slice_ignorable_args_for_slice(self):
graph_str = """graph():
%13 : int = prim::Constant[value=0]()
%10 : bool = prim::Constant[value=0]()
%8 : NoneType = prim::Constant()
Reported by Pylint.
Line: 18
Column: 5
# Tests that Python slice class is supported in TorchScript
class TestIgnorableArgs(JitTestCase):
def test_slice_ignorable_args_for_slice(self):
graph_str = """graph():
%13 : int = prim::Constant[value=0]()
%10 : bool = prim::Constant[value=0]()
%8 : NoneType = prim::Constant()
%0 : int = prim::Constant[value=1]()
Reported by Pylint.