The following issues were found:
torch/fx/experimental/fx2trt/converters/maxpool.py
6 issues
Line: 2
Column: 1
import torch
import tensorrt as trt
from torch.fx.experimental.fx2trt.fx2trt import tensorrt_converter
from .helper_functions import mark_as_int8_layer, extend_attr_to_tuple
def common_maxpool(network, mod, dimension, input_val, layer_name):
kernel_size = extend_attr_to_tuple(mod, "kernel_size", dimension)
stride = extend_attr_to_tuple(mod, "stride", dimension)
Reported by Pylint.
Line: 5
Column: 1
import tensorrt as trt
from torch.fx.experimental.fx2trt.fx2trt import tensorrt_converter
from .helper_functions import mark_as_int8_layer, extend_attr_to_tuple
def common_maxpool(network, mod, dimension, input_val, layer_name):
kernel_size = extend_attr_to_tuple(mod, "kernel_size", dimension)
stride = extend_attr_to_tuple(mod, "stride", dimension)
padding = extend_attr_to_tuple(mod, "padding", dimension)
Reported by Pylint.
Line: 1
Column: 1
import torch
import tensorrt as trt
from torch.fx.experimental.fx2trt.fx2trt import tensorrt_converter
from .helper_functions import mark_as_int8_layer, extend_attr_to_tuple
def common_maxpool(network, mod, dimension, input_val, layer_name):
kernel_size = extend_attr_to_tuple(mod, "kernel_size", dimension)
stride = extend_attr_to_tuple(mod, "stride", dimension)
Reported by Pylint.
Line: 7
Column: 1
from .helper_functions import mark_as_int8_layer, extend_attr_to_tuple
def common_maxpool(network, mod, dimension, input_val, layer_name):
kernel_size = extend_attr_to_tuple(mod, "kernel_size", dimension)
stride = extend_attr_to_tuple(mod, "stride", dimension)
padding = extend_attr_to_tuple(mod, "padding", dimension)
layer = network.add_pooling(
Reported by Pylint.
Line: 29
Column: 1
@tensorrt_converter(torch.nn.modules.pooling.MaxPool2d)
def maxpool2d(network, submod, args, kwargs, layer_name):
# args/kwargs should have already been normalized to kwargs
assert len(args) == 0
input_val = kwargs["input"]
if not isinstance(input_val, trt.tensorrt.ITensor):
Reported by Pylint.
Line: 31
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
@tensorrt_converter(torch.nn.modules.pooling.MaxPool2d)
def maxpool2d(network, submod, args, kwargs, layer_name):
# args/kwargs should have already been normalized to kwargs
assert len(args) == 0
input_val = kwargs["input"]
if not isinstance(input_val, trt.tensorrt.ITensor):
raise RuntimeError(f"MaxPool2d received input {input_val} that is not part "
"of the TensorRT region!")
Reported by Bandit.
torch/fx/experimental/fx2trt/converters/helper_functions.py
6 issues
Line: 2
Column: 1
import torch
import tensorrt as trt
def extend_attr_to_tuple(mod: torch.nn.Module, name: str, size: int):
"""
Extend an attribute of `mod` that named `name` to a tuple of `size`.
"""
val = getattr(mod, name)
Reported by Pylint.
Line: 32
Column: 17
"""
Get the dynamic range of a tensor based on its scale, zero_point and dtype.
"""
if dtype == torch.quint8:
min_val, max_val = 0, 255
elif dtype == torch.qint8:
min_val, max_val = -128, 127
else:
raise RuntimeError(f"Unsupported quantized dtype {dtype}")
Reported by Pylint.
Line: 34
Column: 19
"""
if dtype == torch.quint8:
min_val, max_val = 0, 255
elif dtype == torch.qint8:
min_val, max_val = -128, 127
else:
raise RuntimeError(f"Unsupported quantized dtype {dtype}")
return (min_val - zero_point) * scale, (max_val - zero_point) * scale
Reported by Pylint.
Line: 1
Column: 1
import torch
import tensorrt as trt
def extend_attr_to_tuple(mod: torch.nn.Module, name: str, size: int):
"""
Extend an attribute of `mod` that named `name` to a tuple of `size`.
"""
val = getattr(mod, name)
Reported by Pylint.
Line: 47
Column: 1
Set the precision of a layer to int8 as well as the type of its first output.
Also set the dynamic range of its first output.
"""
if layer.type not in {trt.LayerType.SHUFFLE, trt.LayerType.CONCATENATION, trt.LayerType.CONSTANT, trt.LayerType.SHAPE}:
layer.precision = trt.int8
for i in range(layer.num_outputs):
output_val = layer.get_output(i)
output_val.dynamic_range = dynamic_range
Reported by Pylint.
Line: 57
Column: 1
# output_val.dtype = trt.int8
def get_inputs_from_args_and_kwargs(args, kwargs, input_names):
inputs = []
for i, key in enumerate(input_names):
if key not in kwargs:
inputs.append(args[i])
else:
Reported by Pylint.
torch/csrc/deploy/test_deploy_from_python.py
6 issues
Line: 1
Column: 1
from libfb.py import testutil
import test_deploy_python_ext
class TestDeployFromPython(testutil.BaseFacebookTestCase):
def test_deploy_from_python(self):
self.assertTrue(test_deploy_python_ext.run())
Reported by Pylint.
Line: 3
Column: 1
from libfb.py import testutil
import test_deploy_python_ext
class TestDeployFromPython(testutil.BaseFacebookTestCase):
def test_deploy_from_python(self):
self.assertTrue(test_deploy_python_ext.run())
Reported by Pylint.
Line: 1
Column: 1
from libfb.py import testutil
import test_deploy_python_ext
class TestDeployFromPython(testutil.BaseFacebookTestCase):
def test_deploy_from_python(self):
self.assertTrue(test_deploy_python_ext.run())
Reported by Pylint.
Line: 5
Column: 1
import test_deploy_python_ext
class TestDeployFromPython(testutil.BaseFacebookTestCase):
def test_deploy_from_python(self):
self.assertTrue(test_deploy_python_ext.run())
Reported by Pylint.
Line: 5
Column: 1
import test_deploy_python_ext
class TestDeployFromPython(testutil.BaseFacebookTestCase):
def test_deploy_from_python(self):
self.assertTrue(test_deploy_python_ext.run())
Reported by Pylint.
Line: 6
Column: 5
import test_deploy_python_ext
class TestDeployFromPython(testutil.BaseFacebookTestCase):
def test_deploy_from_python(self):
self.assertTrue(test_deploy_python_ext.run())
Reported by Pylint.
test/typing/reveal/opt_size.py
6 issues
Line: 1
Column: 1
import torch
avg_pool1 = torch.nn.AdaptiveAvgPool2d((1, None))
reveal_type(avg_pool1) # E: {AdaptiveAvgPool2d}
avg_pool2 = torch.nn.AdaptiveAvgPool2d((None, 1))
reveal_type(avg_pool2) # E: {AdaptiveAvgPool2d}
max_pool1 = torch.nn.AdaptiveMaxPool2d((1, None))
reveal_type(max_pool1) # E: {AdaptiveMaxPool2d}
max_pool2 = torch.nn.AdaptiveMaxPool2d((None, 1))
Reported by Pylint.
Line: 4
Column: 1
import torch
avg_pool1 = torch.nn.AdaptiveAvgPool2d((1, None))
reveal_type(avg_pool1) # E: {AdaptiveAvgPool2d}
avg_pool2 = torch.nn.AdaptiveAvgPool2d((None, 1))
reveal_type(avg_pool2) # E: {AdaptiveAvgPool2d}
max_pool1 = torch.nn.AdaptiveMaxPool2d((1, None))
reveal_type(max_pool1) # E: {AdaptiveMaxPool2d}
max_pool2 = torch.nn.AdaptiveMaxPool2d((None, 1))
Reported by Pylint.
Line: 6
Column: 1
avg_pool1 = torch.nn.AdaptiveAvgPool2d((1, None))
reveal_type(avg_pool1) # E: {AdaptiveAvgPool2d}
avg_pool2 = torch.nn.AdaptiveAvgPool2d((None, 1))
reveal_type(avg_pool2) # E: {AdaptiveAvgPool2d}
max_pool1 = torch.nn.AdaptiveMaxPool2d((1, None))
reveal_type(max_pool1) # E: {AdaptiveMaxPool2d}
max_pool2 = torch.nn.AdaptiveMaxPool2d((None, 1))
reveal_type(max_pool2) # E: {AdaptiveMaxPool2d}
Reported by Pylint.
Line: 8
Column: 1
avg_pool2 = torch.nn.AdaptiveAvgPool2d((None, 1))
reveal_type(avg_pool2) # E: {AdaptiveAvgPool2d}
max_pool1 = torch.nn.AdaptiveMaxPool2d((1, None))
reveal_type(max_pool1) # E: {AdaptiveMaxPool2d}
max_pool2 = torch.nn.AdaptiveMaxPool2d((None, 1))
reveal_type(max_pool2) # E: {AdaptiveMaxPool2d}
Reported by Pylint.
Line: 10
Column: 1
max_pool1 = torch.nn.AdaptiveMaxPool2d((1, None))
reveal_type(max_pool1) # E: {AdaptiveMaxPool2d}
max_pool2 = torch.nn.AdaptiveMaxPool2d((None, 1))
reveal_type(max_pool2) # E: {AdaptiveMaxPool2d}
Reported by Pylint.
Line: 1
Column: 1
import torch
avg_pool1 = torch.nn.AdaptiveAvgPool2d((1, None))
reveal_type(avg_pool1) # E: {AdaptiveAvgPool2d}
avg_pool2 = torch.nn.AdaptiveAvgPool2d((None, 1))
reveal_type(avg_pool2) # E: {AdaptiveAvgPool2d}
max_pool1 = torch.nn.AdaptiveMaxPool2d((1, None))
reveal_type(max_pool1) # E: {AdaptiveMaxPool2d}
max_pool2 = torch.nn.AdaptiveMaxPool2d((None, 1))
Reported by Pylint.
torch/distributions/log_normal.py
6 issues
Line: 7
Column: 1
from torch.distributions.transformed_distribution import TransformedDistribution
class LogNormal(TransformedDistribution):
r"""
Creates a log-normal distribution parameterized by
:attr:`loc` and :attr:`scale` where::
X ~ Normal(loc, scale)
Reported by Pylint.
Line: 1
Column: 1
from torch.distributions import constraints
from torch.distributions.transforms import ExpTransform
from torch.distributions.normal import Normal
from torch.distributions.transformed_distribution import TransformedDistribution
class LogNormal(TransformedDistribution):
r"""
Creates a log-normal distribution parameterized by
Reported by Pylint.
Line: 31
Column: 9
def __init__(self, loc, scale, validate_args=None):
base_dist = Normal(loc, scale, validate_args=validate_args)
super(LogNormal, self).__init__(base_dist, ExpTransform(), validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(LogNormal, _instance)
return super(LogNormal, self).expand(batch_shape, _instance=new)
Reported by Pylint.
Line: 35
Column: 16
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(LogNormal, _instance)
return super(LogNormal, self).expand(batch_shape, _instance=new)
@property
def loc(self):
return self.base_dist.loc
Reported by Pylint.
Line: 38
Column: 5
return super(LogNormal, self).expand(batch_shape, _instance=new)
@property
def loc(self):
return self.base_dist.loc
@property
def scale(self):
return self.base_dist.scale
Reported by Pylint.
Line: 42
Column: 5
return self.base_dist.loc
@property
def scale(self):
return self.base_dist.scale
@property
def mean(self):
return (self.loc + self.scale.pow(2) / 2).exp()
Reported by Pylint.
test/typing/reveal/size.py
6 issues
Line: 1
Column: 1
import torch
input = []
input.append(torch.tensor([1.0, 2.0, 3.0, 4.0]))
input.append(torch.tensor([[1.0, 2.0, 3.0, 4.0]]))
input.append(torch.tensor([[[1.0, 2.0, 3.0, 4.0]]]))
reveal_type(input[0].shape[0]) # E: int
reveal_type(input[1].shape[1]) # E: int
reveal_type(input[2].shape[2]) # E: int
Reported by Pylint.
Line: 6
Column: 1
input.append(torch.tensor([1.0, 2.0, 3.0, 4.0]))
input.append(torch.tensor([[1.0, 2.0, 3.0, 4.0]]))
input.append(torch.tensor([[[1.0, 2.0, 3.0, 4.0]]]))
reveal_type(input[0].shape[0]) # E: int
reveal_type(input[1].shape[1]) # E: int
reveal_type(input[2].shape[2]) # E: int
Reported by Pylint.
Line: 7
Column: 1
input.append(torch.tensor([[1.0, 2.0, 3.0, 4.0]]))
input.append(torch.tensor([[[1.0, 2.0, 3.0, 4.0]]]))
reveal_type(input[0].shape[0]) # E: int
reveal_type(input[1].shape[1]) # E: int
reveal_type(input[2].shape[2]) # E: int
Reported by Pylint.
Line: 8
Column: 1
input.append(torch.tensor([[[1.0, 2.0, 3.0, 4.0]]]))
reveal_type(input[0].shape[0]) # E: int
reveal_type(input[1].shape[1]) # E: int
reveal_type(input[2].shape[2]) # E: int
Reported by Pylint.
Line: 2
Column: 1
import torch
input = []
input.append(torch.tensor([1.0, 2.0, 3.0, 4.0]))
input.append(torch.tensor([[1.0, 2.0, 3.0, 4.0]]))
input.append(torch.tensor([[[1.0, 2.0, 3.0, 4.0]]]))
reveal_type(input[0].shape[0]) # E: int
reveal_type(input[1].shape[1]) # E: int
reveal_type(input[2].shape[2]) # E: int
Reported by Pylint.
Line: 1
Column: 1
import torch
input = []
input.append(torch.tensor([1.0, 2.0, 3.0, 4.0]))
input.append(torch.tensor([[1.0, 2.0, 3.0, 4.0]]))
input.append(torch.tensor([[[1.0, 2.0, 3.0, 4.0]]]))
reveal_type(input[0].shape[0]) # E: int
reveal_type(input[1].shape[1]) # E: int
reveal_type(input[2].shape[2]) # E: int
Reported by Pylint.
torch/_deploy.py
6 issues
Line: 20
Column: 3
importers = sys_importer
def persistent_id(obj):
# FIXME: the docs say that persistent_id should only return a string
# but torch store returns tuples. This works only in the binary protocol
# see
# https://docs.python.org/2/library/pickle.html#pickling-and-unpickling-external-objects
# https://github.com/python/cpython/blob/master/Lib/pickle.py#L527-L537
if torch.is_storage(obj):
Reported by Pylint.
Line: 49
Column: 20
data_value = data_buf.getvalue()
return data_value, serialized_storages, serialized_dtypes, importer.zip_reader if importer else None
def _load_storages(id, zip_reader, obj_bytes, serialized_storages):
def persistent_load(saved_id):
assert isinstance(saved_id, tuple)
typename = _maybe_decode_ascii(saved_id[0])
data = saved_id[1:]
Reported by Pylint.
Line: 1
Column: 1
import io
import torch
from torch.package._package_pickler import create_pickler
from torch.package._package_unpickler import PackageUnpickler
from torch.package import sys_importer, OrderedImporter, PackageImporter, Importer
from torch.serialization import _maybe_decode_ascii
def _save_storages(importer, obj):
serialized_storages = []
Reported by Pylint.
Line: 47
Column: 1
pickler.persistent_id = persistent_id
pickler.dump(obj)
data_value = data_buf.getvalue()
return data_value, serialized_storages, serialized_dtypes, importer.zip_reader if importer else None
def _load_storages(id, zip_reader, obj_bytes, serialized_storages):
def persistent_load(saved_id):
assert isinstance(saved_id, tuple)
Reported by Pylint.
Line: 49
Column: 1
data_value = data_buf.getvalue()
return data_value, serialized_storages, serialized_dtypes, importer.zip_reader if importer else None
def _load_storages(id, zip_reader, obj_bytes, serialized_storages):
def persistent_load(saved_id):
assert isinstance(saved_id, tuple)
typename = _maybe_decode_ascii(saved_id[0])
data = saved_id[1:]
Reported by Pylint.
Line: 52
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
def _load_storages(id, zip_reader, obj_bytes, serialized_storages):
def persistent_load(saved_id):
assert isinstance(saved_id, tuple)
typename = _maybe_decode_ascii(saved_id[0])
data = saved_id[1:]
if typename == 'storage':
return serialized_storages[data[0]]
Reported by Bandit.
test/typing/reveal/tensor_copy.py
6 issues
Line: 1
Column: 1
import torch
t = torch.randn(2, 3)
reveal_type(t) # E: {Tensor}
u = torch.randn(2, 3)
reveal_type(u) # E: {Tensor}
t.copy_(u)
reveal_type(t) # E: {Tensor}
Reported by Pylint.
Line: 5
Column: 1
t = torch.randn(2, 3)
reveal_type(t) # E: {Tensor}
u = torch.randn(2, 3)
reveal_type(u) # E: {Tensor}
t.copy_(u)
reveal_type(t) # E: {Tensor}
r = (t == u).all()
Reported by Pylint.
Line: 7
Column: 1
t = torch.randn(2, 3)
reveal_type(t) # E: {Tensor}
u = torch.randn(2, 3)
reveal_type(u) # E: {Tensor}
t.copy_(u)
reveal_type(t) # E: {Tensor}
r = (t == u).all()
reveal_type(r) # E: {Tensor}
Reported by Pylint.
Line: 9
Column: 1
u = torch.randn(2, 3)
reveal_type(u) # E: {Tensor}
t.copy_(u)
reveal_type(t) # E: {Tensor}
r = (t == u).all()
reveal_type(r) # E: {Tensor}
Reported by Pylint.
Line: 11
Column: 1
t.copy_(u)
reveal_type(t) # E: {Tensor}
r = (t == u).all()
reveal_type(r) # E: {Tensor}
Reported by Pylint.
Line: 1
Column: 1
import torch
t = torch.randn(2, 3)
reveal_type(t) # E: {Tensor}
u = torch.randn(2, 3)
reveal_type(u) # E: {Tensor}
t.copy_(u)
reveal_type(t) # E: {Tensor}
Reported by Pylint.
ios/TestApp/benchmark/trace_model.py
6 issues
Line: 1
Column: 1
import torch
import torchvision
from torch.utils.mobile_optimizer import optimize_for_mobile
model = torchvision.models.mobilenet_v2(pretrained=True)
model.eval()
example = torch.rand(1, 3, 224, 224)
traced_script_module = torch.jit.script(model, example)
optimized_scripted_module = optimize_for_mobile(traced_script_module)
Reported by Pylint.
Line: 2
Column: 1
import torch
import torchvision
from torch.utils.mobile_optimizer import optimize_for_mobile
model = torchvision.models.mobilenet_v2(pretrained=True)
model.eval()
example = torch.rand(1, 3, 224, 224)
traced_script_module = torch.jit.script(model, example)
optimized_scripted_module = optimize_for_mobile(traced_script_module)
Reported by Pylint.
Line: 3
Column: 1
import torch
import torchvision
from torch.utils.mobile_optimizer import optimize_for_mobile
model = torchvision.models.mobilenet_v2(pretrained=True)
model.eval()
example = torch.rand(1, 3, 224, 224)
traced_script_module = torch.jit.script(model, example)
optimized_scripted_module = optimize_for_mobile(traced_script_module)
Reported by Pylint.
Line: 10
Column: 38
example = torch.rand(1, 3, 224, 224)
traced_script_module = torch.jit.script(model, example)
optimized_scripted_module = optimize_for_mobile(traced_script_module)
exported_optimized_scripted_module = optimized_scripted_module._save_for_lite_interpreter("model.ptl")
Reported by Pylint.
Line: 1
Column: 1
import torch
import torchvision
from torch.utils.mobile_optimizer import optimize_for_mobile
model = torchvision.models.mobilenet_v2(pretrained=True)
model.eval()
example = torch.rand(1, 3, 224, 224)
traced_script_module = torch.jit.script(model, example)
optimized_scripted_module = optimize_for_mobile(traced_script_module)
Reported by Pylint.
Line: 10
Column: 1
example = torch.rand(1, 3, 224, 224)
traced_script_module = torch.jit.script(model, example)
optimized_scripted_module = optimize_for_mobile(traced_script_module)
exported_optimized_scripted_module = optimized_scripted_module._save_for_lite_interpreter("model.ptl")
Reported by Pylint.
test/cpp/rpc/test_tensorpipe_serialization.cpp
6 issues
Line: 74
Column: 7
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
if (srcPayload.length) {
// Empty vector's data() can return nullptr, use the length to avoid
// copying into nullptr
memcpy(dstPayload.data, srcPayload.data, srcPayload.length);
}
}
EXPECT_EQ(
recvingTpAllocation.tensors.size(), sendingTpMessage.tensors.size());
for (int i = 0; i < recvingTpAllocation.tensors.size(); i++) {
Reported by FlawFinder.
Line: 82
Column: 5
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
for (int i = 0; i < recvingTpAllocation.tensors.size(); i++) {
tensorpipe::Message::Tensor& srcTensor = sendingTpMessage.tensors[i];
tensorpipe::Allocation::Tensor& dstTensor = recvingTpAllocation.tensors[i];
memcpy(
dstTensor.buffer.unwrap<tensorpipe::CpuBuffer>().ptr,
srcTensor.buffer.unwrap<tensorpipe::CpuBuffer>().ptr,
srcTensor.length);
}
Reported by FlawFinder.
Line: 99
Column: 22
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
EXPECT_EQ(mtype, recvingRpcMessage->type());
EXPECT_EQ(payloadCopy, recvingRpcMessage->payload());
EXPECT_EQ(mId, recvingRpcMessage->id());
EXPECT_TRUE(torch::equal(t1, recvingRpcMessage->tensors()[0]));
EXPECT_TRUE(torch::equal(t2, recvingRpcMessage->tensors()[1]));
}
TEST(TensorpipeSerialize, RecopySparseTensors) {
// Take a 1K row of a 1M tensors, and make sure we don't send across 1M rows.
Reported by FlawFinder.
Line: 100
Column: 22
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
EXPECT_EQ(payloadCopy, recvingRpcMessage->payload());
EXPECT_EQ(mId, recvingRpcMessage->id());
EXPECT_TRUE(torch::equal(t1, recvingRpcMessage->tensors()[0]));
EXPECT_TRUE(torch::equal(t2, recvingRpcMessage->tensors()[1]));
}
TEST(TensorpipeSerialize, RecopySparseTensors) {
// Take a 1K row of a 1M tensors, and make sure we don't send across 1M rows.
constexpr size_t k1K = 1024;
Reported by FlawFinder.
Line: 126
Column: 22
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
EXPECT_EQ(tpBuffers.tensors.size(), 2);
EXPECT_EQ(sendingTpMessage.tensors.size(), 2);
EXPECT_TRUE(torch::equal(main, tpBuffers.tensors[0]));
EXPECT_TRUE(torch::equal(tiny, tpBuffers.tensors[1]));
// Test cloned storage
EXPECT_EQ(
main.storage().data(),
sendingTpMessage.tensors[0].buffer.unwrap<tensorpipe::CpuBuffer>().ptr);
Reported by FlawFinder.
Line: 127
Column: 22
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
EXPECT_EQ(tpBuffers.tensors.size(), 2);
EXPECT_EQ(sendingTpMessage.tensors.size(), 2);
EXPECT_TRUE(torch::equal(main, tpBuffers.tensors[0]));
EXPECT_TRUE(torch::equal(tiny, tpBuffers.tensors[1]));
// Test cloned storage
EXPECT_EQ(
main.storage().data(),
sendingTpMessage.tensors[0].buffer.unwrap<tensorpipe::CpuBuffer>().ptr);
EXPECT_NE(
Reported by FlawFinder.