The following issues were found:
caffe2/python/build.py
3 issues
Line: 8
Column: 29
import caffe2.python._import_c_extension as C
CAFFE2_NO_OPERATOR_SCHEMA = C.define_caffe2_no_operator_schema
build_options = C.get_build_options()
Reported by Pylint.
Line: 9
Column: 17
import caffe2.python._import_c_extension as C
CAFFE2_NO_OPERATOR_SCHEMA = C.define_caffe2_no_operator_schema
build_options = C.get_build_options()
Reported by Pylint.
Line: 1
Column: 1
import caffe2.python._import_c_extension as C
CAFFE2_NO_OPERATOR_SCHEMA = C.define_caffe2_no_operator_schema
build_options = C.get_build_options()
Reported by Pylint.
aten/src/ATen/native/vulkan/ops/Mm.cpp
3 issues
Line: 219
#ifdef USE_VULKAN_API
TORCH_LIBRARY_IMPL(aten, Vulkan, m) {
m.impl(TORCH_SELECTIVE_NAME("aten::addmm"), TORCH_FN(addmm));
m.impl(TORCH_SELECTIVE_NAME("aten::mm"), TORCH_FN(mm));
}
#endif /* USE_VULKAN_API */
Reported by Cppcheck.
Line: 57
Column: 7
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
for (int64_t src_w = 0; src_w < src_kw_sz; ++src_w) {
int64_t dst_plane = 2*(src_h%2) + (src_w%2);
int64_t dst_index = (src_h/2)*dst_kw_sz + (src_w/2);
memcpy(
dst_weight_ptr + dst_plane * dst_plane_sz + dst_index,
src_weight_ptr + src_h * src_kw_sz + src_w,
sizeof(float));
}
}
Reported by FlawFinder.
Line: 121
Column: 9
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
for (int64_t src_w = 0; src_w < src_kw_sz; ++src_w) {
int64_t dst_plane = 2*(src_h%2) + (src_w%2);
int64_t dst_index = (src_h/2)*dst_kw_sz + (src_w/2);
memcpy(
dst_bias_ptr + dst_plane * dst_plane_sz + dst_index,
src_bias_ptr + src_h * src_kw_sz + src_w,
sizeof(float));
}
}
Reported by FlawFinder.
torch/utils/benchmark/examples/simple_timeit.py
3 issues
Line: 14
Column: 48
def main():
timer = benchmark_utils.Timer(
stmt="x + y",
globals={"x": torch.ones((4, 8)), "y": torch.ones((1, 8))},
label="Broadcasting add (4x8)",
)
for i in range(3):
print(f"Run: {i}\n{'-' * 40}")
Reported by Pylint.
Line: 14
Column: 23
def main():
timer = benchmark_utils.Timer(
stmt="x + y",
globals={"x": torch.ones((4, 8)), "y": torch.ones((1, 8))},
label="Broadcasting add (4x8)",
)
for i in range(3):
print(f"Run: {i}\n{'-' * 40}")
Reported by Pylint.
Line: 11
Column: 1
import torch.utils.benchmark as benchmark_utils
def main():
timer = benchmark_utils.Timer(
stmt="x + y",
globals={"x": torch.ones((4, 8)), "y": torch.ones((1, 8))},
label="Broadcasting add (4x8)",
)
Reported by Pylint.
torch/linalg/__init__.py
3 issues
Line: 1
Column: 1
# -*- coding: utf-8 -*-
import sys
import torch
from torch._C import _add_docstr, _linalg # type: ignore[attr-defined]
Tensor = torch.Tensor
common_notes = {
Reported by Pylint.
Line: 1
Column: 1
# -*- coding: utf-8 -*-
import sys
import torch
from torch._C import _add_docstr, _linalg # type: ignore[attr-defined]
Tensor = torch.Tensor
common_notes = {
Reported by Pylint.
Line: 10
Column: 1
Tensor = torch.Tensor
common_notes = {
"sync_note": """When inputs are on a CUDA device, this function synchronizes that device with the CPU."""
}
# Note: This not only adds doc strings for functions in the linalg namespace, but
# also connects the torch.linalg Python namespace to the torch._C._linalg builtins.
Reported by Pylint.
torch/nn/intrinsic/qat/modules/__init__.py
3 issues
Line: 1
Column: 1
from .linear_relu import LinearReLU
from .conv_fused import (
ConvBn1d,
ConvBn2d,
ConvBn3d,
ConvBnReLU1d,
ConvBnReLU2d,
ConvBnReLU3d,
ConvReLU2d,
Reported by Pylint.
Line: 2
Column: 1
from .linear_relu import LinearReLU
from .conv_fused import (
ConvBn1d,
ConvBn2d,
ConvBn3d,
ConvBnReLU1d,
ConvBnReLU2d,
ConvBnReLU3d,
ConvReLU2d,
Reported by Pylint.
Line: 1
Column: 1
from .linear_relu import LinearReLU
from .conv_fused import (
ConvBn1d,
ConvBn2d,
ConvBn3d,
ConvBnReLU1d,
ConvBnReLU2d,
ConvBnReLU3d,
ConvReLU2d,
Reported by Pylint.
torch/testing/_internal/distributed/rpc/tensorpipe_rpc_agent_test_fixture.py
3 issues
Line: 22
Column: 3
)
def get_shutdown_error_regex(self):
# FIXME Once we consolidate the error messages returned by the
# TensorPipe agent put some more specific regex here.
error_regexes = [".*"]
return "|".join(["({})".format(error_str) for error_str in error_regexes])
def get_timeout_error_regex(self):
Reported by Pylint.
Line: 1
Column: 1
import torch.distributed.rpc as rpc
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
class TensorPipeRpcAgentTestFixture(RpcAgentTestFixture):
@property
def rpc_backend(self):
Reported by Pylint.
Line: 7
Column: 1
)
class TensorPipeRpcAgentTestFixture(RpcAgentTestFixture):
@property
def rpc_backend(self):
return rpc.backend_registry.BackendType[
"TENSORPIPE"
]
Reported by Pylint.
torch/testing/_internal/test_module/future_div.py
3 issues
Line: 1
Column: 1
from __future__ import division
def div_int_future():
return 1 / 2
def div_float_future():
return 3.14 / 0.125
Reported by Pylint.
Line: 4
Column: 1
from __future__ import division
def div_int_future():
return 1 / 2
def div_float_future():
return 3.14 / 0.125
Reported by Pylint.
Line: 8
Column: 1
return 1 / 2
def div_float_future():
return 3.14 / 0.125
Reported by Pylint.
torch/nn/intrinsic/quantized/_reference/modules/__init__.py
3 issues
Line: 2
Column: 1
import torch
from .linear_relu import LinearReLU
from .conv_relu import ConvReLU1d, ConvReLU2d, ConvReLU3d
__all__ = [
'LinearReLU',
'ConvReLU1d',
'ConvReLU2d',
'ConvReLU3d',
Reported by Pylint.
Line: 3
Column: 1
import torch
from .linear_relu import LinearReLU
from .conv_relu import ConvReLU1d, ConvReLU2d, ConvReLU3d
__all__ = [
'LinearReLU',
'ConvReLU1d',
'ConvReLU2d',
'ConvReLU3d',
Reported by Pylint.
Line: 1
Column: 1
import torch
from .linear_relu import LinearReLU
from .conv_relu import ConvReLU1d, ConvReLU2d, ConvReLU3d
__all__ = [
'LinearReLU',
'ConvReLU1d',
'ConvReLU2d',
'ConvReLU3d',
Reported by Pylint.
torch/testing/_internal/distributed/rpc/faulty_rpc_agent_test_fixture.py
3 issues
Line: 2
Column: 1
import torch.distributed.rpc as rpc
import torch.distributed.rpc._testing # noqa: F401
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
# The following message types are currently retried in the RREF protocol and
# distributed autograd. Thus only these messages should be tested with the
# Faulty RPC Agent.
Reported by Pylint.
Line: 1
Column: 1
import torch.distributed.rpc as rpc
import torch.distributed.rpc._testing # noqa: F401
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
# The following message types are currently retried in the RREF protocol and
# distributed autograd. Thus only these messages should be tested with the
# Faulty RPC Agent.
Reported by Pylint.
Line: 22
Column: 1
"SCRIPT_CALL": 1.5, # Script/Builtin
}
class FaultyRpcAgentTestFixture(RpcAgentTestFixture):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.messages_to_fail = retryable_message_types
self.messages_to_delay = default_messages_to_delay
Reported by Pylint.
torch/nn/intrinsic/quantized/modules/linear_relu.py
3 issues
Line: 24
Column: 68
"""
_FLOAT_MODULE = nni.LinearReLU
def __init__(self, in_features, out_features, bias=True, dtype=torch.qint8):
super().__init__(in_features, out_features, bias, dtype)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return torch.ops.quantized.linear_relu(
x, self._packed_params._packed_params, self.scale, self.zero_point)
Reported by Pylint.
Line: 29
Column: 16
def forward(self, x: torch.Tensor) -> torch.Tensor:
return torch.ops.quantized.linear_relu(
x, self._packed_params._packed_params, self.scale, self.zero_point)
def _get_name(self):
return 'QuantizedLinearReLU'
@classmethod
Reported by Pylint.
Line: 1
Column: 1
import torch
import torch.nn.quantized as nnq
import torch.nn.intrinsic as nni
class LinearReLU(nnq.Linear):
r"""
A LinearReLU module fused from Linear and ReLU modules
We adopt the same interface as :class:`torch.nn.quantized.Linear`.
Reported by Pylint.