The following issues were found:
torch/autograd/profiler.py
67 issues
Line: 432
Column: 37
        self.run_callbacks_on_exit: bool = True
        # Stores underlying RecordFunction as a tensor. TODO: move to custom
        # class (https://github.com/pytorch/pytorch/issues/35026).
        self.handle: torch.Tensor = torch.zeros(1)

    def __enter__(self):
        self.handle = torch.ops.profiler._record_function_enter(self.name)
        return self
Reported by Pylint.
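Note: for context, record_function is used as a context manager that labels a region inside an active profile. A minimal usage sketch (the label and workload are illustrative, not taken from the file above):

import torch

with torch.autograd.profiler.profile() as prof:
    x = torch.randn(4, 4)
    with torch.autograd.profiler.record_function("my_label"):
        y = x * 2 + x.sum()
print(prof.key_averages().table())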
Line: 467
Column: 27
        # We are scheduling to run this RecordFunction's end callbacks when the
        # passed in future completes, so don't run end callbacks on exit.
        self.run_callbacks_on_exit = False
        profiled_future = torch.ops.profiler._call_end_callbacks_on_jit_fut(self.handle, fut)
        return profiled_future

class emit_nvtx(object):
    """Context manager that makes every autograd operation emit an NVTX range.
Reported by Pylint.
Line: 612
Column: 28
    # Parse strings table
    strings = {}
    for r in conn.execute("SELECT _id_ as id, value FROM StringTable"):
        strings[r["id"]] = torch._C._demangle(r["value"])

    # First, find all functions and create FunctionEvents for them
    marker_query = """
    SELECT
        start.id AS marker_id, start.name, start.timestamp AS start_time, end.timestamp AS end_time
Reported by Pylint.
Line: 635
Column: 3
                            name=strings[row['name']],
                            start_us=row['start_time'],
                            end_us=row['end_time'],
                            thread=0)  # TODO: find in sqlite database
        functions.append(evt)
        functions_map[evt.id] = evt

    # Now, correlate all kernels with FunctionEvents
    kernel_query = """
Reported by Pylint.
Line: 1
Column: 1
from torch.autograd.profiler_util import (
    EventList, FunctionEvent, MemRecordsAcc, MEMORY_EVENT_NAME,
    _filter_name, _filter_stack_entry, _rewrite_name
)
from torch.autograd import (
    DeviceType, ProfilerActivity, ProfilerConfig, ProfilerState,
    kineto_available, _ProfilerResult, _disable_profiler, _enable_profiler,
    _prepare_profiler, _supported_activities
Reported by Pylint.
Line: 14
Column: 1
import torch
import torch.cuda
from torch.futures import Future
from typing import Any, Dict, List, Optional
from warnings import warn

try:
    # Available in Python >= 3.2
Reported by Pylint.
Line: 15
Column: 1
import torch.cuda
from torch.futures import Future
from typing import Any, Dict, List, Optional
from warnings import warn

try:
    # Available in Python >= 3.2
    from contextlib import ContextDecorator
Reported by Pylint.
Line: 24
Column: 5
except ImportError:
    import functools

    class ContextDecorator(object):  # type: ignore[no-redef]
        def __enter__(self):
            raise NotImplementedError

        def __exit__(self, exc_type, exc_val, exc_tb):
Reported by Pylint.
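Note: given the functools import, the fallback class presumably also implements __call__ so it keeps working as a decorator on Python < 3.2. A minimal sketch of such a fallback, assuming that is what the elided body does:

import functools

class ContextDecorator(object):
    # Subclasses provide real __enter__/__exit__; the base only wires up decoration.
    def __enter__(self):
        raise NotImplementedError

    def __exit__(self, exc_type, exc_val, exc_tb):
        raise NotImplementedError

    def __call__(self, func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            with self:
                return func(*args, **kwargs)
        return wrapped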
test/quantization/core/test_quantized_functional.py
67 issues
Line: 2
Column: 1
# Torch
import torch
import torch.nn.functional as F
import torch.nn.quantized.functional as qF
# Standard library
import numpy as np
# Testing utils
Reported by Pylint.
Line: 3
Column: 1
# Torch
import torch
import torch.nn.functional as F
import torch.nn.quantized.functional as qF
# Standard library
import numpy as np
# Testing utils
Reported by Pylint.
Line: 4
Column: 1
# Torch
import torch
import torch.nn.functional as F
import torch.nn.quantized.functional as qF
# Standard library
import numpy as np
# Testing utils
Reported by Pylint.
Line: 10
Column: 1
import numpy as np
# Testing utils
from hypothesis import assume, given
from hypothesis import strategies as st
from torch.testing._internal.common_quantization import (
    QuantizationTestCase,
    _make_conv_test_input,
)
Reported by Pylint.
Line: 11
Column: 1
# Testing utils
from hypothesis import assume, given
from hypothesis import strategies as st
from torch.testing._internal.common_quantization import (
    QuantizationTestCase,
    _make_conv_test_input,
)
from torch.testing._internal.common_quantized import override_quantized_engine
Reported by Pylint.
Line: 12
Column: 1
# Testing utils
from hypothesis import assume, given
from hypothesis import strategies as st
from torch.testing._internal.common_quantization import (
    QuantizationTestCase,
    _make_conv_test_input,
)
from torch.testing._internal.common_quantized import override_quantized_engine
from torch.testing._internal.common_utils import (
Reported by Pylint.
Line: 16
Column: 1
    QuantizationTestCase,
    _make_conv_test_input,
)
from torch.testing._internal.common_quantized import override_quantized_engine
from torch.testing._internal.common_utils import (
    IS_PPC,
    TEST_WITH_UBSAN,
)
Reported by Pylint.
Line: 17
Column: 1
    _make_conv_test_input,
)
from torch.testing._internal.common_quantized import override_quantized_engine
from torch.testing._internal.common_utils import (
    IS_PPC,
    TEST_WITH_UBSAN,
)
class TestQuantizedFunctionalOps(QuantizationTestCase):
Reported by Pylint.
Line: 1
Column: 1
# Torch
import torch
import torch.nn.functional as F
import torch.nn.quantized.functional as qF
# Standard library
import numpy as np
# Testing utils
Reported by Pylint.
Line: 22
Column: 1
    TEST_WITH_UBSAN,
)

class TestQuantizedFunctionalOps(QuantizationTestCase):
    def test_relu_api(self):
        X = torch.arange(-5, 5, dtype=torch.float)
        scale = 2.0
        zero_point = 1
        qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point, dtype=torch.quint8)
Reported by Pylint.
torch/nn/modules/transformer.py
67 issues
Line: 6
Column: 1
import torch
from torch import Tensor
from .. import functional as F
from .module import Module
from .activation import MultiheadAttention
from .container import ModuleList
from ..init import xavier_uniform_
from .dropout import Dropout
Reported by Pylint.
Line: 7
Column: 1
import torch
from torch import Tensor
from .. import functional as F
from .module import Module
from .activation import MultiheadAttention
from .container import ModuleList
from ..init import xavier_uniform_
from .dropout import Dropout
from .linear import Linear
Reported by Pylint.
Line: 8
Column: 1
from torch import Tensor
from .. import functional as F
from .module import Module
from .activation import MultiheadAttention
from .container import ModuleList
from ..init import xavier_uniform_
from .dropout import Dropout
from .linear import Linear
from .normalization import LayerNorm
Reported by Pylint.
Line: 9
Column: 1
from .. import functional as F
from .module import Module
from .activation import MultiheadAttention
from .container import ModuleList
from ..init import xavier_uniform_
from .dropout import Dropout
from .linear import Linear
from .normalization import LayerNorm
Reported by Pylint.
Line: 10
Column: 1
from .module import Module
from .activation import MultiheadAttention
from .container import ModuleList
from ..init import xavier_uniform_
from .dropout import Dropout
from .linear import Linear
from .normalization import LayerNorm
Reported by Pylint.
Line: 11
Column: 1
from .activation import MultiheadAttention
from .container import ModuleList
from ..init import xavier_uniform_
from .dropout import Dropout
from .linear import Linear
from .normalization import LayerNorm
class Transformer(Module):
Reported by Pylint.
Line: 12
Column: 1
from .container import ModuleList
from ..init import xavier_uniform_
from .dropout import Dropout
from .linear import Linear
from .normalization import LayerNorm
class Transformer(Module):
r"""A transformer model. User is able to modify the attributes as needed. The architecture
Reported by Pylint.
Line: 13
Column: 1
from ..init import xavier_uniform_
from .dropout import Dropout
from .linear import Linear
from .normalization import LayerNorm
class Transformer(Module):
r"""A transformer model. User is able to modify the attributes as needed. The architecture
is based on the paper "Attention Is All You Need". Ashish Vaswani, Noam Shazeer,
Reported by Pylint.
Line: 152
Column: 16
r"""Generate a square mask for the sequence. The masked positions are filled with float('-inf').
Unmasked positions are filled with float(0.0).
"""
return torch.triu(torch.full((sz, sz), float('-inf')), diagonal=1)
def _reset_parameters(self):
r"""Initiate parameters in the transformer model."""
for p in self.parameters():
Reported by Pylint.
Line: 152
Column: 27
r"""Generate a square mask for the sequence. The masked positions are filled with float('-inf').
Unmasked positions are filled with float(0.0).
"""
return torch.triu(torch.full((sz, sz), float('-inf')), diagonal=1)
def _reset_parameters(self):
r"""Initiate parameters in the transformer model."""
for p in self.parameters():
Reported by Pylint.
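Note: for a concrete picture of what the flagged expression returns, it builds an upper-triangular -inf mask; for sz=3:

import torch

mask = torch.triu(torch.full((3, 3), float('-inf')), diagonal=1)
# tensor([[0., -inf, -inf],
#         [0., 0., -inf],
#         [0., 0., 0.]])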
benchmarks/distributed/ddp/compare/compare_ddp.py
67 issues
Line: 23
Column: 1
import pickle
import glob
import python_ddp
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim
import torchvision.models as models
Reported by Pylint.
Line: 24
Column: 1
import glob
import python_ddp
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim
import torchvision.models as models
Reported by Pylint.
Line: 25
Column: 1
import python_ddp
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim
import torchvision.models as models
from collections import OrderedDict
Reported by Pylint.
Line: 26
Column: 1
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim
import torchvision.models as models
from collections import OrderedDict
from enum import Enum
Reported by Pylint.
Line: 27
Column: 1
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim
import torchvision.models as models
from collections import OrderedDict
from enum import Enum
from tabulate import tabulate
Reported by Pylint.
Line: 28
Column: 1
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim
import torchvision.models as models
from collections import OrderedDict
from enum import Enum
from tabulate import tabulate
from torch.nn.parallel import DistributedDataParallel as DDP
Reported by Pylint.
Line: 32
Column: 1
from collections import OrderedDict
from enum import Enum
from tabulate import tabulate
from torch.nn.parallel import DistributedDataParallel as DDP
class DDPOption(Enum):
    DDP_CPP_CORE = 1
    PYTHON_DDP_SYNC_REDUCTION = 2
Reported by Pylint.
Line: 33
Column: 1
from collections import OrderedDict
from enum import Enum
from tabulate import tabulate
from torch.nn.parallel import DistributedDataParallel as DDP
class DDPOption(Enum):
    DDP_CPP_CORE = 1
    PYTHON_DDP_SYNC_REDUCTION = 2
    PYTHON_DDP_ASYNC_REDUCTION = 3
Reported by Pylint.
Line: 71
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b301-pickle
    buffer_size_to_metrics = OrderedDict()
    for file_path in files:
        with open(file_path, "rb") as f:
            data = pickle.load(f)

        # Add data to buffer_size_to_metrics
        buffer_size = data.buffer_size_in_M
        if buffer_size not in buffer_size_to_metrics:
            buffer_size_to_metrics[buffer_size] = {}
        metrics = buffer_size_to_metrics.get(buffer_size)
Reported by Bandit.
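Note: Bandit B301 fires because pickle.load can execute arbitrary code while deserializing. The files here are produced by the benchmark itself, so the risk is limited, but a common hardening pattern is a restricted unpickler that whitelists the expected classes. A sketch (the allowed module/class pair is illustrative):

import io
import pickle

class RestrictedUnpickler(pickle.Unpickler):
    def find_class(self, module, name):
        # Permit only the container type this script is known to write (illustrative names).
        if (module, name) == ("metrics", "ExperimentData"):
            return super().find_class(module, name)
        raise pickle.UnpicklingError("forbidden global: %s.%s" % (module, name))

def restricted_load(raw: bytes):
    return RestrictedUnpickler(io.BytesIO(raw)).load()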
Line: 94
Column: 9
        ddp_model = DDP(module, device_ids=[rank],
                        process_group=pg,
                        bucket_cap_mb=buffer_size_in_M)
        ddp_model._set_static_graph()
        return ddp_model
    elif ddp_option == DDPOption.PYTHON_DDP_SYNC_REDUCTION:
        M = 2 ** 20
        return python_ddp.PythonDDP(module, pg, False, buffer_size=buffer_size_in_M * M)
    elif ddp_option == DDPOption.PYTHON_DDP_ASYNC_REDUCTION:
Reported by Pylint.
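Note: _set_static_graph is a private DDP method, which is the likely reason for this warning. PyTorch 1.11 and later expose the same behavior as a public constructor flag, so on those versions the protected call could be avoided. A sketch, assuming a new enough PyTorch:

ddp_model = DDP(module, device_ids=[rank],
                process_group=pg,
                bucket_cap_mb=buffer_size_in_M,
                static_graph=True)  # public flag, PyTorch >= 1.11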
torch/_appdirs.py
67 issues
Line: 498
Column: 5
    registry for this guarantees us the correct answer for all CSIDL_*
    names.
    """
    import winreg as _winreg

    shell_folder_name = {
        "CSIDL_APPDATA": "AppData",
        "CSIDL_COMMON_APPDATA": "Common AppData",
        "CSIDL_LOCAL_APPDATA": "Local AppData",
Reported by Pylint.
Line: 515
Column: 5
def _get_win_folder_with_pywin32(csidl_name):
    from win32com.shell import shellcon, shell
    dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
    # Try to make this a unicode path because SHGetFolderPath does
    # not return unicode strings when there is unicode data in the
    # path.
    try:
Reported by Pylint.
Line: 569
Column: 5
def _get_win_folder_with_jna(csidl_name):
    import array
    from com.sun import jna
    from com.sun.jna.platform import win32

    buf_size = win32.WinDef.MAX_PATH * 2
    buf = array.zeros('c', buf_size)
    shell = win32.Shell32.INSTANCE
Reported by Pylint.
Line: 570
Column: 5
def _get_win_folder_with_jna(csidl_name):
    import array
    from com.sun import jna
    from com.sun.jna.platform import win32

    buf_size = win32.WinDef.MAX_PATH * 2
    buf = array.zeros('c', buf_size)
    shell = win32.Shell32.INSTANCE
    shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
Reported by Pylint.
Line: 573
Column: 11
    from com.sun.jna.platform import win32

    buf_size = win32.WinDef.MAX_PATH * 2
    buf = array.zeros('c', buf_size)
    shell = win32.Shell32.INSTANCE
    shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
    dir = jna.Native.toString(buf.tostring()).rstrip("\0")
    # Downgrade to short path name if have highbit chars. See
Reported by Pylint.
Line: 586
Column: 15
            has_high_char = True
            break
    if has_high_char:
        buf = array.zeros('c', buf_size)
        kernel = win32.Kernel32.INSTANCE
        if kernel.GetShortPathName(dir, buf, buf_size):
            dir = jna.Native.toString(buf.tostring()).rstrip("\0")

    return dir
Reported by Pylint.
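Note: several of the warnings in this file come from the local variable dir, which shadows the dir builtin. The usual fix is a rename; a sketch for the pywin32 helper, with the rest of the body unchanged apart from the new name (folder is illustrative):

def _get_win_folder_with_pywin32(csidl_name):
    from win32com.shell import shellcon, shell
    folder = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)  # renamed from `dir`
    # ... unicode fallback logic unchanged, operating on `folder` ...
    return folder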
Line: 39
Column: 1
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""Utilities for determining application-specific dirs.
See <https://github.com/ActiveState/appdirs> for details and usage.
"""
# Dev Notes:
# - MSDN on where to store app data files:
Reported by Pylint.
Line: 75
Column: 19
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return full path to the user-specific data dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
Reported by Pylint.
Line: 75
Column: 33
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return full path to the user-specific data dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
Reported by Pylint.
Line: 130
Column: 33
    return path

def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
    r"""Return full path to the user-shared data dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
Reported by Pylint.
torch/autograd/function.py
66 issues
Line: 305
Column: 16
        with torch.no_grad():
            outputs = fn(ctx, *args)

        if not torch.is_grad_enabled():
            return outputs

        # If any of the inputs have requires_grad=True, we force the outputs
        # to have requires_grad=True but point to a grad_fn which throws an
        # error message during (double) back-propagation.
Reported by Pylint.
Line: 55
Column: 9
        >>> d = Func.apply(a, b, c)
        """
        self.to_save = tensors

    def mark_dirty(self, *args: torch.Tensor):
        r"""Marks given tensors as modified in an in-place operation.

        **This should be called at most once, only from inside the**
Reported by Pylint.
Line: 90
Column: 9
        >>> # computation has been modified by an inplace operation
        """
        self.dirty_tensors = args

    def mark_shared_storage(self, *pairs):
        warnings.warn(
            'mark_shared_storage is deprecated. '
            'Tensors with shared storages are automatically tracked. Note '
Reported by Pylint.
Line: 92
Column: 1
"""
self.dirty_tensors = args
def mark_shared_storage(self, *pairs):
warnings.warn(
'mark_shared_storage is deprecated. '
'Tensors with shared storages are automatically tracked. Note '
'that calls to `set_()` are not tracked')
Reported by Pylint.
Line: 128
Column: 9
        >>> return grad_input
        """
        self.non_differentiable = args

    def set_materialize_grads(self, value: bool):
        r"""Sets whether to materialize output grad tensors. Default is ``True``.

        **This should be called only from inside the** :func:`forward` **method**
Reported by Pylint.
Line: 172
Column: 9
        >>> b, _ = Func.apply(a)  # induces g2 to be undefined
        """
        self.materialize_grads = value

# DO NOT USE: This is only defined to be able to load old serialized models
_ContextMethodMixin = FunctionCtx

class _HookMixin(object):
Reported by Pylint.
Line: 188
Column: 25
        return backward_hooks, handle

class BackwardCFunction(_C._FunctionBase, FunctionCtx, _HookMixin):
    def apply(self, *args):
        # _forward_cls is defined by derived class
        return self._forward_cls.backward(self, *args)  # type: ignore[attr-defined]
Reported by Pylint.
Line: 210
Column: 45
# mypy doesn't understand `with_metaclass` from torch._six
class Function(with_metaclass(FunctionMeta, _C._FunctionBase, FunctionCtx, _HookMixin)):  # type: ignore[misc]
    r"""Base class to create custom `autograd.Function`

    To create a custom `autograd.Function`, subclass this class and implement
    the :meth:`forward` and :meth:`backward` static methods. Then, to use your custom
    op in the forward pass, call the class method ``apply``. Do not call
Reported by Pylint.
Line: 241
Column: 1
    >>> # Use it by calling the apply method:
    >>> output = Exp.apply(input)
    """
    def __init__(self, *args, **kwargs):
        cls = self.__class__
        warnings.warn(f"{cls} should not be instantiated. Methods on autograd functions "
                      "are all static, so you should invoke them on the class itself. "
                      "Instantiating an autograd function will raise an "
                      "error in a future version of PyTorch.", DeprecationWarning)
Reported by Pylint.
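Note: the snippet's docstring already points at the supported pattern: subclass Function with static forward/backward methods and call apply on the class, never on an instance. The canonical Exp example from the docstring, spelled out:

import torch

class Exp(torch.autograd.Function):
    @staticmethod
    def forward(ctx, i):
        result = i.exp()
        ctx.save_for_backward(result)
        return result

    @staticmethod
    def backward(ctx, grad_output):
        result, = ctx.saved_tensors
        return grad_output * result

output = Exp.apply(torch.randn(3, requires_grad=True))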
test/onnx/test_verify.py
66 issues
Line: 1
Column: 1
import torch
from torch.autograd import Function
from torch.nn import Module, Parameter
import caffe2.python.onnx.backend as backend
from verify import verify
from test_pytorch_common import TestCase, run_tests
Reported by Pylint.
Line: 2
Column: 1
import torch
from torch.autograd import Function
from torch.nn import Module, Parameter
import caffe2.python.onnx.backend as backend
from verify import verify
from test_pytorch_common import TestCase, run_tests
Reported by Pylint.
Line: 3
Column: 1
import torch
from torch.autograd import Function
from torch.nn import Module, Parameter
import caffe2.python.onnx.backend as backend
from verify import verify
from test_pytorch_common import TestCase, run_tests
Reported by Pylint.
Line: 4
Column: 1
import torch
from torch.autograd import Function
from torch.nn import Module, Parameter
import caffe2.python.onnx.backend as backend
from verify import verify
from test_pytorch_common import TestCase, run_tests
Reported by Pylint.
Line: 7
Column: 1
import caffe2.python.onnx.backend as backend
from verify import verify
from test_pytorch_common import TestCase, run_tests
class TestVerify(TestCase):
    maxDiff = None
Reported by Pylint.
Line: 37
Column: 25
                return g.op("Add", a, b)

            @staticmethod
            def forward(ctx, a, b):
                return a.sub(b)  # yahaha! you found me!

        class MyModel(Module):
            def forward(self, x, y):
                return BrokenAdd().apply(x, y)
Reported by Pylint.
Line: 50
Column: 13
    def test_jumbled_params(self):
        class MyModel(Module):
            def __init__(self):
                super(MyModel, self).__init__()

            def forward(self, x):
                y = x * x
                self.param = Parameter(torch.tensor([2.0]))
Reported by Pylint.
Line: 55
Column: 17
            def forward(self, x):
                y = x * x
                self.param = Parameter(torch.tensor([2.0]))
                return y

        x = torch.tensor([1, 2])
        with self.assertRaisesRegex(RuntimeError, "state_dict changed"):
            verify(MyModel(), x, backend)
Reported by Pylint.
test/jit/test_logging.py
66 issues
Line: 4
Column: 1
import os
import sys
import torch
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
Reported by Pylint.
Line: 9
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
    raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
                       "\tpython test/test_jit.py TESTNAME\n\n"
                       "instead.")
Reported by Pylint.
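Note: the import after sys.path.append is deliberate (the helper only resolves once the path is added), so the usual remedy is a targeted suppression rather than moving the import. A sketch, assuming the message is wrong-import-position:

sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase  # noqa: E402  # pylint: disable=wrong-import-position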
Line: 21
Column: 21
        class ModuleThatLogs(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                for i in range(x.size(0)):
                    x += 1.0
                    torch.jit._logging.add_stat_value('foo', 1)

                if bool(x.sum() > 0.0):
                    torch.jit._logging.add_stat_value('positive', 1)
Reported by Pylint.
Line: 23
Column: 21
            def forward(self, x):
                for i in range(x.size(0)):
                    x += 1.0
                    torch.jit._logging.add_stat_value('foo', 1)

                if bool(x.sum() > 0.0):
                    torch.jit._logging.add_stat_value('positive', 1)
                else:
                    torch.jit._logging.add_stat_value('negative', 1)
Reported by Pylint.
Line: 26
Column: 21
                    torch.jit._logging.add_stat_value('foo', 1)

                if bool(x.sum() > 0.0):
                    torch.jit._logging.add_stat_value('positive', 1)
                else:
                    torch.jit._logging.add_stat_value('negative', 1)
                return x

        logger = torch.jit._logging.LockingLogger()
Reported by Pylint.
Line: 28
Column: 21
                if bool(x.sum() > 0.0):
                    torch.jit._logging.add_stat_value('positive', 1)
                else:
                    torch.jit._logging.add_stat_value('negative', 1)
                return x

        logger = torch.jit._logging.LockingLogger()
        old_logger = torch.jit._logging.set_logger(logger)
        try:
Reported by Pylint.
Line: 31
Column: 18
                    torch.jit._logging.add_stat_value('negative', 1)
                return x

        logger = torch.jit._logging.LockingLogger()
        old_logger = torch.jit._logging.set_logger(logger)
        try:
            mtl = ModuleThatLogs()
            for i in range(5):
Reported by Pylint.
Line: 32
Column: 22
                return x

        logger = torch.jit._logging.LockingLogger()
        old_logger = torch.jit._logging.set_logger(logger)
        try:
            mtl = ModuleThatLogs()
            for i in range(5):
                mtl(torch.rand(3, 4, 5))
Reported by Pylint.
Line: 36
Column: 17
        try:
            mtl = ModuleThatLogs()
            for i in range(5):
                mtl(torch.rand(3, 4, 5))

            self.assertEqual(logger.get_counter_val('foo'), 15)
            self.assertEqual(logger.get_counter_val('positive'), 5)
        finally:
Reported by Pylint.
Line: 42
Column: 13
            self.assertEqual(logger.get_counter_val('foo'), 15)
            self.assertEqual(logger.get_counter_val('positive'), 5)
        finally:
            torch.jit._logging.set_logger(old_logger)

    def test_trace_numeric_counter(self):
        def foo(x):
            torch.jit._logging.add_stat_value('foo', 1)
            return x + 1.0
Reported by Pylint.
torch/quantization/ns/weight_utils.py
66 issues
Line: 14
Column: 1
from torch.fx import GraphModule
from torch.fx.graph import Node
from .utils import (
    get_target_type_str,
    getattr_from_fqn,
    return_first_non_observer_node,
)
Reported by Pylint.
Line: 20
Column: 1
    return_first_non_observer_node,
)

from .ns_types import (
    NSSingleResultValuesType,
    NSSingleResultType,
)

from typing import List, Optional, Dict, Callable
Reported by Pylint.
Line: 34
Column: 12
    return mod[0].weight.detach()  # type: ignore[index]

def mod_weight_bias_0(mod: nn.Module) -> torch.Tensor:
    return mod._weight_bias()[0]  # type: ignore[operator]

def get_lstm_weight(mod: nn.Module) -> List[torch.Tensor]:
    res = []
    for idx, param_name in enumerate(mod._flat_weights_names):  # type: ignore[arg-type]
        if 'weight_ih_l' in param_name or 'weight_hh_l' in param_name:
Reported by Pylint.
Line: 38
Column: 38
def get_lstm_weight(mod: nn.Module) -> List[torch.Tensor]:
    res = []
    for idx, param_name in enumerate(mod._flat_weights_names):  # type: ignore[arg-type]
        if 'weight_ih_l' in param_name or 'weight_hh_l' in param_name:
            param_value = mod._flat_weights[idx].detach()  # type: ignore[index]
            res.append(param_value)
    return res
Reported by Pylint.
Line: 40
Column: 27
    res = []
    for idx, param_name in enumerate(mod._flat_weights_names):  # type: ignore[arg-type]
        if 'weight_ih_l' in param_name or 'weight_hh_l' in param_name:
            param_value = mod._flat_weights[idx].detach()  # type: ignore[index]
            res.append(param_value)
    return res

def get_qlstm_weight(mod: nn.Module) -> List[torch.Tensor]:
    res = []
Reported by Pylint.
Line: 46
Column: 25
def get_qlstm_weight(mod: nn.Module) -> List[torch.Tensor]:
    res = []
    for weight_value in mod._all_weight_values:  # type: ignore[union-attr]
        res.append(weight_value.param.__getstate__()[0][4][0].__getstate__()[0][0])
        res.append(weight_value.param.__getstate__()[0][4][1].__getstate__()[0][0])
    return res

def get_conv_mod_weight(mod: nn.Module) -> torch.Tensor:
Reported by Pylint.
Line: 65
Column: 16
    ):
        return mod[0].weight.detach()
    else:
        return mod._weight_bias()[0]  # type: ignore[operator]

def get_linear_mod_weight(mod: nn.Module) -> torch.Tensor:
    if isinstance(mod, nn.Linear):
        return mod.weight.detach()
    elif isinstance(mod, nni.LinearReLU):
Reported by Pylint.
Line: 73
Column: 16
    elif isinstance(mod, nni.LinearReLU):
        return mod[0].weight.detach()
    else:
        return mod._weight_bias()[0]  # type: ignore[operator]

def get_lstm_mod_weights(mod: nn.Module) -> List[torch.Tensor]:
    # TODO(future PR): make more generic, handle everything
    if isinstance(mod, nn.LSTM):
        res = []
Reported by Pylint.
Line: 76
Column: 3
        return mod._weight_bias()[0]  # type: ignore[operator]

def get_lstm_mod_weights(mod: nn.Module) -> List[torch.Tensor]:
    # TODO(future PR): make more generic, handle everything
    if isinstance(mod, nn.LSTM):
        res = []
        for idx, param_name in enumerate(mod._flat_weights_names):
            if 'weight_ih_l' in param_name or 'weight_hh_l' in param_name:
                param_value = mod._flat_weights[idx].detach()
Reported by Pylint.
Line: 79
Column: 42
    # TODO(future PR): make more generic, handle everything
    if isinstance(mod, nn.LSTM):
        res = []
        for idx, param_name in enumerate(mod._flat_weights_names):
            if 'weight_ih_l' in param_name or 'weight_hh_l' in param_name:
                param_value = mod._flat_weights[idx].detach()
                res.append(param_value)
        return res
    else:
Reported by Pylint.
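Note: the repeated warnings in this file are protected-member accesses (_flat_weights_names, _flat_weights, _weight_bias). For the plain nn.LSTM case a public-API alternative is possible because the module registers its weights as parameters named weight_ih_l{k} / weight_hh_l{k}; a sketch:

from typing import List

import torch
import torch.nn as nn

def get_lstm_mod_weights_public(mod: nn.Module) -> List[torch.Tensor]:
    # Relies only on the documented parameter naming of nn.LSTM.
    return [
        param.detach()
        for name, param in mod.named_parameters()
        if name.startswith(('weight_ih_l', 'weight_hh_l'))
    ]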
torch/utils/benchmark/utils/valgrind_wrapper/timer_interface.py
66 issues
Line: 26
Column: 28
if TYPE_CHECKING:
    CompletedProcessType = subprocess.CompletedProcess[str]
else:
    CompletedProcessType = subprocess.CompletedProcess

FunctionCount = NamedTuple("FunctionCount", [("count", int), ("function", str)])
Reported by Pylint.
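Note: the FunctionCount declaration in this snippet uses the functional NamedTuple form; the class-based equivalent declares the same two fields and is generally easier for linters and type checkers to analyze:

from typing import NamedTuple

class FunctionCount(NamedTuple):
    count: int
    function: str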
Line: 178
Column: 9
    stmt_callgrind_out: Optional[str]

    def __repr__(self) -> str:
        newline = "\n"  # `\` cannot appear in fstring code section.
        base_stats = self.baseline_exclusive_stats
        output = f"""
{super().__repr__()}
{self.task_spec.summarize()}
{'':>25}All{'':>10}Noisy symbols removed
Reported by Pylint.
Line: 217
Column: 3
        stats = self.stmt_exclusive_stats
        return (stats.denoise() if denoise else stats).sum()

    # FIXME: Once 3.7 is the minimum version, type annotate `other` per PEP 563
    def delta(
        self,
        other,  # type: CallgrindStats
        inclusive: bool = False,
    ) -> FunctionCounts:
Reported by Pylint.
Line: 272
Column: 59
            )

            for before, after in transforms:
                stats = stats.transform(lambda fn: re.sub(before, after, fn))

            return stats

        return CallgrindStats(
            task_spec=self.task_spec,
Reported by Pylint.
Line: 272
Column: 67
            )

            for before, after in transforms:
                stats = stats.transform(lambda fn: re.sub(before, after, fn))

            return stats

        return CallgrindStats(
            task_spec=self.task_spec,
Reported by Pylint.
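Note: both reports on line 272 point at the lambda's free variables, which Pylint flags as loop variables captured by reference (cell-var-from-loop). The transform call applies the lambda immediately, so the behavior is correct here, but binding the loop values as defaults silences the warning and guards against any deferred call:

for before, after in transforms:
    stats = stats.transform(
        lambda fn, before=before, after=after: re.sub(before, after, fn)
    )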
Line: 340
Column: 20
        return self._serialization

    @staticmethod
    def unwrap_all(globals: Dict[str, Any]) -> Dict[str, Any]:
        return {
            k: (v.value if isinstance(v, CopyIfCallgrind) else v)
            for k, v in globals.items()
        }
Reported by Pylint.
Line: 420
Column: 24
    operations.
    """
    def __init__(self, globals: Dict[str, Any], data_dir: str) -> None:
        self._globals: Dict[str, CopyIfCallgrind] = {}
        self._data_dir = data_dir
        if not os.path.exists(data_dir):
            os.mkdir(data_dir)
Reported by Pylint.
Line: 495
Column: 53
        if self._supported_platform:
            # Only bother checking on supported platforms.
            for cmd in ("valgrind", "callgrind_control", "callgrind_annotate"):
                self._commands_available[cmd] = not subprocess.run(
                    ["which", cmd],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                ).returncode
Reported by Pylint.
Line: 517
Column: 9
    def collect_callgrind(
        self,
        task_spec: common.TaskSpec,
        globals: Dict[str, Any],
        *,
        number: int,
        repeats: int,
        collect_baseline: bool,
        is_python: bool,
Reported by Pylint.
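Note: globals as a parameter name shadows the builtin, which is the likely complaint for this signature and the one below. Either a targeted # pylint: disable=redefined-builtin on the line or a rename avoids it; a rename sketch with an illustrative name (the rename would ripple to call sites):

def collect_callgrind(
    self,
    task_spec: common.TaskSpec,
    globals_dict: Dict[str, Any],  # illustrative rename; `globals` shadows the builtin
    *,
    number: int,
    repeats: int,
    collect_baseline: bool,
    is_python: bool,
):
    ...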
Line: 558
Column: 9
        self,
        *,
        task_spec: common.TaskSpec,
        globals: Dict[str, Any],
        number: int,
        repeats: int,
        collect_baseline: bool,
        is_python: bool,
        retain_out_file: bool,
Reported by Pylint.