The following issues were found:
test/distributed/pipeline/sync/test_balance.py
87 issues
Line: 9
Column: 1
# LICENSE file in the root directory of this source tree.
import time
import pytest
import torch
from torch import nn
from torch.distributed.pipeline.sync._balance import balance_by_size, balance_by_time, blockpartition
from torch.distributed.pipeline.sync._balance.profile import layerwise_sandbox
Reported by Pylint.
Line: 10
Column: 1
import time
import pytest
import torch
from torch import nn
from torch.distributed.pipeline.sync._balance import balance_by_size, balance_by_time, blockpartition
from torch.distributed.pipeline.sync._balance.profile import layerwise_sandbox
Reported by Pylint.
Line: 11
Column: 1
import pytest
import torch
from torch import nn
from torch.distributed.pipeline.sync._balance import balance_by_size, balance_by_time, blockpartition
from torch.distributed.pipeline.sync._balance.profile import layerwise_sandbox
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
Reported by Pylint.
Line: 13
Column: 1
import torch
from torch import nn
from torch.distributed.pipeline.sync._balance import balance_by_size, balance_by_time, blockpartition
from torch.distributed.pipeline.sync._balance.profile import layerwise_sandbox
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
devices = ["cpu"]
Reported by Pylint.
Line: 14
Column: 1
from torch import nn
from torch.distributed.pipeline.sync._balance import balance_by_size, balance_by_time, blockpartition
from torch.distributed.pipeline.sync._balance.profile import layerwise_sandbox
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
devices = ["cpu"]
if torch.cuda.is_available():
Reported by Pylint.
Line: 83
Column: 17
        self.times = times
    def forward(self, x):
        for i in range(self.times):
            x = x + torch.rand_like(x, requires_grad=True)
        return x
sample = torch.rand(10, 100, 100)
Reported by Pylint.
Line: 120
Column: 17
        self.latent_size = latent_size
    def forward(self, x):
        for i in range(self.latent_size):
            x = x + torch.rand_like(x, requires_grad=True)
        return x
model = nn.Sequential(
    Tradeoff(param_size=1, latent_size=6),
Reported by Pylint.
Line: 1
Column: 1
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import time
import pytest
Reported by Pylint.
Line: 23
Column: 1
    devices.append("cuda")
def test_blockpartition():
    assert blockpartition.solve([1, 2, 3, 4, 5, 6], partitions=2) == [[1, 2, 3, 4], [5, 6]]
def test_blockpartition_zeros():
    assert blockpartition.solve([0, 0], partitions=2) == [[0], [0]]
Reported by Pylint.
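Editor's note: the report omits Pylint's message IDs, but the findings at Line 83, Column 17 and Line 120, Column 17 both point at the loop index in "for i in range(...)", which is never read, consistent with an unused-variable warning. A minimal fix sketch; the class name is invented for illustration:

import torch
from torch import nn

class AddNoise(nn.Module):  # hypothetical stand-in for the flagged module
    def __init__(self, times):
        super().__init__()
        self.times = times
    def forward(self, x):
        for _ in range(self.times):  # `_` marks the index as intentionally unused
            x = x + torch.rand_like(x, requires_grad=True)
        return x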
test/test_dispatch.py
86 issues
Line: 1
Column: 1
import torch._C as C
from torch.testing._internal.common_utils import TestCase, run_tests
from torch._python_dispatcher import PythonDispatcher
from collections import namedtuple
import itertools
import os
import re
import torch.utils.cpp_extension
Reported by Pylint.
Line: 2
Column: 1
import torch._C as C
from torch.testing._internal.common_utils import TestCase, run_tests
from torch._python_dispatcher import PythonDispatcher
from collections import namedtuple
import itertools
import os
import re
import torch.utils.cpp_extension
Reported by Pylint.
Line: 3
Column: 1
import torch._C as C
from torch.testing._internal.common_utils import TestCase, run_tests
from torch._python_dispatcher import PythonDispatcher
from collections import namedtuple
import itertools
import os
import re
import torch.utils.cpp_extension
Reported by Pylint.
Line: 9
Column: 1
import itertools
import os
import re
import torch.utils.cpp_extension
# TODO: Expand the dispatcher API to be a generic API for interfacing with
# the dispatcher from Python!
#
# These are exhaustive tests for commutativity of dispatch behavior. If you're
Reported by Pylint.
Line: 11
Column: 3
import re
import torch.utils.cpp_extension
# TODO: Expand the dispatcher API to be a generic API for interfacing with
# the dispatcher from Python!
#
# These are exhaustive tests for commutativity of dispatch behavior. If you're
# looking for more usage-info style tests, check op_registration_test.cpp
#
Reported by Pylint.
Line: 56
Column: 9
    def test_all_invariants(self):
        # Check that the regular stuff is OK!
        C._dispatch_check_all_invariants()
    # You probably don't want to call this directly; if your constructors
    # don't commute, you can still run commute with a fixed ctor_order
    # so that you can test that the destructors still commute
    def run_ops(self, name, ops, ctor_order=None, dtor_order=None,
Reported by Pylint.
Line: 105
Column: 13
        test_namespace = "__test{}__".format(self.namespace_index)
        def check_invariants(actual_provenance):
            C._dispatch_check_invariants(name)
            # Normalize the test namespace so that expected outputs are stable
            actual_state = C._dispatch_dump(
                "{}::{}".format(test_namespace, name)).replace(test_namespace, "test")
            actual_table = C._dispatch_dump_table(
                "{}::{}".format(test_namespace, name)).replace(test_namespace, "test")
Reported by Pylint.
Line: 107
Column: 28
        def check_invariants(actual_provenance):
            C._dispatch_check_invariants(name)
            # Normalize the test namespace so that expected outputs are stable
            actual_state = C._dispatch_dump(
                "{}::{}".format(test_namespace, name)).replace(test_namespace, "test")
            actual_table = C._dispatch_dump_table(
                "{}::{}".format(test_namespace, name)).replace(test_namespace, "test")
            expected_state, expected_table, expected_provenance = results.setdefault(
                frozenset(active_ops),
Reported by Pylint.
Line: 109
Column: 28
            # Normalize the test namespace so that expected outputs are stable
            actual_state = C._dispatch_dump(
                "{}::{}".format(test_namespace, name)).replace(test_namespace, "test")
            actual_table = C._dispatch_dump_table(
                "{}::{}".format(test_namespace, name)).replace(test_namespace, "test")
            expected_state, expected_table, expected_provenance = results.setdefault(
                frozenset(active_ops),
                Result(actual_state, actual_table, actual_provenance)
            )
Reported by Pylint.
Line: 135
Column: 27
            # lifetime of multiple registrations with multiple Library
            # references (refs), we can't deal with the strict checking
            # from DEF.
            refs[op_ix] = C._dispatch_library("FRAGMENT", test_namespace, "")
            active_ops.add(op_ix)
            try:
                ops[op_ix](refs[op_ix])
                check_invariants("running ctors {}".format(ctor_order[:i + 1]))
            except RuntimeError as e:
Reported by Pylint.
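Editor's note: several of the flagged columns in this file land on str.format calls such as "{}::{}".format(test_namespace, name). If Pylint's consider-using-f-string check is what fired (an assumption; the report does not include message IDs), the equivalent modern spelling is:

test_namespace, name = "__test0__", "foo"  # illustrative values
# Before (as quoted in the report):
qualified = "{}::{}".format(test_namespace, name)
# After, if the finding is consider-using-f-string:
qualified = f"{test_namespace}::{name}"
assert qualified == "__test0__::foo"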
caffe2/python/utils.py
86 issues
Line: 10
Column: 1
from caffe2.proto import caffe2_pb2
from future.utils import viewitems
from google.protobuf.message import DecodeError, Message
from google.protobuf import text_format
import sys
import collections
import copy
Reported by Pylint.
Line: 11
Column: 1
from caffe2.proto import caffe2_pb2
from future.utils import viewitems
from google.protobuf.message import DecodeError, Message
from google.protobuf import text_format
import sys
import collections
import copy
import functools
Reported by Pylint.
Line: 84
Column: 3
        return np.asarray(
            tensor.int32_data, dtype=np.uint8).reshape(tensor.dims)  # pb.UINT8=>np.uint8 use int32_data
    else:
        # TODO: complete the data type: bool, float16, byte, int64, string
        raise RuntimeError(
            "Tensor data type not supported yet: " + str(tensor.data_type))
def NumpyArrayToCaffe2Tensor(arr, name=None):
Reported by Pylint.
Line: 119
Column: 3
        tensor.data_type = caffe2_pb2.TensorProto.UINT8
        tensor.int32_data.extend(list(arr.flatten().astype(np.uint8)))  # np.uint8=>pb.UNIT8 use int32_data
    else:
        # TODO: complete the data type: bool, float16, byte, string
        raise RuntimeError(
            "Numpy data type not supported yet: " + str(arr.dtype))
    return tensor
Reported by Pylint.
Line: 234
Column: 5
            return func(obj)
        except DecodeError:
            continue
    else:
        raise DecodeError("Cannot find a fit protobuffer class.")
def ConvertProtoToBinary(proto_class, filename, out_filename):
    """Convert a text file of the given protobuf class to binary."""
Reported by Pylint.
Line: 299
Column: 9
    def run(cls, func):
        try:
            return func()
        except KeyboardInterrupt:
            raise
        except Exception:
            import pdb
            print(
Reported by Pylint.
Line: 308
Column: 13
                'Entering interactive debugger. Type "bt" to print '
                'the full stacktrace. Type "help" to see command listing.')
            print(sys.exc_info()[1])
            print
            pdb.post_mortem()
            sys.exit(1)
            raise
Reported by Pylint.
Line: 344
Column: 5
def BuildUniqueMutexIter(
    init_net,
    net,
    iter=None,
    iter_mutex=None,
    iter_val=0
):
    '''
    Often, a mutex guarded iteration counter is needed. This function creates a
Reported by Pylint.
Line: 388
Column: 21
            v = getattr(cls, k)
            if isinstance(v, string_types):
                assert v not in enum.values(), (
                    "Failed to resolve {} as Enum: "
                    "duplicate entries {}={}, {}={}".format(
                        cls, k, v, [key for key in enum if enum[key] == v][0], v
                    )
                )
                enum[k] = v
Reported by Pylint.
Line: 1
Column: 1
# @package utils
# Module caffe2.python.utils
from caffe2.proto import caffe2_pb2
from future.utils import viewitems
Reported by Pylint.
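Editor's note: two findings here are identifiable from context alone. The Line 308 snippet contains a bare "print" statement, a Python 2 leftover that is a no-op expression under Python 3 (typically reported as pointless-statement), and the Line 344 signature's "iter=None" parameter shadows the built-in iter (typically redefined-builtin). A sketch of the fix for the former:

# Before (quoted above): the bare name evaluates the built-in function
# object and discards it; nothing is printed.
print
# After: call it to emit the intended blank line.
print()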
torch/autograd/functional.py
86 issues
Line: 178
Column: 23
                                       "mode as it prevents from using the double backward trick to "
                                       "replace forward mode AD.".format(i))
            grads_i = torch.zeros_like(refs[i])
        else:
            if strict and create_graph and not grads_i.requires_grad:
                if "double" not in stage:
                    raise RuntimeError("The jacobian of the user-provided function is independent of "
                                       "input {}. This is not allowed in strict mode when create_graph=True.".format(i))
Reported by Pylint.
Line: 271
Column: 45
                                   "user-provided function returns "
                                   "a single Tensor with a single element.")
    enable_grad = True if create_graph else torch.is_grad_enabled()
    with torch.set_grad_enabled(enable_grad):
        grad_res = _autograd_grad(outputs, inputs, v, create_graph=create_graph)
        vjp = _fill_in_zeros(grad_res, inputs, strict, create_graph, "back")
    # Cleanup objects and return them to the user
Reported by Pylint.
Line: 361
Column: 30
    # The backward is linear so the value of grad_outputs is not important as
    # it won't appear in the double backward graph. We only need to ensure that
    # it does not contain inf or nan.
    grad_outputs = tuple(torch.zeros_like(out, requires_grad=True) for out in outputs)
    grad_inputs = _autograd_grad(outputs, inputs, grad_outputs, create_graph=True)
    _check_requires_grad(grad_inputs, "grad_inputs", strict=strict)
    if create_graph:
Reported by Pylint.
Line: 403
Column: 31
    assert len(tensors) == len(tensor_numels)
    assert len(tensors) > 0
    total_numel = sum(tensor_numels)
    diag_start_indices = (0, *torch.tensor(tensor_numels).cumsum(dim=0)[:-1].neg().unbind())
    chunks = tuple(tensor.new_zeros(total_numel, tensor_numel)
                   for tensor, tensor_numel in zip(tensors, tensor_numels))
    for chunk, diag_start_idx in zip(chunks, diag_start_indices):
        chunk.diagonal(diag_start_idx).fill_(1)
    return chunks
Reported by Pylint.
Line: 547
Column: 34
            for el_idx, vj_el in enumerate(vj):
                if vj_el is not None:
                    continue
                vj[el_idx] = torch.zeros_like(inputs[el_idx])
            return tuple(vj)
        jacobians_of_flat_output = _vmap(vjp)(grad_outputs)
        # Step 3: The returned jacobian is one big tensor per input. In this step,
Reported by Pylint.
Line: 596
Column: 41
                                   "independent of input {}. This is not allowed in "
                                   "strict mode.".format(i, el_idx))
                            raise RuntimeError(msg)
                        jac_i_el.append(torch.zeros_like(inp_el))
            jacobian += (tuple(torch.stack(jac_i_el, dim=0).view(out.size()
                         + inputs[el_idx].size()) for (el_idx, jac_i_el) in enumerate(jac_i)), )
        jacobian = _grad_postprocess(jacobian, create_graph)
Reported by Pylint.
Line: 598
Column: 32
                            raise RuntimeError(msg)
                        jac_i_el.append(torch.zeros_like(inp_el))
            jacobian += (tuple(torch.stack(jac_i_el, dim=0).view(out.size()
                         + inputs[el_idx].size()) for (el_idx, jac_i_el) in enumerate(jac_i)), )
        jacobian = _grad_postprocess(jacobian, create_graph)
        return _tuple_postprocess(jacobian, (is_outputs_tuple, is_inputs_tuple))
Reported by Pylint.
Line: 785
Column: 45
        jac = _autograd_grad(outputs, inputs, create_graph=True)
        _check_requires_grad(jac, "jacobian", strict=strict)
    enable_grad = True if create_graph else torch.is_grad_enabled()
    with torch.set_grad_enabled(enable_grad):
        grad_res = _autograd_grad(jac, inputs, v, create_graph=create_graph)
        vhp = _fill_in_zeros(grad_res, inputs, strict, create_graph, "double_back")
    outputs = _grad_postprocess(outputs, create_graph)
Reported by Pylint.
Line: 885
Column: 26
        jac = _autograd_grad(outputs, inputs, create_graph=True)
        _check_requires_grad(jac, "jacobian", strict=strict)
        grad_jac = tuple(torch.zeros_like(inp, requires_grad=True) for inp in inputs)
        double_back = _autograd_grad(jac, inputs, grad_jac, create_graph=True)
        _check_requires_grad(jac, "hessian", strict=strict)
    enable_grad = True if create_graph else torch.is_grad_enabled()
Reported by Pylint.
Line: 890
Column: 45
        double_back = _autograd_grad(jac, inputs, grad_jac, create_graph=True)
        _check_requires_grad(jac, "hessian", strict=strict)
    enable_grad = True if create_graph else torch.is_grad_enabled()
    with torch.set_grad_enabled(enable_grad):
        grad_res = _autograd_grad(double_back, grad_jac, v, create_graph=create_graph)
        hvp = _fill_in_zeros(grad_res, inputs, strict, create_graph, "double_back_trick")
    outputs = _grad_postprocess(outputs, create_graph)
Reported by Pylint.
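Editor's note: the flagged columns in this file all sit on dynamically registered attributes such as torch.zeros_like and torch.tensor. Pylint is known to raise false-positive no-member errors on torch because much of its surface is generated at runtime; if that is what fired here (an assumption, since the report omits message IDs), the usual remedies are setting generated-members=torch.* under [TYPECHECK] in .pylintrc, or a scoped inline disable:

import torch

# Scoped suppression sketch; prefer the .pylintrc generated-members
# setting so every file benefits instead of annotating line by line.
x = torch.zeros_like(torch.ones(3))  # pylint: disable=no-member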
test/distributed/pipeline/sync/skip/test_stash_pop.py
84 issues
Line: 7
Column: 1
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch import nn
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.tracker import SkipTracker, use_skip_tracker
Reported by Pylint.
Line: 8
Column: 1
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch import nn
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.tracker import SkipTracker, use_skip_tracker
Reported by Pylint.
Line: 9
Column: 1
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch import nn
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.tracker import SkipTracker, use_skip_tracker
Reported by Pylint.
Line: 11
Column: 1
import torch
from torch import nn
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.tracker import SkipTracker, use_skip_tracker
@pytest.fixture(autouse=True)
def skip_tracker():
Reported by Pylint.
Line: 12
Column: 1
from torch import nn
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.tracker import SkipTracker, use_skip_tracker
@pytest.fixture(autouse=True)
def skip_tracker():
    skip_tracker = SkipTracker()
Reported by Pylint.
Line: 17
Column: 5
@pytest.fixture(autouse=True)
def skip_tracker():
    skip_tracker = SkipTracker()
    with use_skip_tracker(skip_tracker):
        yield skip_tracker
def test_stash(skip_tracker):
Reported by Pylint.
Line: 22
Column: 16
        yield skip_tracker
def test_stash(skip_tracker):
    @skippable(stash=["foo"])
    class Stash(nn.Module):
        def forward(self, input):
            yield stash("foo", input)
            return input * 2  # noqa: B901
Reported by Pylint.
Line: 25
Column: 27
def test_stash(skip_tracker):
    @skippable(stash=["foo"])
    class Stash(nn.Module):
        def forward(self, input):
            yield stash("foo", input)
            return input * 2  # noqa: B901
    l1 = Stash()
Reported by Pylint.
Line: 42
Column: 27
def test_pop():
    @skippable(stash=["foo"])
    class Stash(nn.Module):
        def forward(self, input):
            yield stash("foo", input)
            return input * 2  # noqa: B901
    @skippable(pop=["foo"])
    class Pop(nn.Module):
Reported by Pylint.
Line: 48
Column: 27
    @skippable(pop=["foo"])
    class Pop(nn.Module):
        def forward(self, input):
            foo = yield pop("foo")
            return foo
    l1 = Stash()
    l2 = Pop()
Reported by Pylint.
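Editor's note: the Column 27 findings in this file land on the "input" parameter of forward, which shadows the input builtin (consistent with Pylint's redefined-builtin; the report omits message IDs), and the fixture findings around Lines 11-17 match redefined-outer-name on skip_tracker. A rename sketch that drops the skippable generator protocol to show only the rename:

import torch
from torch import nn

class Stash(nn.Module):  # decorator and stash/yield omitted; sketch only
    def forward(self, x):  # was `input`, which shadows the builtin
        return x * 2

assert torch.equal(Stash()(torch.ones(2)), torch.full((2,), 2.0))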
torch/package/package_importer.py
84 issues
Line: 16
Column: 1
import torch
from torch.serialization import _get_restore_location, _maybe_decode_ascii
from ._directory_reader import DirectoryReader
from ._importlib import (
    _calc___package__,
    _normalize_line_endings,
    _normalize_path,
    _resolve_name,
Reported by Pylint.
Line: 17
Column: 1
from torch.serialization import _get_restore_location, _maybe_decode_ascii
from ._directory_reader import DirectoryReader
from ._importlib import (
    _calc___package__,
    _normalize_line_endings,
    _normalize_path,
    _resolve_name,
    _sanity_check,
Reported by Pylint.
Line: 24
Column: 1
    _resolve_name,
    _sanity_check,
)
from ._mangling import PackageMangler, demangle
from ._package_unpickler import PackageUnpickler
from .file_structure_representation import Directory, _create_directory_from_file_list
from .glob_group import GlobPattern
from .importer import Importer
Reported by Pylint.
Line: 25
Column: 1
    _sanity_check,
)
from ._mangling import PackageMangler, demangle
from ._package_unpickler import PackageUnpickler
from .file_structure_representation import Directory, _create_directory_from_file_list
from .glob_group import GlobPattern
from .importer import Importer
Reported by Pylint.
Line: 26
Column: 1
)
from ._mangling import PackageMangler, demangle
from ._package_unpickler import PackageUnpickler
from .file_structure_representation import Directory, _create_directory_from_file_list
from .glob_group import GlobPattern
from .importer import Importer
class PackageImporter(Importer):
Reported by Pylint.
Line: 27
Column: 1
from ._mangling import PackageMangler, demangle
from ._package_unpickler import PackageUnpickler
from .file_structure_representation import Directory, _create_directory_from_file_list
from .glob_group import GlobPattern
from .importer import Importer
class PackageImporter(Importer):
    """Importers allow you to load code written to packages by :class:`PackageExporter`.
Reported by Pylint.
Line: 28
Column: 1
from ._package_unpickler import PackageUnpickler
from .file_structure_representation import Directory, _create_directory_from_file_list
from .glob_group import GlobPattern
from .importer import Importer
class PackageImporter(Importer):
    """Importers allow you to load code written to packages by :class:`PackageExporter`.
    Code is loaded in a hermetic way, using files from the package
Reported by Pylint.
Line: 45
Column: 5
    a locally-installed package, but then fails when the package is copied to another machine.
    """
    """The dictionary of already loaded modules from this package, equivalent to ``sys.modules`` but
    local to this importer.
    """
    modules: Dict[str, types.ModuleType]
    def __init__(
Reported by Pylint.
Line: 111
Column: 40
        # used for torch.serialization._load
        self.Unpickler = lambda *args, **kwargs: PackageUnpickler(self, *args, **kwargs)
    def import_module(self, name: str, package=None):
        """Load a module from the package if it hasn't already been loaded, and then return
        the module. Modules are loaded locally
        to the importer and will appear in ``self.modules`` rather than ``sys.modules``.
        Args:
Reported by Pylint.
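Editor's note: the Line 45, Column 5 context shows a bare string literal sitting above the "modules" annotation. A string literal in that position is not an attribute docstring at runtime, and Pylint typically flags it as pointless-string-statement (an assumption from context). A sketch of the conventional alternative:

from typing import Dict
import types

class PackageImporter:  # stripped-down sketch, not the real class
    # The dictionary of already loaded modules from this package,
    # equivalent to ``sys.modules`` but local to this importer.
    modules: Dict[str, types.ModuleType]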
torch/fx/node.py
84 issues
Line: 3
Column: 1
# Nodes represent a definition of a value in our graph of operators.
from typing import TYPE_CHECKING, Union, Callable, Any, Tuple, List, Optional, Dict, Set
from .immutable_collections import immutable_dict, immutable_list
import torch
import builtins
import types
from torch.fx.operator_schemas import normalize_function, normalize_module, ArgsKwargsPair
if TYPE_CHECKING:
Reported by Pylint.
Line: 10
Column: 5
from torch.fx.operator_schemas import normalize_function, normalize_module, ArgsKwargsPair
if TYPE_CHECKING:
    from .graph import Graph
BaseArgumentTypes = Union[str, int, float, bool, torch.dtype, torch.Tensor, torch.device, torch.memory_format]
base_types = BaseArgumentTypes.__args__ # type: ignore[attr-defined]
Target = Union[Callable[..., Any], str]
Reported by Pylint.
Line: 12
Column: 91
if TYPE_CHECKING:
    from .graph import Graph
BaseArgumentTypes = Union[str, int, float, bool, torch.dtype, torch.Tensor, torch.device, torch.memory_format]
base_types = BaseArgumentTypes.__args__ # type: ignore[attr-defined]
Target = Union[Callable[..., Any], str]
Argument = Optional[Union[
Reported by Pylint.
Line: 12
Column: 50
if TYPE_CHECKING:
    from .graph import Graph
BaseArgumentTypes = Union[str, int, float, bool, torch.dtype, torch.Tensor, torch.device, torch.memory_format]
base_types = BaseArgumentTypes.__args__ # type: ignore[attr-defined]
Target = Union[Callable[..., Any], str]
Argument = Optional[Union[
Reported by Pylint.
Line: 12
Column: 77
if TYPE_CHECKING:
    from .graph import Graph
BaseArgumentTypes = Union[str, int, float, bool, torch.dtype, torch.Tensor, torch.device, torch.memory_format]
base_types = BaseArgumentTypes.__args__ # type: ignore[attr-defined]
Target = Union[Callable[..., Any], str]
Argument = Optional[Union[
Reported by Pylint.
Line: 337
Column: 20
    def __repr__(self) -> str:
        if self._repr_fn:
            return self._repr_fn(self)
        return self.name
    def _pretty_print_target(self, target):
        """
        Make target printouts more user-friendly.
Reported by Pylint.
Line: 26
Column: 45
    BaseArgumentTypes
]]
_side_effectful_functions: Set[Callable] = {torch._assert}
# this is fixed on master, WAR for 1.5
def _find_module_of_method(orig_method: Callable[..., Any]) -> str:
    name = orig_method.__name__
    module = orig_method.__module__
Reported by Pylint.
Line: 117
Column: 18
    """
    def __init__(self, graph: 'Graph', name: str, op: str, target: 'Target',
                 args: Tuple['Argument', ...], kwargs: Dict[str, 'Argument'],
                 type : Optional[Any] = None) -> None:
        self.graph = graph
        self.name = name  # unique name of value being created
        assert op in ['placeholder', 'call_method', 'call_module', 'call_function', 'get_attr', 'output', 'root']
        self.op = op  # the kind of operation = placeholder|call_method|call_module|call_function|get_attr
        if op in ['call_method', 'call_module']:
Reported by Pylint.
Line: 197
Column: 9
            x (Node): The node to put before this node. Must be a member of the same graph.
        """
        assert self.graph == x.graph, "Attempting to move a Node into a different Graph"
        x._remove_from_list()
        p = self._prev
        p._next, x._prev = x, p
        x._next, self._prev = self, x
    def append(self, x: 'Node') -> None:
Reported by Pylint.
Line: 329
Column: 29
            old_use.users.pop(self)
        self._input_nodes = {}
        map_arg(self._args, lambda n: self._input_nodes.setdefault(n))
        map_arg(self._kwargs, lambda n: self._input_nodes.setdefault(n))
        for new_use in self._input_nodes.keys():
            new_use.users.setdefault(self)
Reported by Pylint.
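Editor's note: the Line 12 findings at Columns 50, 77, and 91 sit on torch.dtype, torch.device, and torch.memory_format inside the BaseArgumentTypes union, again matching the torch no-member pattern, while Line 117, Column 18 lands near the "type" parameter, which shadows the builtin. A rename sketch for the latter; "return_type" is a hypothetical name, and the real signature keeps "type" for backward compatibility:

from typing import Any, Optional

class Node:  # minimal sketch, not the real torch.fx.Node
    def __init__(self, return_type: Optional[Any] = None) -> None:
        # `return_type` is a hypothetical rename of the flagged `type`
        # parameter; attributes do not shadow builtins, so storing on
        # self.type remains fine.
        self.type = return_type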
torch/quantization/ns/mappings.py
84 issues
Line: 15
Column: 1
import torch.nn.intrinsic as nni
import torch.nn.qat as nnqat
from .ns_types import NSNodeTargetType
from typing import Set, Dict, List, Optional
def get_base_name_to_sets_of_related_ops() -> Dict[str, Set[NSNodeTargetType]]:
Reported by Pylint.
Line: 87
Column: 13
        # average pool
        set([
            nn.AvgPool1d,
            torch.avg_pool1d,
        ]),
        set([
            nn.AvgPool2d,
            torch._C._nn.avg_pool2d,
        ]),
Reported by Pylint.
Line: 117
Column: 13
        ]),
        # add
        set([
            torch.add,
            toq.add,
            operator.add,  # x + y
            toq.add_relu,
        ]),
        # cat
Reported by Pylint.
Line: 124
Column: 13
        ]),
        # cat
        set([
            torch.cat,
            toq.cat,
        ]),
        # mul
        set([
            torch.mul,
Reported by Pylint.
Line: 129
Column: 13
        ]),
        # mul
        set([
            torch.mul,
            toq.mul,
            operator.mul,
            toq.mul_relu,
        ]),
        # relu
Reported by Pylint.
Line: 140
Column: 13
            nn.ReLU,
            'relu',
            'relu_',
            torch.relu,
        ]),
        # maxpool
        set([
            nn.MaxPool1d,
            F.max_pool1d,
Reported by Pylint.
Line: 157
Column: 13
        ]),
        # sigmoid
        set([
            torch.sigmoid,
            'sigmoid',
            'sigmoid_',
            nn.Sigmoid,
            F.sigmoid,
        ]),
Reported by Pylint.
Line: 283
Column: 13
        set([
            nn.Tanh,
            F.tanh,
            torch.tanh,
            'tanh_',
            'tanh',
        ]),
        # F.hardsigmoid
        set([
Reported by Pylint.
Line: 306
Column: 13
        ]),
        # unsqueeze
        set([
            torch.unsqueeze,
        ]),
        # stack
        set([
            torch.stack,
        ]),
Reported by Pylint.
Line: 310
Column: 13
        ]),
        # stack
        set([
            torch.stack,
        ]),
        # squeeze
        set([
            torch.squeeze,
        ]),
Reported by Pylint.
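Editor's note: every flagged context in this file is a set([...]) wrapping a list literal; entries like torch._C._nn.avg_pool2d also touch protected modules, which Pylint reports as protected-access. Whatever the exact message here (the report omits IDs), the set([...]) pattern has an idiomatic equivalent that skips building a throwaway list:

import torch
from torch import nn

# Before (as quoted in the report): set([...]) builds a throwaway list first.
ops = set([nn.AvgPool1d, torch.avg_pool1d])
# After: a set literal is equivalent and avoids the intermediate list.
ops = {nn.AvgPool1d, torch.avg_pool1d}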
torch/fx/graph_module.py
83 issues
Line: 8
Column: 1
from torch.package import PackageImporter, PackageExporter
import linecache
from typing import Type, Dict, List, Any, Union, Optional, Set
from .graph import Graph, _is_from_torch, _custom_builtins, PythonCode
from torch.package import Importer, sys_importer
import copy
import itertools
import sys
import traceback
Reported by Pylint.
Line: 111
Column: 9
    tracer_cls = body.get('_tracer_cls')
    if tracer_cls is None:
        from ._symbolic_trace import Tracer
        tracer_cls = Tracer
    graphmodule_cls_name = body.get('_graphmodule_cls_name', 'GraphModule')
    # This is a workaround for a mypy linter issue related to
Reported by Pylint.
Line: 561
Column: 28
            if cls_call is not None:
                return cls_call(self, *args, **kwargs)
            else:
                return super(type(self), self).__call__(*args, **kwargs)
        except Exception as e:
            assert e.__traceback__
            topmost_framesummary: traceback.FrameSummary = \
                traceback.StackSummary.extract(traceback.walk_tb(e.__traceback__))[-1]  # type: ignore[arg-type]
            if "eval_with_key" in topmost_framesummary.filename:
Reported by Pylint.
Line: 7
Column: 1
from torch.nn.modules.module import _addindent
from torch.package import PackageImporter, PackageExporter
import linecache
from typing import Type, Dict, List, Any, Union, Optional, Set
from .graph import Graph, _is_from_torch, _custom_builtins, PythonCode
from torch.package import Importer, sys_importer
import copy
import itertools
import sys
Reported by Pylint.
Line: 23
Column: 32
# using exec_with_source will add it to our local cache
# and then tools like TorchScript will be able to get source info.
_next_id = 0
def exec_with_source(src: str, globals: Dict[str, Any]):
    global _next_id
    key = f'<eval_with_key_{_next_id}>'
    _next_id += 1
    _eval_cache[key] = [line + '\n' for line in src.splitlines()]
    exec(compile(src, key, 'exec'), globals)
Reported by Pylint.
Line: 24
Column: 5
# and then tools like TorchScript will be able to get source info.
_next_id = 0
def exec_with_source(src: str, globals: Dict[str, Any]):
    global _next_id
    key = f'<eval_with_key_{_next_id}>'
    _next_id += 1
    _eval_cache[key] = [line + '\n' for line in src.splitlines()]
    exec(compile(src, key, 'exec'), globals)
Reported by Pylint.
Line: 28
Column: 5
    key = f'<eval_with_key_{_next_id}>'
    _next_id += 1
    _eval_cache[key] = [line + '\n' for line in src.splitlines()]
    exec(compile(src, key, 'exec'), globals)
# patch linecache so that any code we exec using exec_with_source
# works with inspect
_eval_cache : Dict[str, List[str]] = {}
_orig_getlines = linecache.getlines
Reported by Pylint.
Line: 28
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b102_exec_used.html
    key = f'<eval_with_key_{_next_id}>'
    _next_id += 1
    _eval_cache[key] = [line + '\n' for line in src.splitlines()]
    exec(compile(src, key, 'exec'), globals)
# patch linecache so that any code we exec using exec_with_source
# works with inspect
_eval_cache : Dict[str, List[str]] = {}
_orig_getlines = linecache.getlines
Reported by Bandit.
Line: 41
Column: 33
linecache.getlines = patched_getline
def _forward_from_src(src: str, globals: Dict[str, Any]):
    # avoid mutating the passed in dict
    globals_copy = globals.copy()
    exec_with_source(src, globals_copy)
    forward_fn = globals_copy['forward']
    del globals_copy['forward']
Reported by Pylint.
Line: 59
Column: 26
    return f'from {module_name} import {attr_name} as {name}'
def _format_import_block(globals: Dict[str, Any], importer: Importer):
    import_strs: Set[str] = set()
    for name, obj in globals.items():
        import_strs.add(_format_import_statement(name, obj, importer))
    return '\n'.join(import_strs)
Reported by Pylint.
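Editor's note: the Bandit entry is B102 (exec_used), per the suggestion URL. The exec here is deliberate: torch.fx compiles forward source it generated itself, and the call already passes an explicit globals dict rather than leaking into the caller's namespace. If the usage is reviewed and accepted, Bandit's documented suppression is a "# nosec" annotation on the flagged line (recent Bandit versions accept a specific test ID). A sketch, not a statement that the annotation is present upstream:

src = "def forward(self, x):\n    return x\n"
key = "<eval_with_key_0>"  # illustrative key, mirroring the quoted code
scope: dict = {}
exec(compile(src, key, "exec"), scope)  # nosec B102 - src is generated by torch.fx, not user input
assert "forward" in scope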
torch/testing/_internal/hypothesis_utils.py
83 issues
Line: 6
Column: 1
import numpy as np
import torch
import hypothesis
from functools import reduce
from hypothesis import assume
from hypothesis import settings
from hypothesis import strategies as st
from hypothesis.extra import numpy as stnp
Reported by Pylint.
Line: 8
Column: 1
import hypothesis
from functools import reduce
from hypothesis import assume
from hypothesis import settings
from hypothesis import strategies as st
from hypothesis.extra import numpy as stnp
from hypothesis.strategies import SearchStrategy
Reported by Pylint.
Line: 9
Column: 1
import hypothesis
from functools import reduce
from hypothesis import assume
from hypothesis import settings
from hypothesis import strategies as st
from hypothesis.extra import numpy as stnp
from hypothesis.strategies import SearchStrategy
from torch.testing._internal.common_quantized import _calculate_dynamic_qparams, _calculate_dynamic_per_channel_qparams
Reported by Pylint.
Line: 10
Column: 1
from functools import reduce
from hypothesis import assume
from hypothesis import settings
from hypothesis import strategies as st
from hypothesis.extra import numpy as stnp
from hypothesis.strategies import SearchStrategy
from torch.testing._internal.common_quantized import _calculate_dynamic_qparams, _calculate_dynamic_per_channel_qparams
Reported by Pylint.
Line: 11
Column: 1
from hypothesis import assume
from hypothesis import settings
from hypothesis import strategies as st
from hypothesis.extra import numpy as stnp
from hypothesis.strategies import SearchStrategy
from torch.testing._internal.common_quantized import _calculate_dynamic_qparams, _calculate_dynamic_per_channel_qparams
# Setup for the hypothesis tests.
Reported by Pylint.
Line: 12
Column: 1
from hypothesis import settings
from hypothesis import strategies as st
from hypothesis.extra import numpy as stnp
from hypothesis.strategies import SearchStrategy
from torch.testing._internal.common_quantized import _calculate_dynamic_qparams, _calculate_dynamic_per_channel_qparams
# Setup for the hypothesis tests.
# The tuples are (torch_quantized_dtype, zero_point_enforce), where the last
Reported by Pylint.
Line: 23
Column: 5
# Tuple with all quantized data types.
_ALL_QINT_TYPES = (
    torch.quint8,
    torch.qint8,
    torch.qint32,
)
# Enforced zero point for every quantized data type.
Reported by Pylint.
Line: 24
Column: 5
# Tuple with all quantized data types.
_ALL_QINT_TYPES = (
    torch.quint8,
    torch.qint8,
    torch.qint32,
)
# Enforced zero point for every quantized data type.
# If None, any zero_point point within the range of the data type is OK.
Reported by Pylint.
Line: 25
Column: 5
_ALL_QINT_TYPES = (
    torch.quint8,
    torch.qint8,
    torch.qint32,
)
# Enforced zero point for every quantized data type.
# If None, any zero_point point within the range of the data type is OK.
_ENFORCED_ZERO_POINT = defaultdict(lambda: None, {
Reported by Pylint.
Line: 31
Column: 5
# Enforced zero point for every quantized data type.
# If None, any zero_point point within the range of the data type is OK.
_ENFORCED_ZERO_POINT = defaultdict(lambda: None, {
    torch.quint8: None,
    torch.qint8: None,
    torch.qint32: 0
})
def _get_valid_min_max(qparams):
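Editor's note: the flagged columns here all land on torch.quint8, torch.qint8, and torch.qint32, matching the torch no-member pattern noted for torch/autograd/functional.py above. Separately, the defaultdict(lambda: None, ...) in the Line 31 context is worth reading carefully: looking up a dtype outside the table silently falls back to None, meaning any in-range zero point is acceptable. A small usage sketch derived from the quoted code:

from collections import defaultdict
import torch

_ENFORCED_ZERO_POINT = defaultdict(lambda: None, {
    torch.quint8: None,
    torch.qint8: None,
    torch.qint32: 0,
})

assert _ENFORCED_ZERO_POINT[torch.qint32] == 0      # qint32 must use zero point 0
assert _ENFORCED_ZERO_POINT[torch.quint8] is None   # any in-range zero point is OK
assert _ENFORCED_ZERO_POINT[torch.float32] is None  # unknown dtypes fall back to None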
Reported by Pylint.