The following issues were found:
torch/package/_mangling.py
9 issues
Line: 15
Column: 9
"""
def __init__(self):
global _mangle_index
self._mangle_index = _mangle_index
# Increment the global index
_mangle_index += 1
# Angle brackets are used so that there is almost no chance of
# confusing this module for a real module. Plus, it is Python's
Reported by Pylint.
Line: 42
Column: 9
mangled name, irrespective of which PackageMangler created it.
"""
if is_mangled(name):
first, sep, last = name.partition(".")
# If there is only a base mangle prefix, e.g. '<torch_package_0>',
# then return an empty string.
return last if len(sep) != 0 else ""
return name
Reported by Pylint.
Line: 6
Column: 1
"""
import re
_mangle_index = 0
class PackageMangler:
"""
Used on import, to ensure that all modules imported have a shared mangle parent.
Reported by Pylint.
Line: 15
Column: 9
"""
def __init__(self):
global _mangle_index
self._mangle_index = _mangle_index
# Increment the global index
_mangle_index += 1
# Angle brackets are used so that there is almost no chance of
# confusing this module for a real module. Plus, it is Python's
Reported by Pylint.
Line: 24
Column: 5
# preferred way of denoting special modules.
self._mangle_parent = f"<torch_package_{self._mangle_index}>"
def mangle(self, name) -> str:
assert len(name) != 0
return self._mangle_parent + "." + name
def parent_name(self):
return self._mangle_parent
Reported by Pylint.
Line: 25
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
self._mangle_parent = f"<torch_package_{self._mangle_index}>"
def mangle(self, name) -> str:
assert len(name) != 0
return self._mangle_parent + "." + name
def parent_name(self):
return self._mangle_parent
Reported by Bandit.
Line: 28
Column: 5
assert len(name) != 0
return self._mangle_parent + "." + name
def parent_name(self):
return self._mangle_parent
def is_mangled(name: str) -> bool:
return bool(re.match(r"<torch_package_\d+>", name))
Reported by Pylint.
Line: 32
Column: 1
return self._mangle_parent
def is_mangled(name: str) -> bool:
return bool(re.match(r"<torch_package_\d+>", name))
def demangle(name: str) -> str:
"""
Reported by Pylint.
Line: 49
Column: 1
return name
def get_mangle_prefix(name: str) -> str:
return name.partition(".")[0] if is_mangled(name) else name
Reported by Pylint.
torch/nn/parallel/__init__.py
9 issues
Line: 1
Column: 1
from .parallel_apply import parallel_apply
from .replicate import replicate
from .data_parallel import DataParallel, data_parallel
from .scatter_gather import scatter, gather
from .distributed import DistributedDataParallel
__all__ = ['replicate', 'scatter', 'parallel_apply', 'gather', 'data_parallel',
'DataParallel', 'DistributedDataParallel']
Reported by Pylint.
Line: 2
Column: 1
from .parallel_apply import parallel_apply
from .replicate import replicate
from .data_parallel import DataParallel, data_parallel
from .scatter_gather import scatter, gather
from .distributed import DistributedDataParallel
__all__ = ['replicate', 'scatter', 'parallel_apply', 'gather', 'data_parallel',
'DataParallel', 'DistributedDataParallel']
Reported by Pylint.
Line: 3
Column: 1
from .parallel_apply import parallel_apply
from .replicate import replicate
from .data_parallel import DataParallel, data_parallel
from .scatter_gather import scatter, gather
from .distributed import DistributedDataParallel
__all__ = ['replicate', 'scatter', 'parallel_apply', 'gather', 'data_parallel',
'DataParallel', 'DistributedDataParallel']
Reported by Pylint.
Line: 4
Column: 1
from .parallel_apply import parallel_apply
from .replicate import replicate
from .data_parallel import DataParallel, data_parallel
from .scatter_gather import scatter, gather
from .distributed import DistributedDataParallel
__all__ = ['replicate', 'scatter', 'parallel_apply', 'gather', 'data_parallel',
'DataParallel', 'DistributedDataParallel']
Reported by Pylint.
Line: 5
Column: 1
from .replicate import replicate
from .data_parallel import DataParallel, data_parallel
from .scatter_gather import scatter, gather
from .distributed import DistributedDataParallel
__all__ = ['replicate', 'scatter', 'parallel_apply', 'gather', 'data_parallel',
'DataParallel', 'DistributedDataParallel']
def DistributedDataParallelCPU(*args, **kwargs):
Reported by Pylint.
Line: 1
Column: 1
from .parallel_apply import parallel_apply
from .replicate import replicate
from .data_parallel import DataParallel, data_parallel
from .scatter_gather import scatter, gather
from .distributed import DistributedDataParallel
__all__ = ['replicate', 'scatter', 'parallel_apply', 'gather', 'data_parallel',
'DataParallel', 'DistributedDataParallel']
Reported by Pylint.
Line: 10
Column: 1
__all__ = ['replicate', 'scatter', 'parallel_apply', 'gather', 'data_parallel',
'DataParallel', 'DistributedDataParallel']
def DistributedDataParallelCPU(*args, **kwargs):
import warnings
warnings.warn("torch.nn.parallel.DistributedDataParallelCPU is deprecated, "
"please use torch.nn.parallel.DistributedDataParallel instead.")
return DistributedDataParallel(*args, **kwargs)
Reported by Pylint.
Line: 10
Column: 1
__all__ = ['replicate', 'scatter', 'parallel_apply', 'gather', 'data_parallel',
'DataParallel', 'DistributedDataParallel']
def DistributedDataParallelCPU(*args, **kwargs):
import warnings
warnings.warn("torch.nn.parallel.DistributedDataParallelCPU is deprecated, "
"please use torch.nn.parallel.DistributedDataParallel instead.")
return DistributedDataParallel(*args, **kwargs)
Reported by Pylint.
Line: 11
Column: 5
'DataParallel', 'DistributedDataParallel']
def DistributedDataParallelCPU(*args, **kwargs):
import warnings
warnings.warn("torch.nn.parallel.DistributedDataParallelCPU is deprecated, "
"please use torch.nn.parallel.DistributedDataParallel instead.")
return DistributedDataParallel(*args, **kwargs)
Reported by Pylint.
torch/jit/_pickle.py
9 issues
Line: 32
Column: 29
return data
def restore_type_tag(value, type_str):
# The type_ptr is used by the jit unpickler to restore the full static type
# to container types like list when they are re-loaded, but this doesn't
# matter for Python, so just return the plain value
return value
Reported by Pylint.
Line: 1
Column: 1
# These functions are referenced from the pickle archives produced by
# ScriptModule.save()
# These (`build_*`) functions used to be used by `pickler.cpp` to specify
# the type of the list for certain special types, but now all lists get
# a type attached and restored via `restore_type_tag` below. The legacy
# functions should stick around for backwards-compatibility.
Reported by Pylint.
Line: 10
Column: 1
# a type attached and restored via `restore_type_tag` below. The legacy
# functions should stick around for backwards-compatibility.
def build_intlist(data):
return data
def build_tensorlist(data):
return data
Reported by Pylint.
Line: 14
Column: 1
return data
def build_tensorlist(data):
return data
def build_doublelist(data):
return data
Reported by Pylint.
Line: 18
Column: 1
return data
def build_doublelist(data):
return data
def build_boollist(data):
return data
Reported by Pylint.
Line: 22
Column: 1
return data
def build_boollist(data):
return data
def build_tensor_from_id(data):
if isinstance(data, int):
Reported by Pylint.
Line: 26
Column: 1
return data
def build_tensor_from_id(data):
if isinstance(data, int):
# just the id, can't really do anything
return data
Reported by Pylint.
Line: 26
Column: 1
return data
def build_tensor_from_id(data):
if isinstance(data, int):
# just the id, can't really do anything
return data
Reported by Pylint.
Line: 32
Column: 1
return data
def restore_type_tag(value, type_str):
# The type_ptr is used by the jit unpickler to restore the full static type
# to container types like list when they are re-loaded, but this doesn't
# matter for Python, so just return the plain value
return value
Reported by Pylint.
torch/utils/benchmark/utils/_stubs.py
9 issues
Line: 8
Column: 5
if TYPE_CHECKING or sys.version_info >= (3, 8):
from typing import runtime_checkable, Protocol
else:
from typing_extensions import runtime_checkable, Protocol
class TimerClass(Protocol):
"""This is the portion of the `timeit.Timer` API used by benchmark utils."""
def __init__(
Reported by Pylint.
Line: 13
Column: 5
class TimerClass(Protocol):
"""This is the portion of the `timeit.Timer` API used by benchmark utils."""
def __init__(
self,
stmt: str,
setup: str,
timer: Callable[[], float],
globals: Dict[str, Any],
Reported by Pylint.
Line: 18
Column: 9
stmt: str,
setup: str,
timer: Callable[[], float],
globals: Dict[str, Any],
**kwargs: Any,
) -> None:
...
def timeit(self, number: int) -> float:
Reported by Pylint.
Line: 1
Column: 1
import sys
from typing import Any, Callable, Dict, TYPE_CHECKING
if TYPE_CHECKING or sys.version_info >= (3, 8):
from typing import runtime_checkable, Protocol
else:
from typing_extensions import runtime_checkable, Protocol
Reported by Pylint.
Line: 11
Column: 1
from typing_extensions import runtime_checkable, Protocol
class TimerClass(Protocol):
"""This is the portion of the `timeit.Timer` API used by benchmark utils."""
def __init__(
self,
stmt: str,
setup: str,
Reported by Pylint.
Line: 23
Column: 5
) -> None:
...
def timeit(self, number: int) -> float:
...
@runtime_checkable
class TimeitModuleType(Protocol):
Reported by Pylint.
Line: 28
Column: 1
@runtime_checkable
class TimeitModuleType(Protocol):
"""Modules generated from `timeit_template.cpp`."""
def timeit(self, number: int) -> float:
...
Reported by Pylint.
Line: 30
Column: 5
@runtime_checkable
class TimeitModuleType(Protocol):
"""Modules generated from `timeit_template.cpp`."""
def timeit(self, number: int) -> float:
...
class CallgrindModuleType(Protocol):
"""Replicates the valgrind endpoints in `torch._C`.
Reported by Pylint.
Line: 34
Column: 1
...
class CallgrindModuleType(Protocol):
"""Replicates the valgrind endpoints in `torch._C`.
These bindings are used to collect Callgrind profiles on earlier versions
of PyTorch and will eventually be removed.
"""
Reported by Pylint.
torch/package/_package_pickler.py
9 issues
Line: 7
Column: 1
from struct import pack
from types import FunctionType
from .importer import Importer, ObjMismatchError, ObjNotFoundError, sys_importer
class PackagePickler(_Pickler):
"""Package-aware pickler.
Reported by Pylint.
Line: 28
Column: 9
# forces us to copy/paste this function. The only change is marked
# CHANGED below.
write = self.write
memo = self.memo
# CHANGED: import module from module environment instead of __import__
try:
module_name, name = self.importer.get_name(obj, name)
except (ObjNotFoundError, ObjMismatchError) as err:
Reported by Pylint.
Line: 2
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b403-import-pickle
"""isort:skip_file"""
from pickle import EXT1, EXT2, EXT4, GLOBAL, STACK_GLOBAL, Pickler, PicklingError
from pickle import _compat_pickle, _extension_registry, _getattribute, _Pickler # type: ignore[attr-defined]
from struct import pack
from types import FunctionType
from .importer import Importer, ObjMismatchError, ObjNotFoundError, sys_importer
Reported by Bandit.
Line: 3
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b403-import-pickle
"""isort:skip_file"""
from pickle import EXT1, EXT2, EXT4, GLOBAL, STACK_GLOBAL, Pickler, PicklingError
from pickle import _compat_pickle, _extension_registry, _getattribute, _Pickler # type: ignore[attr-defined]
from struct import pack
from types import FunctionType
from .importer import Importer, ObjMismatchError, ObjNotFoundError, sys_importer
Reported by Bandit.
Line: 3
Column: 1
"""isort:skip_file"""
from pickle import EXT1, EXT2, EXT4, GLOBAL, STACK_GLOBAL, Pickler, PicklingError
from pickle import _compat_pickle, _extension_registry, _getattribute, _Pickler # type: ignore[attr-defined]
from struct import pack
from types import FunctionType
from .importer import Importer, ObjMismatchError, ObjNotFoundError, sys_importer
Reported by Pylint.
Line: 23
Column: 5
self.importer = importer
super().__init__(*args, **kwargs)
def save_global(self, obj, name=None):
# unfortunately the pickler code is factored in a way that
# forces us to copy/paste this function. The only change is marked
# CHANGED below.
write = self.write
memo = self.memo
Reported by Pylint.
Line: 43
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
if self.proto >= 2:
code = _extension_registry.get((module_name, name))
if code:
assert code > 0
if code <= 0xFF:
write(EXT1 + pack("<B", code))
elif code <= 0xFFFF:
write(EXT2 + pack("<H", code))
else:
Reported by Bandit.
Line: 96
Column: 1
dispatch[FunctionType] = save_global
def create_pickler(data_buf, importer):
if importer is sys_importer:
# if we are using the normal import library system, then
# we can use the C implementation of pickle which is faster
return Pickler(data_buf, protocol=3)
else:
Reported by Pylint.
Line: 97
Column: 5
def create_pickler(data_buf, importer):
if importer is sys_importer:
# if we are using the normal import library system, then
# we can use the C implementation of pickle which is faster
return Pickler(data_buf, protocol=3)
else:
return PackagePickler(importer, data_buf, protocol=3)
Reported by Pylint.
torch/onnx/operators.py
8 issues
Line: 16
Column: 12
def shape_as_tensor(x):
return torch._shape_as_tensor(x)
def reshape_from_tensor_shape(x, shape):
return torch._reshape_from_tensor(x, shape)
Reported by Pylint.
Line: 20
Column: 12
def reshape_from_tensor_shape(x, shape):
return torch._reshape_from_tensor(x, shape)
Reported by Pylint.
Line: 16
Column: 12
def shape_as_tensor(x):
return torch._shape_as_tensor(x)
def reshape_from_tensor_shape(x, shape):
return torch._reshape_from_tensor(x, shape)
Reported by Pylint.
Line: 20
Column: 12
def reshape_from_tensor_shape(x, shape):
return torch._reshape_from_tensor(x, shape)
Reported by Pylint.
Line: 15
Column: 1
import torch.onnx.utils
def shape_as_tensor(x):
return torch._shape_as_tensor(x)
def reshape_from_tensor_shape(x, shape):
return torch._reshape_from_tensor(x, shape)
Reported by Pylint.
Line: 15
Column: 1
import torch.onnx.utils
def shape_as_tensor(x):
return torch._shape_as_tensor(x)
def reshape_from_tensor_shape(x, shape):
return torch._reshape_from_tensor(x, shape)
Reported by Pylint.
Line: 19
Column: 1
return torch._shape_as_tensor(x)
def reshape_from_tensor_shape(x, shape):
return torch._reshape_from_tensor(x, shape)
Reported by Pylint.
Line: 19
Column: 1
return torch._shape_as_tensor(x)
def reshape_from_tensor_shape(x, shape):
return torch._reshape_from_tensor(x, shape)
Reported by Pylint.
torch/profiler/__init__.py
8 issues
Line: 11
Column: 1
'''
from .profiler import profile, schedule, supported_activities, tensorboard_trace_handler, ProfilerAction, ProfilerActivity
from torch.autograd import kineto_available, _supported_activities, DeviceType
from torch.autograd.profiler import record_function
Reported by Pylint.
Line: 2
Column: 1
r'''
PyTorch Profiler is a tool that allows the collecton of the performance metrics during the training and inference.
Profiler's context manager API can be used to better understand what model operators are the most expensive,
examine their input shapes and stack traces, study device kernel activity and visualize the execution trace.
.. note::
An earlier version of the API in :mod:`torch.autograd` module is considered legacy and will be deprecated.
'''
Reported by Pylint.
Line: 3
Column: 1
r'''
PyTorch Profiler is a tool that allows the collecton of the performance metrics during the training and inference.
Profiler's context manager API can be used to better understand what model operators are the most expensive,
examine their input shapes and stack traces, study device kernel activity and visualize the execution trace.
.. note::
An earlier version of the API in :mod:`torch.autograd` module is considered legacy and will be deprecated.
'''
Reported by Pylint.
Line: 4
Column: 1
r'''
PyTorch Profiler is a tool that allows the collecton of the performance metrics during the training and inference.
Profiler's context manager API can be used to better understand what model operators are the most expensive,
examine their input shapes and stack traces, study device kernel activity and visualize the execution trace.
.. note::
An earlier version of the API in :mod:`torch.autograd` module is considered legacy and will be deprecated.
'''
Reported by Pylint.
Line: 7
Column: 1
examine their input shapes and stack traces, study device kernel activity and visualize the execution trace.
.. note::
An earlier version of the API in :mod:`torch.autograd` module is considered legacy and will be deprecated.
'''
from .profiler import profile, schedule, supported_activities, tensorboard_trace_handler, ProfilerAction, ProfilerActivity
from torch.autograd import kineto_available, _supported_activities, DeviceType
Reported by Pylint.
Line: 11
Column: 1
'''
from .profiler import profile, schedule, supported_activities, tensorboard_trace_handler, ProfilerAction, ProfilerActivity
from torch.autograd import kineto_available, _supported_activities, DeviceType
from torch.autograd.profiler import record_function
Reported by Pylint.
Line: 12
Column: 1
'''
from .profiler import profile, schedule, supported_activities, tensorboard_trace_handler, ProfilerAction, ProfilerActivity
from torch.autograd import kineto_available, _supported_activities, DeviceType
from torch.autograd.profiler import record_function
Reported by Pylint.
Line: 13
Column: 1
from .profiler import profile, schedule, supported_activities, tensorboard_trace_handler, ProfilerAction, ProfilerActivity
from torch.autograd import kineto_available, _supported_activities, DeviceType
from torch.autograd.profiler import record_function
Reported by Pylint.
torch/utils/benchmark/examples/fuzzer.py
8 issues
Line: 11
Column: 1
import torch.utils.benchmark as benchmark_utils
def main():
add_fuzzer = benchmark_utils.Fuzzer(
parameters=[
[
benchmark_utils.FuzzedParameter(
name=f"k{i}",
Reported by Pylint.
Line: 11
Column: 1
import torch.utils.benchmark as benchmark_utils
def main():
add_fuzzer = benchmark_utils.Fuzzer(
parameters=[
[
benchmark_utils.FuzzedParameter(
name=f"k{i}",
Reported by Pylint.
Line: 42
Column: 5
seed=0,
)
n = 250
measurements = []
for i, (tensors, tensor_properties, _) in enumerate(add_fuzzer.take(n=n)):
x, x_order = tensors["x"], str(tensor_properties["x"]["order"])
y, y_order = tensors["y"], str(tensor_properties["y"]["order"])
shape = ", ".join(tuple(f'{i:>4}' for i in x.shape))
Reported by Pylint.
Line: 45
Column: 9
n = 250
measurements = []
for i, (tensors, tensor_properties, _) in enumerate(add_fuzzer.take(n=n)):
x, x_order = tensors["x"], str(tensor_properties["x"]["order"])
y, y_order = tensors["y"], str(tensor_properties["y"]["order"])
shape = ", ".join(tuple(f'{i:>4}' for i in x.shape))
description = "".join([
f"{x.numel():>7} | {shape:<16} | ",
Reported by Pylint.
Line: 46
Column: 9
measurements = []
for i, (tensors, tensor_properties, _) in enumerate(add_fuzzer.take(n=n)):
x, x_order = tensors["x"], str(tensor_properties["x"]["order"])
y, y_order = tensors["y"], str(tensor_properties["y"]["order"])
shape = ", ".join(tuple(f'{i:>4}' for i in x.shape))
description = "".join([
f"{x.numel():>7} | {shape:<16} | ",
f"{'contiguous' if x.is_contiguous() else x_order:<12} | ",
Reported by Pylint.
Line: 70
Column: 5
# More string munging to make pretty output.
print(f"Average attemts per valid config: {1. / (1. - add_fuzzer.rejection_rate):.1f}")
def time_fn(m):
return m.median / m.metadata["numel"]
measurements.sort(key=time_fn)
template = f"{{:>6}}{' ' * 19}Size Shape{' ' * 13}X order Y order\n{'-' * 80}"
print(template.format("Best:"))
Reported by Pylint.
Line: 76
Column: 9
template = f"{{:>6}}{' ' * 19}Size Shape{' ' * 13}X order Y order\n{'-' * 80}"
print(template.format("Best:"))
for m in measurements[:15]:
print(f"{time_fn(m) * 1e9:>4.1f} ns / element {m.description}")
print("\n" + template.format("Worst:"))
for m in measurements[-15:]:
print(f"{time_fn(m) * 1e9:>4.1f} ns / element {m.description}")
Reported by Pylint.
Line: 80
Column: 9
print(f"{time_fn(m) * 1e9:>4.1f} ns / element {m.description}")
print("\n" + template.format("Worst:"))
for m in measurements[-15:]:
print(f"{time_fn(m) * 1e9:>4.1f} ns / element {m.description}")
if __name__ == "__main__":
main()
Reported by Pylint.
torch/utils/benchmark/examples/op_benchmark.py
8 issues
Line: 28
Column: 43
def run(n, stmt, fuzzer_cls):
float_iter = fuzzer_cls(seed=0, dtype=torch.float32).take(n)
int_iter = fuzzer_cls(seed=0, dtype=torch.int32).take(n)
raw_results = []
for i, (float_values, int_values) in enumerate(zip(float_iter, int_iter)):
float_tensors, float_tensor_params, float_params = float_values
int_tensors, int_tensor_params, int_params = int_values
Reported by Pylint.
Line: 29
Column: 41
def run(n, stmt, fuzzer_cls):
float_iter = fuzzer_cls(seed=0, dtype=torch.float32).take(n)
int_iter = fuzzer_cls(seed=0, dtype=torch.int32).take(n)
raw_results = []
for i, (float_values, int_values) in enumerate(zip(float_iter, int_iter)):
float_tensors, float_tensor_params, float_params = float_values
int_tensors, int_tensor_params, int_params = int_values
Reported by Pylint.
Line: 23
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
x = {"a": np.ones((2, 1))}
x == x # Raises ValueError
"""
assert set(dict_0.keys()) == set(dict_0.keys())
assert all(np.all(v == dict_1[k]) for k, v in dict_0.items() if k != "dtype")
def run(n, stmt, fuzzer_cls):
float_iter = fuzzer_cls(seed=0, dtype=torch.float32).take(n)
Reported by Bandit.
Line: 24
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
x == x # Raises ValueError
"""
assert set(dict_0.keys()) == set(dict_0.keys())
assert all(np.all(v == dict_1[k]) for k, v in dict_0.items() if k != "dtype")
def run(n, stmt, fuzzer_cls):
float_iter = fuzzer_cls(seed=0, dtype=torch.float32).take(n)
int_iter = fuzzer_cls(seed=0, dtype=torch.int32).take(n)
Reported by Bandit.
Line: 27
Column: 1
assert all(np.all(v == dict_1[k]) for k, v in dict_0.items() if k != "dtype")
def run(n, stmt, fuzzer_cls):
float_iter = fuzzer_cls(seed=0, dtype=torch.float32).take(n)
int_iter = fuzzer_cls(seed=0, dtype=torch.int32).take(n)
raw_results = []
for i, (float_values, int_values) in enumerate(zip(float_iter, int_iter)):
float_tensors, float_tensor_params, float_params = float_values
Reported by Pylint.
Line: 27
Column: 1
assert all(np.all(v == dict_1[k]) for k, v in dict_0.items() if k != "dtype")
def run(n, stmt, fuzzer_cls):
float_iter = fuzzer_cls(seed=0, dtype=torch.float32).take(n)
int_iter = fuzzer_cls(seed=0, dtype=torch.int32).take(n)
raw_results = []
for i, (float_values, int_values) in enumerate(zip(float_iter, int_iter)):
float_tensors, float_tensor_params, float_params = float_values
Reported by Pylint.
Line: 27
Column: 1
assert all(np.all(v == dict_1[k]) for k, v in dict_0.items() if k != "dtype")
def run(n, stmt, fuzzer_cls):
float_iter = fuzzer_cls(seed=0, dtype=torch.float32).take(n)
int_iter = fuzzer_cls(seed=0, dtype=torch.int32).take(n)
raw_results = []
for i, (float_values, int_values) in enumerate(zip(float_iter, int_iter)):
float_tensors, float_tensor_params, float_params = float_values
Reported by Pylint.
Line: 96
Column: 1
print(spacer)
def main():
run(n=100, stmt="torch.median(x, dim=0)", fuzzer_cls=UnaryOpFuzzer)
run(n=100, stmt="torch.square(x)", fuzzer_cls=UnaryOpFuzzer)
run(n=100, stmt="x + y", fuzzer_cls=BinaryOpFuzzer)
Reported by Pylint.
torch/nn/modules/channelshuffle.py
8 issues
Line: 1
Column: 1
from .module import Module
from .. import functional as F
from torch import Tensor
class ChannelShuffle(Module):
r"""Divide the channels in a tensor of shape :math:`(*, C , H, W)`
into g groups and rearrange them as :math:`(*, C \frac g, g, H, W)`,
Reported by Pylint.
Line: 2
Column: 1
from .module import Module
from .. import functional as F
from torch import Tensor
class ChannelShuffle(Module):
r"""Divide the channels in a tensor of shape :math:`(*, C , H, W)`
into g groups and rearrange them as :math:`(*, C \frac g, g, H, W)`,
Reported by Pylint.
Line: 48
Column: 23
super(ChannelShuffle, self).__init__()
self.groups = groups
def forward(self, input: Tensor) -> Tensor:
return F.channel_shuffle(input, self.groups)
def extra_repr(self) -> str:
return 'groups={}'.format(self.groups)
Reported by Pylint.
Line: 1
Column: 1
from .module import Module
from .. import functional as F
from torch import Tensor
class ChannelShuffle(Module):
r"""Divide the channels in a tensor of shape :math:`(*, C , H, W)`
into g groups and rearrange them as :math:`(*, C \frac g, g, H, W)`,
Reported by Pylint.
Line: 4
Column: 1
from .module import Module
from .. import functional as F
from torch import Tensor
class ChannelShuffle(Module):
r"""Divide the channels in a tensor of shape :math:`(*, C , H, W)`
into g groups and rearrange them as :math:`(*, C \frac g, g, H, W)`,
Reported by Pylint.
Line: 45
Column: 9
groups: int
def __init__(self, groups: int) -> None:
super(ChannelShuffle, self).__init__()
self.groups = groups
def forward(self, input: Tensor) -> Tensor:
return F.channel_shuffle(input, self.groups)
Reported by Pylint.
Line: 48
Column: 5
super(ChannelShuffle, self).__init__()
self.groups = groups
def forward(self, input: Tensor) -> Tensor:
return F.channel_shuffle(input, self.groups)
def extra_repr(self) -> str:
return 'groups={}'.format(self.groups)
Reported by Pylint.
Line: 51
Column: 5
def forward(self, input: Tensor) -> Tensor:
return F.channel_shuffle(input, self.groups)
def extra_repr(self) -> str:
return 'groups={}'.format(self.groups)
Reported by Pylint.