The following issues were found:
torch/utils/benchmark/op_fuzzers/unary.py
4 issues
Line: 16
Column: 36
class UnaryOpFuzzer(Fuzzer):
def __init__(self, seed, dtype=torch.float32, cuda=False):
super().__init__(
parameters=[
# Dimensionality of x. (e.g. 1D, 2D, or 3D.)
FuzzedParameter("dim", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True),
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
import torch
from torch.utils.benchmark import Fuzzer, FuzzedParameter, ParameterAlias, FuzzedTensor
_MIN_DIM_SIZE = 16
_MAX_DIM_SIZE = 16 * 1024 ** 2
_POW_TWO_SIZES = tuple(2 ** i for i in range(
Reported by Pylint.
Line: 15
Column: 1
))
class UnaryOpFuzzer(Fuzzer):
def __init__(self, seed, dtype=torch.float32, cuda=False):
super().__init__(
parameters=[
# Dimensionality of x. (e.g. 1D, 2D, or 3D.)
FuzzedParameter("dim", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True),
Reported by Pylint.
Line: 64
Column: 1
],
# Repeatable entropy for downstream applications.
FuzzedParameter(name="random_value", minval=0, maxval=2 ** 32 - 1, distribution="uniform"),
],
tensors=[
FuzzedTensor(
name="x",
size=("k0", "k1", "k2"),
Reported by Pylint.
torch/quantization/quant_type.py
4 issues
Line: 1
Column: 1
import enum
# Quantization type (dynamic quantization, static quantization).
# Should match the c++ enum in quantization_type.h
class QuantType(enum.IntEnum):
DYNAMIC = 0
STATIC = 1
QAT = 2
WEIGHT_ONLY = 3
Reported by Pylint.
Line: 5
Column: 1
# Quantization type (dynamic quantization, static quantization).
# Should match the c++ enum in quantization_type.h
class QuantType(enum.IntEnum):
DYNAMIC = 0
STATIC = 1
QAT = 2
WEIGHT_ONLY = 3
Reported by Pylint.
Line: 12
Column: 1
WEIGHT_ONLY = 3
def quant_type_to_str(quant_type):
m = {
QuantType.STATIC: "static",
QuantType.DYNAMIC: "dynamic",
QuantType.QAT: "qat",
QuantType.WEIGHT_ONLY: "weight_only",
Reported by Pylint.
Line: 13
Column: 5
def quant_type_to_str(quant_type):
m = {
QuantType.STATIC: "static",
QuantType.DYNAMIC: "dynamic",
QuantType.QAT: "qat",
QuantType.WEIGHT_ONLY: "weight_only",
}
Reported by Pylint.
torch/utils/benchmark/op_fuzzers/spectral.py
4 issues
Line: 30
Column: 44
REGULAR_SIZES.sort()
class SpectralOpFuzzer(benchmark.Fuzzer):
def __init__(self, *, seed: int, dtype=torch.float64,
cuda: bool = False, probability_regular: float = 1.0):
super().__init__(
parameters=[
# Dimensionality of x. (e.g. 1D, 2D, or 3D.)
FuzzedParameter("ndim", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True),
Reported by Pylint.
Line: 1
Column: 1
import math
import torch
from torch.utils import benchmark
from torch.utils.benchmark import FuzzedParameter, FuzzedTensor, ParameterAlias
__all__ = ['SpectralOpFuzzer']
Reported by Pylint.
Line: 13
Column: 1
MIN_DIM_SIZE = 16
MAX_DIM_SIZE = 16 * 1024
def power_range(upper_bound, base):
return (base ** i for i in range(int(math.log(upper_bound, base)) + 1))
# List of regular numbers from MIN_DIM_SIZE to MAX_DIM_SIZE
# These numbers factorize into multiples of prime factors 2, 3, and 5 only
# and are usually the fastest in FFT implementations.
Reported by Pylint.
Line: 29
Column: 1
REGULAR_SIZES.append(ijk)
REGULAR_SIZES.sort()
class SpectralOpFuzzer(benchmark.Fuzzer):
def __init__(self, *, seed: int, dtype=torch.float64,
cuda: bool = False, probability_regular: float = 1.0):
super().__init__(
parameters=[
# Dimensionality of x. (e.g. 1D, 2D, or 3D.)
Reported by Pylint.
torch/nn/quantizable/modules/__init__.py
4 issues
Line: 1
Column: 1
from .activation import MultiheadAttention
from .rnn import LSTM
from .rnn import LSTMCell
__all__ = [
'LSTM',
'LSTMCell',
'MultiheadAttention',
]
Reported by Pylint.
Line: 2
Column: 1
from .activation import MultiheadAttention
from .rnn import LSTM
from .rnn import LSTMCell
__all__ = [
'LSTM',
'LSTMCell',
'MultiheadAttention',
]
Reported by Pylint.
Line: 3
Column: 1
from .activation import MultiheadAttention
from .rnn import LSTM
from .rnn import LSTMCell
__all__ = [
'LSTM',
'LSTMCell',
'MultiheadAttention',
]
Reported by Pylint.
Line: 1
Column: 1
from .activation import MultiheadAttention
from .rnn import LSTM
from .rnn import LSTMCell
__all__ = [
'LSTM',
'LSTMCell',
'MultiheadAttention',
]
Reported by Pylint.
torch/utils/benchmark/op_fuzzers/binary.py
4 issues
Line: 16
Column: 36
class BinaryOpFuzzer(Fuzzer):
def __init__(self, seed, dtype=torch.float32, cuda=False):
super().__init__(
parameters=[
# Dimensionality of x and y. (e.g. 1D, 2D, or 3D.)
FuzzedParameter("dim", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True),
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
import torch
from torch.utils.benchmark import Fuzzer, FuzzedParameter, ParameterAlias, FuzzedTensor
_MIN_DIM_SIZE = 16
_MAX_DIM_SIZE = 16 * 1024 ** 2
_POW_TWO_SIZES = tuple(2 ** i for i in range(
Reported by Pylint.
Line: 15
Column: 1
))
class BinaryOpFuzzer(Fuzzer):
def __init__(self, seed, dtype=torch.float32, cuda=False):
super().__init__(
parameters=[
# Dimensionality of x and y. (e.g. 1D, 2D, or 3D.)
FuzzedParameter("dim", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True),
Reported by Pylint.
Line: 79
Column: 1
],
# Repeatable entropy for downstream applications.
FuzzedParameter(name="random_value", minval=0, maxval=2 ** 32 - 1, distribution="uniform"),
],
tensors=[
FuzzedTensor(
name="x",
size=("k0", "k1", "k2"),
Reported by Pylint.
torch/testing/_internal/test_module/no_future_div.py
4 issues
Line: 1
Column: 1
import torch # noqa: F401
def div_int_nofuture():
return 1 / 2
def div_float_nofuture():
return 3.14 / 0.125
Reported by Pylint.
Line: 1
Column: 1
import torch # noqa: F401
def div_int_nofuture():
return 1 / 2
def div_float_nofuture():
return 3.14 / 0.125
Reported by Pylint.
Line: 4
Column: 1
import torch # noqa: F401
def div_int_nofuture():
return 1 / 2
def div_float_nofuture():
return 3.14 / 0.125
Reported by Pylint.
Line: 8
Column: 1
return 1 / 2
def div_float_nofuture():
return 3.14 / 0.125
Reported by Pylint.
torch/quantization/ns/ns_types.py
4 issues
Line: 18
Column: 3
[('start_node', Node), ('end_node', Node), ('base_op_node', Node)]
)
# TODO(future PR): see if we can use typing_extensions's TypedDict instead
# to properly type the various keys
# {
# # one of NSSingleResultValuesType
# 'type': 'weight',
# # the values of type specified above
Reported by Pylint.
Line: 1
Column: 1
import enum
from typing import NamedTuple
from torch.fx.graph import Node
from typing import Dict, Any, List, Union, Callable
class NSSingleResultValuesType(str, enum.Enum):
WEIGHT = 'weight'
Reported by Pylint.
Line: 6
Column: 1
from torch.fx.graph import Node
from typing import Dict, Any, List, Union, Callable
class NSSingleResultValuesType(str, enum.Enum):
WEIGHT = 'weight'
NODE_OUTPUT = 'node_output'
NODE_INPUT = 'node_input'
Reported by Pylint.
Line: 8
Column: 1
from typing import Dict, Any, List, Union, Callable
class NSSingleResultValuesType(str, enum.Enum):
WEIGHT = 'weight'
NODE_OUTPUT = 'node_output'
NODE_INPUT = 'node_input'
NSSubgraph = NamedTuple(
Reported by Pylint.
torch/distributed/algorithms/__init__.py
4 issues
Line: 1
Column: 1
from .join import Join
from .join import Joinable
from .join import JoinHook
Reported by Pylint.
Line: 2
Column: 1
from .join import Join
from .join import Joinable
from .join import JoinHook
Reported by Pylint.
Line: 3
Column: 1
from .join import Join
from .join import Joinable
from .join import JoinHook
Reported by Pylint.
Line: 1
Column: 1
from .join import Join
from .join import Joinable
from .join import JoinHook
Reported by Pylint.
torch/cuda/nvtx.py
4 issues
Line: 50
Column: 1
@contextmanager
def range(msg, *args, **kwargs):
"""
Context manager / decorator that pushes an NVTX range at the beginning
of its scope, and pops it at the end. If extra arguments are given,
they are passed as arguments to msg.format().
Reported by Pylint.
Line: 1
Column: 1
from contextlib import contextmanager
try:
from torch._C import _nvtx
except ImportError:
class _NVTXStub(object):
@staticmethod
def _fail(*args, **kwargs):
raise RuntimeError("NVTX functions not installed. Are you sure you have a CUDA build?")
Reported by Pylint.
Line: 6
Column: 5
try:
from torch._C import _nvtx
except ImportError:
class _NVTXStub(object):
@staticmethod
def _fail(*args, **kwargs):
raise RuntimeError("NVTX functions not installed. Are you sure you have a CUDA build?")
rangePushA = _fail
Reported by Pylint.
Line: 6
Column: 5
try:
from torch._C import _nvtx
except ImportError:
class _NVTXStub(object):
@staticmethod
def _fail(*args, **kwargs):
raise RuntimeError("NVTX functions not installed. Are you sure you have a CUDA build?")
rangePushA = _fail
Reported by Pylint.
tools/linter/clang_format_utils.py
4 issues
Line: 2
Column: 1
import os
from install.download_bin import download, PYTORCH_ROOT # type: ignore[import]
# This dictionary maps each platform to the S3 object URL for its clang-format binary.
PLATFORM_TO_CF_URL = {
"Darwin": "https://oss-clang-format.s3.us-east-2.amazonaws.com/mac/clang-format-mojave",
"Linux": "https://oss-clang-format.s3.us-east-2.amazonaws.com/linux64/clang-format-linux64",
}
Reported by Pylint.
Line: 19
Column: 32
CLANG_FORMAT_DIR = os.path.join(PYTORCH_ROOT, ".clang-format-bin")
CLANG_FORMAT_PATH = os.path.join(CLANG_FORMAT_DIR, "clang-format")
def get_and_check_clang_format(verbose: bool = False) -> bool:
return bool(download("clang-format", CLANG_FORMAT_DIR, PLATFORM_TO_CF_URL, PLATFORM_TO_HASH))
Reported by Pylint.
Line: 1
Column: 1
import os
from install.download_bin import download, PYTORCH_ROOT # type: ignore[import]
# This dictionary maps each platform to the S3 object URL for its clang-format binary.
PLATFORM_TO_CF_URL = {
"Darwin": "https://oss-clang-format.s3.us-east-2.amazonaws.com/mac/clang-format-mojave",
"Linux": "https://oss-clang-format.s3.us-east-2.amazonaws.com/linux64/clang-format-linux64",
}
Reported by Pylint.
Line: 19
Column: 1
CLANG_FORMAT_DIR = os.path.join(PYTORCH_ROOT, ".clang-format-bin")
CLANG_FORMAT_PATH = os.path.join(CLANG_FORMAT_DIR, "clang-format")
def get_and_check_clang_format(verbose: bool = False) -> bool:
return bool(download("clang-format", CLANG_FORMAT_DIR, PLATFORM_TO_CF_URL, PLATFORM_TO_HASH))
Reported by Pylint.