The following issues were found:

caffe2/python/lengths_reducer_fused_8bit_rowwise_ops_test.py
15 issues
Unable to import 'hypothesis.strategies'
Error

Line: 4 Column: 1

              

import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
from hypothesis import given



            

Reported by Pylint.

Unable to import 'hypothesis'
Error

Line: 7 Column: 1

              import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
from hypothesis import given


def compare_rowwise(emb_orig, emb_reconstructed, fp16):
    # there is an absolute error introduced per row through int8 quantization
    # and a relative error introduced when quantizing back from fp32 to fp16

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              

import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
from hypothesis import given



            

Reported by Pylint.

Missing function or method docstring
Error

Line: 10 Column: 1

              from hypothesis import given


def compare_rowwise(emb_orig, emb_reconstructed, fp16):
    # there is an absolute error introduced per row through int8 quantization
    # and a relative error introduced when quantizing back from fp32 to fp16
    assert emb_orig.shape == emb_reconstructed.shape
    rtol = 1e-8
    if fp16:

            

Reported by Pylint.

Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.
Security

Line: 13
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html

              def compare_rowwise(emb_orig, emb_reconstructed, fp16):
    # there is an absolute error introduced per row through int8 quantization
    # and a relative error introduced when quantizing back from fp32 to fp16
    assert emb_orig.shape == emb_reconstructed.shape
    rtol = 1e-8
    if fp16:
        rtol = 1e-3
    erange = np.amax(emb_orig, axis=1) - np.amin(emb_orig, axis=1)


            

Reported by Bandit.

Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.
Security

Line: 31
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html

                      if n_violated > 0:
            print(isclose, threshold[i])
            print(i, r_orig, r_reconstructed, threshold[i], r_orig - r_reconstructed)
        assert n_violated == 0


class TestLengthsReducerOpsFused8BitRowwise(hu.HypothesisTestCase):
    @given(
        num_rows=st.integers(1, 20),

            

Reported by Bandit.

Missing class docstring
Error

Line: 34 Column: 1

                      assert n_violated == 0


class TestLengthsReducerOpsFused8BitRowwise(hu.HypothesisTestCase):
    @given(
        num_rows=st.integers(1, 20),
        blocksize=st.sampled_from([8, 16, 32, 64, 85, 96, 128, 163]),
        weighted=st.booleans(),
        seed=st.integers(0, 2 ** 32 - 1),

            

Reported by Pylint.

Too many arguments (7/5)
Error

Line: 42 Column: 5

                      seed=st.integers(0, 2 ** 32 - 1),
        empty_indices=st.booleans(),
        fp16=st.booleans(),
    )
    def test_sparse_lengths_sum(
        self, num_rows, blocksize, weighted, seed, empty_indices, fp16
    ):
        net = core.Net("bench")


            

Reported by Pylint.

Method could be a function
Error

Line: 42 Column: 5

                      seed=st.integers(0, 2 ** 32 - 1),
        empty_indices=st.booleans(),
        fp16=st.booleans(),
    )
    def test_sparse_lengths_sum(
        self, num_rows, blocksize, weighted, seed, empty_indices, fp16
    ):
        net = core.Net("bench")


            

Reported by Pylint.

Too many local variables (18/15)
Error

Line: 42 Column: 5

                      seed=st.integers(0, 2 ** 32 - 1),
        empty_indices=st.booleans(),
        fp16=st.booleans(),
    )
    def test_sparse_lengths_sum(
        self, num_rows, blocksize, weighted, seed, empty_indices, fp16
    ):
        net = core.Net("bench")


            

Reported by Pylint.

torch/utils/benchmark/utils/cpp_jit.py
15 issues
Using the global statement
Error

Line: 35 Column: 5

              _BUILD_ROOT: Optional[str] = None

def _get_build_root() -> str:
    global _BUILD_ROOT
    if _BUILD_ROOT is None:
        _BUILD_ROOT = _make_temp_dir(prefix="benchmark_utils_jit_build")
        atexit.register(shutil.rmtree, _BUILD_ROOT)
    return _BUILD_ROOT


            

Reported by Pylint.

Access to a protected member _cxx_flags of a client class
Error

Line: 69 Column: 21

              CXX_FLAGS: Optional[List[str]]
if hasattr(torch.__config__, "_cxx_flags"):
    try:
        CXX_FLAGS = torch.__config__._cxx_flags().strip().split()
        if CXX_FLAGS is not None and "-g" not in CXX_FLAGS:
            CXX_FLAGS.append("-g")

    except RuntimeError:
        # We are in FBCode.

            

Reported by Pylint.

FIXME: Remove when back testing is no longer required.
Error

Line: 77 Column: 3

                      # We are in FBCode.
        CXX_FLAGS = None
else:
    # FIXME: Remove when back testing is no longer required.
    CXX_FLAGS = ["-O2", "-fPIC", "-g"]

EXTRA_INCLUDE_PATHS: List[str] = [os.path.join(SOURCE_ROOT, "valgrind_wrapper")]
CONDA_PREFIX = os.getenv("CONDA_PREFIX")
if CONDA_PREFIX is not None:

            

Reported by Pylint.

Using the global statement
Error

Line: 90 Column: 9

              COMPAT_CALLGRIND_BINDINGS: Optional[CallgrindModuleType] = None
def get_compat_bindings() -> CallgrindModuleType:
    with LOCK:
        global COMPAT_CALLGRIND_BINDINGS
        if COMPAT_CALLGRIND_BINDINGS is None:
            COMPAT_CALLGRIND_BINDINGS = cpp_extension.load(
                name="callgrind_bindings",
                sources=[os.path.join(
                    SOURCE_ROOT,

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 88 Column: 1

              

COMPAT_CALLGRIND_BINDINGS: Optional[CallgrindModuleType] = None
def get_compat_bindings() -> CallgrindModuleType:
    with LOCK:
        global COMPAT_CALLGRIND_BINDINGS
        if COMPAT_CALLGRIND_BINDINGS is None:
            COMPAT_CALLGRIND_BINDINGS = cpp_extension.load(
                name="callgrind_bindings",

            

Reported by Pylint.

Variable name "f" doesn't conform to snake_case naming style
Error

Line: 136 Column: 38

                      os.makedirs(build_dir, exist_ok=True)

        src_path = os.path.join(build_dir, "timer_src.cpp")
        with open(src_path, "wt") as f:
            f.write(src)

    # `cpp_extension` has its own locking scheme, so we don't need our lock.
    return cpp_extension.load(
        name=name,

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 151 Column: 1

                  )


def compile_timeit_template(*, stmt: str, setup: str, global_setup: str) -> TimeitModuleType:
    template_path: str = os.path.join(SOURCE_ROOT, "timeit_template.cpp")
    with open(template_path, "rt") as f:
        src: str = f.read()

    module = _compile_template(stmt=stmt, setup=setup, global_setup=global_setup, src=src, is_standalone=False)

            

Reported by Pylint.

Variable name "f" doesn't conform to snake_case naming style
Error

Line: 153 Column: 39

              
def compile_timeit_template(*, stmt: str, setup: str, global_setup: str) -> TimeitModuleType:
    template_path: str = os.path.join(SOURCE_ROOT, "timeit_template.cpp")
    with open(template_path, "rt") as f:
        src: str = f.read()

    module = _compile_template(stmt=stmt, setup=setup, global_setup=global_setup, src=src, is_standalone=False)
    assert isinstance(module, TimeitModuleType)
    return module

            

Reported by Pylint.

Line too long (111/100)
Error

Line: 156 Column: 1

                  with open(template_path, "rt") as f:
        src: str = f.read()

    module = _compile_template(stmt=stmt, setup=setup, global_setup=global_setup, src=src, is_standalone=False)
    assert isinstance(module, TimeitModuleType)
    return module


def compile_callgrind_template(*, stmt: str, setup: str, global_setup: str) -> str:

            

Reported by Pylint.

Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.
Security

Line: 157
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html

                      src: str = f.read()

    module = _compile_template(stmt=stmt, setup=setup, global_setup=global_setup, src=src, is_standalone=False)
    assert isinstance(module, TimeitModuleType)
    return module


def compile_callgrind_template(*, stmt: str, setup: str, global_setup: str) -> str:
    template_path: str = os.path.join(SOURCE_ROOT, "valgrind_wrapper", "timer_callgrind_template.cpp")

            

Reported by Bandit.

torch/fx/experimental/unification/utils.py
15 issues
Catching too general exception Exception
Error

Line: 84 Column: 12

                  try:
        func()
        raise Exception("XFailed test passed")  # pragma:nocover
    except Exception:
        pass


def freeze(d):
    """ Freeze container to hashable form

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              def hashable(x):
    try:
        hash(x)
        return True
    except TypeError:
        return False


def transitive_get(key, d):

            

Reported by Pylint.

Argument name "x" doesn't conform to snake_case naming style
Error

Line: 1 Column: 1

              def hashable(x):
    try:
        hash(x)
        return True
    except TypeError:
        return False


def transitive_get(key, d):

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 1 Column: 1

              def hashable(x):
    try:
        hash(x)
        return True
    except TypeError:
        return False


def transitive_get(key, d):

            

Reported by Pylint.

Argument name "d" doesn't conform to snake_case naming style
Error

Line: 9 Column: 1

                      return False


def transitive_get(key, d):
    """ Transitive dict.get
    >>> d = {1: 2, 2: 3, 3: 4}
    >>> d.get(1)
    2
    >>> transitive_get(1, d)

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 22 Column: 1

                  return key


def raises(err, lamda):
    try:
        lamda()
        return False
    except err:
        return True

            

Reported by Pylint.

Variable name "S" doesn't conform to snake_case naming style
Error

Line: 47 Column: 5

                  """
    incoming_edges = reverse_dict(edges)
    incoming_edges = dict((k, set(val)) for k, val in incoming_edges.items())
    S = set((v for v in edges if v not in incoming_edges))
    L = []

    while S:
        n = S.pop()
        L.append(n)

            

Reported by Pylint.

Variable name "L" doesn't conform to snake_case naming style
Error

Line: 48 Column: 5

                  incoming_edges = reverse_dict(edges)
    incoming_edges = dict((k, set(val)) for k, val in incoming_edges.items())
    S = set((v for v in edges if v not in incoming_edges))
    L = []

    while S:
        n = S.pop()
        L.append(n)
        for m in edges.get(n, ()):

            

Reported by Pylint.

Variable name "n" doesn't conform to snake_case naming style
Error

Line: 51 Column: 9

                  L = []

    while S:
        n = S.pop()
        L.append(n)
        for m in edges.get(n, ()):
            assert n in incoming_edges[m]
            incoming_edges[m].remove(n)
            if not incoming_edges[m]:

            

Reported by Pylint.

Variable name "m" doesn't conform to snake_case naming style
Error

Line: 53 Column: 13

                  while S:
        n = S.pop()
        L.append(n)
        for m in edges.get(n, ()):
            assert n in incoming_edges[m]
            incoming_edges[m].remove(n)
            if not incoming_edges[m]:
                S.add(m)
    if any(incoming_edges.get(v, None) for v in edges):

            

Reported by Pylint.

torch/quantization/fx/pattern_utils.py
15 issues
Attempted relative import beyond top-level package
Error

Line: 7 Column: 1

              from torch.fx.graph import (
    Node,
)
from .quantization_types import Pattern
from ..qconfig import QConfigAny
# from .quantization_patterns import BinaryOpQuantizeHandler


# TODO(future PR): fix the typing on QuantizeHandler (currently a circular dependency)

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 8 Column: 1

                  Node,
)
from .quantization_types import Pattern
from ..qconfig import QConfigAny
# from .quantization_patterns import BinaryOpQuantizeHandler


# TODO(future PR): fix the typing on QuantizeHandler (currently a circular dependency)
QuantizeHandler = Any

            

Reported by Pylint.

TODO(future PR): fix the typing on QuantizeHandler (currently a circular dependency)
Error

Line: 12 Column: 3

              # from .quantization_patterns import BinaryOpQuantizeHandler


# TODO(future PR): fix the typing on QuantizeHandler (currently a circular dependency)
QuantizeHandler = Any

MatchResult = Tuple[Node, List[Node], Optional[Pattern], QuantizeHandler,
                    QConfigAny]


            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              import torch
from collections import OrderedDict
from typing import Dict, Any, Tuple, List, Optional
from torch.fx.graph import (
    Node,
)
from .quantization_types import Pattern
from ..qconfig import QConfigAny
# from .quantization_patterns import BinaryOpQuantizeHandler

            

Reported by Pylint.

standard import "from collections import OrderedDict" should be placed before "import torch"
Error

Line: 2 Column: 1

              import torch
from collections import OrderedDict
from typing import Dict, Any, Tuple, List, Optional
from torch.fx.graph import (
    Node,
)
from .quantization_types import Pattern
from ..qconfig import QConfigAny
# from .quantization_patterns import BinaryOpQuantizeHandler

            

Reported by Pylint.

standard import "from typing import Dict, Any, Tuple, List, Optional" should be placed before "import torch"
Error

Line: 3 Column: 1

              import torch
from collections import OrderedDict
from typing import Dict, Any, Tuple, List, Optional
from torch.fx.graph import (
    Node,
)
from .quantization_types import Pattern
from ..qconfig import QConfigAny
# from .quantization_patterns import BinaryOpQuantizeHandler

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 20 Column: 1

              
# pattern for conv bn fusion
DEFAULT_FUSION_PATTERNS = OrderedDict()
def register_fusion_pattern(pattern):
    def insert(fn):
        DEFAULT_FUSION_PATTERNS[pattern] = fn
        return fn
    return insert


            

Reported by Pylint.

Argument name "fn" doesn't conform to snake_case naming style
Error

Line: 21 Column: 5

              # pattern for conv bn fusion
DEFAULT_FUSION_PATTERNS = OrderedDict()
def register_fusion_pattern(pattern):
    def insert(fn):
        DEFAULT_FUSION_PATTERNS[pattern] = fn
        return fn
    return insert

def get_default_fusion_patterns() -> Dict[Pattern, QuantizeHandler]:

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 26 Column: 1

                      return fn
    return insert

def get_default_fusion_patterns() -> Dict[Pattern, QuantizeHandler]:
    return DEFAULT_FUSION_PATTERNS

DEFAULT_QUANTIZATION_PATTERNS = OrderedDict()
# a map from pattern to activation_post_process(observer/fake_quant) consstructor for output activation
# e.g. pattern: torch.sigmoid,

            

Reported by Pylint.

Line too long (103/100)
Error

Line: 30 Column: 1

                  return DEFAULT_FUSION_PATTERNS

DEFAULT_QUANTIZATION_PATTERNS = OrderedDict()
# a map from pattern to activation_post_process(observer/fake_quant) consstructor for output activation
# e.g. pattern: torch.sigmoid,
#      output_activation_post_process: default_affine_fixed_qparam_fake_quant
DEFAULT_OUTPUT_ACTIVATION_POST_PROCESS_MAP = dict()

# Register pattern for both static quantization and qat

            

Reported by Pylint.

torch/nn/intrinsic/quantized/modules/conv_relu.py
15 issues
Access to a protected member _reverse_repeat_padding of a client class
Error

Line: 10 Column: 27

              
from torch.nn.utils import fuse_conv_bn_weights

_reverse_repeat_padding = nnq.modules.conv._reverse_repeat_padding

class ConvReLU1d(nnq.Conv1d):
    r"""
    A ConvReLU1d module is a fused module of Conv1d and ReLU


            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 32 Column: 23

                          padding=padding, dilation=dilation, groups=groups, bias=bias,
            padding_mode=padding_mode)

    def forward(self, input):
        # Temporarily using len(shape) instead of ndim due to JIT issue
        # https://github.com/pytorch/pytorch/issues/23890
        if len(input.shape) != 3:
            raise ValueError("Input shape must be `(N, C, L)`!")
        if self.padding_mode != 'zeros':

            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 76 Column: 23

                          padding=padding, dilation=dilation, groups=groups, bias=bias,
            padding_mode=padding_mode)

    def forward(self, input):
        # Temporarily using len(shape) instead of ndim due to JIT issue
        # https://github.com/pytorch/pytorch/issues/23890
        if len(input.shape) != 4:
            raise ValueError("Input shape must be `(N, C, H, W)`!")
        if self.padding_mode != 'zeros':

            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 120 Column: 23

                          padding=padding, dilation=dilation, groups=groups, bias=bias,
            padding_mode=padding_mode)

    def forward(self, input):
        # Temporarily using len(shape) instead of ndim due to JIT issue
        # https://github.com/pytorch/pytorch/issues/23890
        if len(input.shape) != 5:
            raise ValueError("Input shape must be `(N, C, D, H, W)`!")
        if self.padding_mode != 'zeros':

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              
import torch
import torch.nn.intrinsic
import torch.nn.intrinsic.qat
import torch.nn.functional as F
import torch.nn.quantized as nnq

from torch.nn.utils import fuse_conv_bn_weights


            

Reported by Pylint.

Too many arguments (10/5)
Error

Line: 24 Column: 5

                  """
    _FLOAT_MODULE = torch.nn.intrinsic.ConvReLU1d  # type: ignore[assignment]

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros'):
        super(ConvReLU1d, self).__init__(
            in_channels, out_channels, kernel_size, stride=stride,
            padding=padding, dilation=dilation, groups=groups, bias=bias,

            

Reported by Pylint.

Consider using Python 3 style super() without arguments
Error

Line: 27 Column: 9

                  def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros'):
        super(ConvReLU1d, self).__init__(
            in_channels, out_channels, kernel_size, stride=stride,
            padding=padding, dilation=dilation, groups=groups, bias=bias,
            padding_mode=padding_mode)

    def forward(self, input):

            

Reported by Pylint.

Using type() instead of isinstance() for a typecheck.
Error

Line: 50 Column: 12

              
    @classmethod
    def from_float(cls, mod):
        if type(mod) == torch.nn.intrinsic.qat.ConvBnReLU1d:
            mod.weight, mod.bias = fuse_conv_bn_weights(
                mod.weight, mod.bias, mod.bn.running_mean, mod.bn.running_var,
                mod.bn.eps, mod.bn.weight, mod.bn.bias)
        return super(ConvReLU1d, cls).from_float(mod)


            

Reported by Pylint.

Too many arguments (10/5)
Error

Line: 68 Column: 5

                  """
    _FLOAT_MODULE = torch.nn.intrinsic.ConvReLU2d  # type: ignore[assignment]

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros'):
        super(ConvReLU2d, self).__init__(
            in_channels, out_channels, kernel_size, stride=stride,
            padding=padding, dilation=dilation, groups=groups, bias=bias,

            

Reported by Pylint.

Consider using Python 3 style super() without arguments
Error

Line: 71 Column: 9

                  def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros'):
        super(ConvReLU2d, self).__init__(
            in_channels, out_channels, kernel_size, stride=stride,
            padding=padding, dilation=dilation, groups=groups, bias=bias,
            padding_mode=padding_mode)

    def forward(self, input):

            

Reported by Pylint.

torch/quantization/_correct_bias.py
15 issues
Module 'torch' has no 'mean' member
Error

Line: 119 Column: 30

                          dims = list(range(quantization_error.dim()))
            # Note: we don't want to take the mean over the output channel dimension
            dims.remove(1)
            expected_error = torch.mean(quantization_error, dims)

            updated_bias = bias.data - expected_error

            bias.data = updated_bias


            

Reported by Pylint.

Parameters differ from overridden 'forward' method
Error

Line: 47 Column: 5

                      self.float_sum = None
        self.quant_sum = None

    def forward(self, x, y):
        ''' The inputs x,y are output data from the quantized and floating-point modules.
        x is for the quantized module, y is for the floating point module
        '''
        if x.is_quantized:
            x = x.dequantize()

            

Reported by Pylint.

Dangerous default value _supported_modules_quantized (builtins.set) as argument
Error

Line: 76 Column: 1

                      self.float_sum = None
        self.quant_sum = None

def bias_correction(float_model, quantized_model, img_data, target_modules=_supported_modules_quantized, neval_batches=None):
    ''' Using numeric suite shadow module, the expected output of the floating point and quantized modules
    is recorded. Using that data the bias of supported modules is shifted to compensate for the drift caused
    by quantization
    Paper reference: https://arxiv.org/pdf/1906.04721.pdf (Section 4.2)


            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              import torch
import torch.nn as nn
import torch.nn.quantized as nnq

import torch.quantization
import torch.quantization._numeric_suite as ns

_supported_modules = {nn.Linear, nn.Conv2d}
_supported_modules_quantized = {nnq.Linear, nnq.Conv2d}

            

Reported by Pylint.

Unnecessary "else" after "return"
Error

Line: 20 Column: 5

                  '''Splits full name of submodule into parent submodule's full name and submodule's name
    '''
    split_name = name.rsplit('.', 1)
    if len(split_name) == 1:
        return '', split_name[0]
    else:
        return split_name[0], split_name[1]

def get_param(module, attr):

            

Reported by Pylint.

Unnecessary "else" after "return"
Error

Line: 30 Column: 5

                  gives a function that will give you the raw tensor, this function takes care of that logic
    '''
    param = getattr(module, attr, None)
    if callable(param):
        return param()
    else:
        return param

class MeanShadowLogger(ns.Logger):

            

Reported by Pylint.

Consider using Python 3 style super() without arguments
Error

Line: 40 Column: 9

                  of the data passed to the floating point and quantized models
    """
    def __init__(self):
        super(MeanShadowLogger, self).__init__()
        self.stats["float"] = None
        self.stats["quantized"] = None
        self.count = 0
        self.float_sum = None
        self.quant_sum = None

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 69 Column: 5

                          self.float_sum += y
            self.stats["float"] = self.float_sum / self.count

    def clear(self):
        self.stats["float"] = None
        self.stats["quantized"] = None
        self.count = 0
        self.float_sum = None
        self.quant_sum = None

            

Reported by Pylint.

Line too long (125/100)
Error

Line: 76 Column: 1

                      self.float_sum = None
        self.quant_sum = None

def bias_correction(float_model, quantized_model, img_data, target_modules=_supported_modules_quantized, neval_batches=None):
    ''' Using numeric suite shadow module, the expected output of the floating point and quantized modules
    is recorded. Using that data the bias of supported modules is shifted to compensate for the drift caused
    by quantization
    Paper reference: https://arxiv.org/pdf/1906.04721.pdf (Section 4.2)


            

Reported by Pylint.

Too many local variables (22/15)
Error

Line: 76 Column: 1

                      self.float_sum = None
        self.quant_sum = None

def bias_correction(float_model, quantized_model, img_data, target_modules=_supported_modules_quantized, neval_batches=None):
    ''' Using numeric suite shadow module, the expected output of the floating point and quantized modules
    is recorded. Using that data the bias of supported modules is shifted to compensate for the drift caused
    by quantization
    Paper reference: https://arxiv.org/pdf/1906.04721.pdf (Section 4.2)


            

Reported by Pylint.

torch/utils/throughput_benchmark.py
15 issues
Access to a protected member _C of a client class
Error

Line: 150 Column: 18

                          - num_iters - number of actual iterations the benchmark have made
            - avg_latency_ms - average time it took to infer on one input example in milliseconds
        '''
        config = torch._C.BenchmarkConfig()
        config.num_calling_threads = num_calling_threads
        config.num_warmup_iters = num_warmup_iters
        config.num_iters = num_iters
        config.profiler_output_path = profiler_output_path
        c_stats = self._benchmark.benchmark(config)

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              
import torch._C

def format_time(time_us=None, time_ms=None, time_s=None):
    '''Defines how to format time'''
    assert sum([time_us is not None, time_ms is not None, time_s is not None]) == 1

    US_IN_SECOND = 1e6
    US_IN_MS = 1e3

            

Reported by Pylint.

Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.
Security

Line: 6
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html

              
def format_time(time_us=None, time_ms=None, time_s=None):
    '''Defines how to format time'''
    assert sum([time_us is not None, time_ms is not None, time_s is not None]) == 1

    US_IN_SECOND = 1e6
    US_IN_MS = 1e3

    if time_us is None:

            

Reported by Bandit.

Variable name "US_IN_SECOND" doesn't conform to snake_case naming style
Error

Line: 8 Column: 5

                  '''Defines how to format time'''
    assert sum([time_us is not None, time_ms is not None, time_s is not None]) == 1

    US_IN_SECOND = 1e6
    US_IN_MS = 1e3

    if time_us is None:
        if time_ms is not None:
            time_us = time_ms * US_IN_MS

            

Reported by Pylint.

Variable name "US_IN_MS" doesn't conform to snake_case naming style
Error

Line: 9 Column: 5

                  assert sum([time_us is not None, time_ms is not None, time_s is not None]) == 1

    US_IN_SECOND = 1e6
    US_IN_MS = 1e3

    if time_us is None:
        if time_ms is not None:
            time_us = time_ms * US_IN_MS
        elif time_s is not None:

            

Reported by Pylint.

Missing class docstring
Error

Line: 26 Column: 1

                  return '{:.3f}us'.format(time_us)


class ExecutionStats(object):
    def __init__(self, c_stats, benchmark_config):
        self._c_stats = c_stats
        self.benchmark_config = benchmark_config

    @property

            

Reported by Pylint.

Class 'ExecutionStats' inherits from object, can be safely removed from bases in python3
Error

Line: 26 Column: 1

                  return '{:.3f}us'.format(time_us)


class ExecutionStats(object):
    def __init__(self, c_stats, benchmark_config):
        self._c_stats = c_stats
        self.benchmark_config = benchmark_config

    @property

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 32 Column: 5

                      self.benchmark_config = benchmark_config

    @property
    def latency_avg_ms(self):
        return self._c_stats.latency_avg_ms

    @property
    def num_iters(self):
        return self._c_stats.num_iters

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 36 Column: 5

                      return self._c_stats.latency_avg_ms

    @property
    def num_iters(self):
        return self._c_stats.num_iters

    @property
    def iters_per_second(self):
        '''

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 47 Column: 5

                      return self.num_iters / self.total_time_seconds

    @property
    def total_time_seconds(self):
        return self.num_iters * (
            self.latency_avg_ms / 1000.0) / self.benchmark_config.num_calling_threads


    def __str__(self):

            

Reported by Pylint.

torch/nn/quantized/modules/batchnorm.py
14 issues
Redefining built-in 'input'
Error

Line: 16 Column: 23

                      self.scale = 1.0
        self.zero_point = 0

    def forward(self, input):
        return torch.ops.quantized.batch_norm2d(input, self.weight, self.bias, self.running_mean,
                                                self.running_var, self.eps, self.scale, self.zero_point)

    def _get_name(self):
        return 'QuantizedBatchNorm2d'

            

Reported by Pylint.

TODO: dedup with BatchNorm2d
Error

Line: 38 Column: 3

                      new_mod.zero_point = int(zero_point)
        return new_mod

# TODO: dedup with BatchNorm2d
class BatchNorm3d(torch.nn.BatchNorm3d):
    r"""This is the quantized version of :class:`~torch.nn.BatchNorm3d`.
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.1, device=None, dtype=None):

            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 50 Column: 23

                      self.scale = 1.0
        self.zero_point = 0

    def forward(self, input):
        return torch.ops.quantized.batch_norm3d(input, self.weight, self.bias, self.running_mean,
                                                self.running_var, self.eps, self.scale, self.zero_point)

    def _get_name(self):
        return 'QuantizedBatchNorm3d'

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              import torch
import torch.nn.quantized.functional
import torch.nn.intrinsic as nni

class BatchNorm2d(torch.nn.BatchNorm2d):
    r"""This is the quantized version of :class:`~torch.nn.BatchNorm2d`.
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.1, device=None, dtype=None) -> None:

            

Reported by Pylint.

Too many arguments (6/5)
Error

Line: 9 Column: 5

                  r"""This is the quantized version of :class:`~torch.nn.BatchNorm2d`.
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.1, device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super(BatchNorm2d, self).__init__(num_features, **factory_kwargs)
        self.eps = eps
        self.scale = 1.0
        self.zero_point = 0

            

Reported by Pylint.

Consider using Python 3 style super() without arguments
Error

Line: 11 Column: 9

              
    def __init__(self, num_features, eps=1e-5, momentum=0.1, device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super(BatchNorm2d, self).__init__(num_features, **factory_kwargs)
        self.eps = eps
        self.scale = 1.0
        self.zero_point = 0

    def forward(self, input):

            

Reported by Pylint.

Line too long (104/100)
Error

Line: 18 Column: 1

              
    def forward(self, input):
        return torch.ops.quantized.batch_norm2d(input, self.weight, self.bias, self.running_mean,
                                                self.running_var, self.eps, self.scale, self.zero_point)

    def _get_name(self):
        return 'QuantizedBatchNorm2d'

    @classmethod

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 24 Column: 5

                      return 'QuantizedBatchNorm2d'

    @classmethod
    def from_float(cls, mod):
        activation_post_process = mod.activation_post_process
        if type(mod) == nni.BNReLU2d:
            mod = mod[0]
        scale, zero_point = activation_post_process.calculate_qparams()
        new_mod = cls(mod.num_features, mod.eps)

            

Reported by Pylint.

Using type() instead of isinstance() for a typecheck.
Error

Line: 26 Column: 12

                  @classmethod
    def from_float(cls, mod):
        activation_post_process = mod.activation_post_process
        if type(mod) == nni.BNReLU2d:
            mod = mod[0]
        scale, zero_point = activation_post_process.calculate_qparams()
        new_mod = cls(mod.num_features, mod.eps)
        new_mod.weight = mod.weight
        new_mod.bias = mod.bias

            

Reported by Pylint.

Too many arguments (6/5)
Error

Line: 43 Column: 5

                  r"""This is the quantized version of :class:`~torch.nn.BatchNorm3d`.
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.1, device=None, dtype=None):
        factory_kwargs = {'device': device, 'dtype': dtype}
        super(BatchNorm3d, self).__init__(num_features, **factory_kwargs)
        self.eps = eps
        self.scale = 1.0
        self.zero_point = 0

            

Reported by Pylint.

torch/utils/tensorboard/_onnx_graph.py
14 issues
Unable to import 'tensorboard.compat.proto.graph_pb2'
Error

Line: 1 Column: 1

              from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.versions_pb2 import VersionDef
from tensorboard.compat.proto.attr_value_pb2 import AttrValue
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto


def load_onnx_graph(fname):
    import onnx

            

Reported by Pylint.

Unable to import 'tensorboard.compat.proto.node_def_pb2'
Error

Line: 2 Column: 1

              from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.versions_pb2 import VersionDef
from tensorboard.compat.proto.attr_value_pb2 import AttrValue
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto


def load_onnx_graph(fname):
    import onnx

            

Reported by Pylint.

Unable to import 'tensorboard.compat.proto.versions_pb2'
Error

Line: 3 Column: 1

              from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.versions_pb2 import VersionDef
from tensorboard.compat.proto.attr_value_pb2 import AttrValue
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto


def load_onnx_graph(fname):
    import onnx

            

Reported by Pylint.

Unable to import 'tensorboard.compat.proto.attr_value_pb2'
Error

Line: 4 Column: 1

              from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.versions_pb2 import VersionDef
from tensorboard.compat.proto.attr_value_pb2 import AttrValue
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto


def load_onnx_graph(fname):
    import onnx

            

Reported by Pylint.

Unable to import 'tensorboard.compat.proto.tensor_shape_pb2'
Error

Line: 5 Column: 1

              from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.versions_pb2 import VersionDef
from tensorboard.compat.proto.attr_value_pb2 import AttrValue
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto


def load_onnx_graph(fname):
    import onnx
    m = onnx.load(fname)

            

Reported by Pylint.

Unable to import 'onnx'
Error

Line: 9 Column: 5

              

def load_onnx_graph(fname):
    import onnx
    m = onnx.load(fname)
    g = m.graph
    return parse(g)



            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.versions_pb2 import VersionDef
from tensorboard.compat.proto.attr_value_pb2 import AttrValue
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto


def load_onnx_graph(fname):
    import onnx

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 8 Column: 1

              from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto


def load_onnx_graph(fname):
    import onnx
    m = onnx.load(fname)
    g = m.graph
    return parse(g)


            

Reported by Pylint.

Import outside toplevel (onnx)
Error

Line: 9 Column: 5

              

def load_onnx_graph(fname):
    import onnx
    m = onnx.load(fname)
    g = m.graph
    return parse(g)



            

Reported by Pylint.

Variable name "m" doesn't conform to snake_case naming style
Error

Line: 10 Column: 5

              
def load_onnx_graph(fname):
    import onnx
    m = onnx.load(fname)
    g = m.graph
    return parse(g)


def parse(graph):

            

Reported by Pylint.

torch/fx/passes/shape_prop.py
14 issues
Module 'torch' has no 'Size' member
Error

Line: 11 Column: 13

                  # about a tensor within a PyTorch program.

    # General Tensor metadata
    shape : torch.Size
    dtype : torch.dtype
    requires_grad : bool
    stride : Tuple[int]
    memory_format : Optional[torch.memory_format]


            

Reported by Pylint.

Module 'torch' has no 'dtype' member
Error

Line: 12 Column: 13

              
    # General Tensor metadata
    shape : torch.Size
    dtype : torch.dtype
    requires_grad : bool
    stride : Tuple[int]
    memory_format : Optional[torch.memory_format]

    # Quantization metadata

            

Reported by Pylint.

Module 'torch' has no 'memory_format' member
Error

Line: 15 Column: 30

                  dtype : torch.dtype
    requires_grad : bool
    stride : Tuple[int]
    memory_format : Optional[torch.memory_format]

    # Quantization metadata
    is_quantized : bool
    qscheme : Optional[torch.qscheme]
    q_scale : Optional[float]

            

Reported by Pylint.

Module 'torch' has no 'qscheme' member
Error

Line: 19 Column: 24

              
    # Quantization metadata
    is_quantized : bool
    qscheme : Optional[torch.qscheme]
    q_scale : Optional[float]
    q_zero_point : Optional[int]

def extract_tensor_metadata(result : torch.Tensor) -> TensorMetadata:
    """

            

Reported by Pylint.

Module 'torch' has no 'contiguous_format' member
Error

Line: 33 Column: 9

                  stride = result.stride()

    memory_formats = {
        torch.contiguous_format,
        torch.channels_last,
        torch.channels_last_3d,
    }

    memory_format = None

            

Reported by Pylint.

Module 'torch' has no 'channels_last' member
Error

Line: 34 Column: 9

              
    memory_formats = {
        torch.contiguous_format,
        torch.channels_last,
        torch.channels_last_3d,
    }

    memory_format = None


            

Reported by Pylint.

Module 'torch' has no 'channels_last_3d' member
Error

Line: 35 Column: 9

                  memory_formats = {
        torch.contiguous_format,
        torch.channels_last,
        torch.channels_last_3d,
    }

    memory_format = None

    for query_format in memory_formats:

            

Reported by Pylint.

Module 'torch' has no 'per_tensor_symmetric' member
Error

Line: 53 Column: 49

                  if is_quantized:
        qscheme = result.qscheme()

        if qscheme in {torch.per_tensor_affine, torch.per_tensor_symmetric}:
            q_scale = result.q_scale()
            q_zero_point = result.q_zero_point()


    return TensorMetadata(

            

Reported by Pylint.

Module 'torch' has no 'per_tensor_affine' member
Error

Line: 53 Column: 24

                  if is_quantized:
        qscheme = result.qscheme()

        if qscheme in {torch.per_tensor_affine, torch.per_tensor_symmetric}:
            q_scale = result.q_scale()
            q_zero_point = result.q_zero_point()


    return TensorMetadata(

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              import torch
import torch.fx
from torch.fx.node import Node, map_aggregate
from typing import Any, Tuple, NamedTuple, Optional

class TensorMetadata(NamedTuple):
    # TensorMetadata is a structure containing pertinent information
    # about a tensor within a PyTorch program.


            

Reported by Pylint.