The following issues were found by Pylint and Bandit:

tools/shared/cwrap_common.py
28 issues
TODO: Uggggh, parsing the schema string here, really???
Error

Line: 48 Column: 3

                          declaration['name'], declaration['overload_name'])
    else:
        declaration['type_wrapper_name'] = declaration['name']
    # TODO: Uggggh, parsing the schema string here, really???
    declaration['operator_name_with_overload'] = declaration['schema_string'].split('(')[0]
    if declaration['schema_string']:
        declaration['unqual_schema_string'] = declaration['schema_string'].split('::')[1]
        declaration['unqual_operator_name_with_overload'] = declaration['operator_name_with_overload'].split('::')[1]
    else:

            

Reported by Pylint.

TODO(zach): why does cwrap not propagate 'name'? I need it
Error

Line: 71 Column: 3

                  # Propagate defaults from declaration to options
    for option in declaration['options']:
        for k, v in declaration.items():
            # TODO(zach): why does cwrap not propagate 'name'? I need it
            # propagaged for ATen
            if k != 'options':
                option.setdefault(k, v)

# TODO(zach): added option to remove keyword handling for C++ which cannot

            

Reported by Pylint.

TODO(zach): added option to remove keyword handling for C++ which cannot
Error

Line: 76 Column: 3

                          if k != 'options':
                option.setdefault(k, v)

# TODO(zach): added option to remove keyword handling for C++ which cannot
# support it.

Option = Dict[str, Any]



            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              # this code should be common among cwrap and ATen preprocessing
# for now, I have put it in one place but right now is copied out of cwrap

import copy
from typing import Any, Dict, Iterable, List, Union

Arg = Dict[str, Any]

def parse_arguments(args: List[Union[str, Arg]]) -> List[Arg]:

            

Reported by Pylint.
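
Possible fix (sketch): a module docstring as the first statement of tools/shared/cwrap_common.py satisfies this check; the wording below is only a guess based on the file's existing header comment.

"""Code shared between cwrap and ATen preprocessing.

For now it lives in one place, copied out of cwrap.
"""

import copy
from typing import Any, Dict, Iterable, List, Union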

Missing function or method docstring
Error

Line: 9 Column: 1

              
Arg = Dict[str, Any]

def parse_arguments(args: List[Union[str, Arg]]) -> List[Arg]:
    new_args = []
    for arg in args:
        # Simple arg declaration of form "<type> <name>"
        if isinstance(arg, str):
            t, _, name = arg.partition(' ')

            

Reported by Pylint.

Variable name "t" doesn't conform to snake_case naming style
Error

Line: 14 Column: 13

                  for arg in args:
        # Simple arg declaration of form "<type> <name>"
        if isinstance(arg, str):
            t, _, name = arg.partition(' ')
            new_args.append({'type': t, 'name': name})
        elif isinstance(arg, dict):
            if 'arg' in arg:
                arg['type'], _, arg['name'] = arg['arg'].partition(' ')
                del arg['arg']

            

Reported by Pylint.
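
Possible fix (sketch): renaming the single-letter variable to something descriptive clears the naming check without changing behavior; arg_type is a hypothetical name.

        if isinstance(arg, str):
            arg_type, _, name = arg.partition(' ')
            new_args.append({'type': arg_type, 'name': name})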

Missing function or method docstring
Error

Line: 29 Column: 1

              Declaration = Dict[str, Any]


def set_declaration_defaults(declaration: Declaration) -> None:
    if 'schema_string' not in declaration:
        # This happens for legacy TH bindings like
        # _thnn_conv_depthwise2d_backward
        declaration['schema_string'] = ''
    declaration.setdefault('arguments', [])

            

Reported by Pylint.

Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.
Security

Line: 40
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html

                      declaration['cname'] = declaration['name']
    if 'backends' not in declaration:
        declaration['backends'] = ['CPU', 'CUDA']
    assert 'api_name' not in declaration
    declaration['api_name'] = declaration['name']
    # NB: keep this in sync with gen_autograd.py
    if declaration.get('overload_name'):
        declaration['type_wrapper_name'] = "{}_{}".format(
            declaration['name'], declaration['overload_name'])

            

Reported by Bandit.
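
Possible fix (sketch): Bandit flags assert because assertions are stripped when Python runs with -O; raising an explicit exception keeps the invariant check in optimized byte code (RuntimeError is an arbitrary choice here).

    if 'api_name' in declaration:
        raise RuntimeError("declaration unexpectedly already has an 'api_name' key")
    declaration['api_name'] = declaration['name']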

Line too long (117/100)
Error

Line: 52 Column: 1

                  declaration['operator_name_with_overload'] = declaration['schema_string'].split('(')[0]
    if declaration['schema_string']:
        declaration['unqual_schema_string'] = declaration['schema_string'].split('::')[1]
        declaration['unqual_operator_name_with_overload'] = declaration['operator_name_with_overload'].split('::')[1]
    else:
        declaration['unqual_schema_string'] = ''
        declaration['unqual_operator_name_with_overload'] = ''
    # Simulate multiple dispatch, even if it's not necessary
    if 'options' not in declaration:

            

Reported by Pylint.
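
Possible fix (sketch): the 117-character line can be brought under the 100-column limit with a short intermediate variable (op_with_overload is a hypothetical name).

    if declaration['schema_string']:
        declaration['unqual_schema_string'] = declaration['schema_string'].split('::')[1]
        op_with_overload = declaration['operator_name_with_overload']
        declaration['unqual_operator_name_with_overload'] = op_with_overload.split('::')[1]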

Variable name "v" doesn't conform to snake_case naming style
Error

Line: 70 Column: 16

                      option['schema_order_arguments'] = parse_arguments(option['schema_order_arguments'])
    # Propagate defaults from declaration to options
    for option in declaration['options']:
        for k, v in declaration.items():
            # TODO(zach): why does cwrap not propagate 'name'? I need it
            # propagaged for ATen
            if k != 'options':
                option.setdefault(k, v)


            

Reported by Pylint.

caffe2/python/operator_test/sparse_itemwise_dropout_with_replacement_op_test.py
28 issues
Unable to import 'hypothesis'
Error

Line: 7 Column: 1

              

from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import numpy as np


class SparseItemwiseDropoutWithReplacementTest(hu.HypothesisTestCase):

            

Reported by Pylint.
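
Possible fix (sketch): this is usually an environment issue (hypothesis is installed where the tests run, not where Pylint runs) rather than a code bug. If the import is known to be valid it can be exempted inline as below, or hypothesis can be listed under ignored-modules in the project's Pylint configuration, assuming one exists.

from caffe2.python import core
from hypothesis import given  # pylint: disable=import-error
import caffe2.python.hypothesis_test_util as hu
import numpy as np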

Unused argument 'dc'
Error

Line: 14 Column: 35

              
class SparseItemwiseDropoutWithReplacementTest(hu.HypothesisTestCase):
    @given(**hu.gcs_cpu_only)
    def test_no_dropout(self, gc, dc):
        X = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.int64)
        Lengths = np.array([2, 2, 2, 2, 2]).astype(np.int32)
        replacement_value = -1
        self.ws.create_blob("X").feed(X)
        self.ws.create_blob("Lengths").feed(Lengths)

            

Reported by Pylint.

Unused argument 'gc'
Error

Line: 14 Column: 31

              
class SparseItemwiseDropoutWithReplacementTest(hu.HypothesisTestCase):
    @given(**hu.gcs_cpu_only)
    def test_no_dropout(self, gc, dc):
        X = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.int64)
        Lengths = np.array([2, 2, 2, 2, 2]).astype(np.int32)
        replacement_value = -1
        self.ws.create_blob("X").feed(X)
        self.ws.create_blob("Lengths").feed(Lengths)

            

Reported by Pylint.

Unused argument 'dc'
Error

Line: 32 Column: 36

                                           "Lengths should stay unchanged.")

    @given(**hu.gcs_cpu_only)
    def test_all_dropout(self, gc, dc):
        X = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.int64)
        Lengths = np.array([2, 2, 2, 2, 2]).astype(np.int32)
        replacement_value = -1
        self.ws.create_blob("X").feed(X)
        self.ws.create_blob("Lengths").feed(Lengths)

            

Reported by Pylint.

Unused argument 'gc'
Error

Line: 32 Column: 32

                                           "Lengths should stay unchanged.")

    @given(**hu.gcs_cpu_only)
    def test_all_dropout(self, gc, dc):
        X = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.int64)
        Lengths = np.array([2, 2, 2, 2, 2]).astype(np.int32)
        replacement_value = -1
        self.ws.create_blob("X").feed(X)
        self.ws.create_blob("Lengths").feed(Lengths)

            

Reported by Pylint.

Unused argument 'gc'
Error

Line: 52 Column: 44

                      self.assertEqual(sum(lengths), len(y))

    @given(**hu.gcs_cpu_only)
    def test_all_dropout_empty_input(self, gc, dc):
        X = np.array([]).astype(np.int64)
        Lengths = np.array([0]).astype(np.int32)
        replacement_value = -1
        self.ws.create_blob("X").feed(X)
        self.ws.create_blob("Lengths").feed(Lengths)

            

Reported by Pylint.

Unused argument 'dc'
Error

Line: 52 Column: 48

                      self.assertEqual(sum(lengths), len(y))

    @given(**hu.gcs_cpu_only)
    def test_all_dropout_empty_input(self, gc, dc):
        X = np.array([]).astype(np.int64)
        Lengths = np.array([0]).astype(np.int32)
        replacement_value = -1
        self.ws.create_blob("X").feed(X)
        self.ws.create_blob("Lengths").feed(Lengths)

            

Reported by Pylint.
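
Possible fix (sketch): gc and dc are injected by @given(**hu.gcs_cpu_only) as keyword arguments, so they cannot simply be renamed with a leading underscore without breaking the keyword binding; an inline suppression is the low-risk option.

    @given(**hu.gcs_cpu_only)
    def test_no_dropout(self, gc, dc):  # pylint: disable=unused-argument
        X = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.int64)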

Missing module docstring
Error

Line: 1 Column: 1

              




from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import numpy as np

            

Reported by Pylint.

Missing class docstring
Error

Line: 12 Column: 1

              import numpy as np


class SparseItemwiseDropoutWithReplacementTest(hu.HypothesisTestCase):
    @given(**hu.gcs_cpu_only)
    def test_no_dropout(self, gc, dc):
        X = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.int64)
        Lengths = np.array([2, 2, 2, 2, 2]).astype(np.int32)
        replacement_value = -1

            

Reported by Pylint.

Argument name "gc" doesn't conform to snake_case naming style
Error

Line: 14 Column: 5

              
class SparseItemwiseDropoutWithReplacementTest(hu.HypothesisTestCase):
    @given(**hu.gcs_cpu_only)
    def test_no_dropout(self, gc, dc):
        X = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.int64)
        Lengths = np.array([2, 2, 2, 2, 2]).astype(np.int32)
        replacement_value = -1
        self.ws.create_blob("X").feed(X)
        self.ws.create_blob("Lengths").feed(Lengths)

            

Reported by Pylint.

aten/src/ATen/nnapi/codegen.py
28 issues
Unused argument 'argv'
Error

Line: 75 Column: 10

              ]


def main(argv):
    struct_members = []
    load_functions = []
    define_checks = []

    for ret, name, args in NNAPI_FUNCTIONS:

            

Reported by Pylint.

Line too long (108/100)
Error

Line: 39 Column: 1

              
NNAPI_FUNCTIONS = [
    ("int", "ANeuralNetworks_getDeviceCount", "uint32_t* numDevices"),  # noqa: B950
    ("int", "ANeuralNetworks_getDevice", "uint32_t devIndex, ANeuralNetworksDevice** device"),  # noqa: B950
    ("int", "ANeuralNetworksDevice_getName", "const ANeuralNetworksDevice* device, const char** name"),  # noqa: B950
    ("int", "ANeuralNetworksDevice_getVersion", "const ANeuralNetworksDevice* device, const char** version"),  # noqa: B950
    ("int", "ANeuralNetworksDevice_getFeatureLevel", "const ANeuralNetworksDevice* device, int64_t* featureLevel"),  # noqa: B950
    ("int", "ANeuralNetworksModel_getSupportedOperationsForDevices", " const ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, bool* supportedOps"),  # noqa: B950
    ("int", "ANeuralNetworksCompilation_createForDevices", "ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, ANeuralNetworksCompilation** compilation"),  # noqa: B950

            

Reported by Pylint.

Line too long (117/100)
Error

Line: 40 Column: 1

              NNAPI_FUNCTIONS = [
    ("int", "ANeuralNetworks_getDeviceCount", "uint32_t* numDevices"),  # noqa: B950
    ("int", "ANeuralNetworks_getDevice", "uint32_t devIndex, ANeuralNetworksDevice** device"),  # noqa: B950
    ("int", "ANeuralNetworksDevice_getName", "const ANeuralNetworksDevice* device, const char** name"),  # noqa: B950
    ("int", "ANeuralNetworksDevice_getVersion", "const ANeuralNetworksDevice* device, const char** version"),  # noqa: B950
    ("int", "ANeuralNetworksDevice_getFeatureLevel", "const ANeuralNetworksDevice* device, int64_t* featureLevel"),  # noqa: B950
    ("int", "ANeuralNetworksModel_getSupportedOperationsForDevices", " const ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, bool* supportedOps"),  # noqa: B950
    ("int", "ANeuralNetworksCompilation_createForDevices", "ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, ANeuralNetworksCompilation** compilation"),  # noqa: B950
    ("int", "ANeuralNetworksExecution_compute", "ANeuralNetworksExecution* execution"),  # noqa: B950

            

Reported by Pylint.

Line too long (123/100)
Error

Line: 41 Column: 1

                  ("int", "ANeuralNetworks_getDeviceCount", "uint32_t* numDevices"),  # noqa: B950
    ("int", "ANeuralNetworks_getDevice", "uint32_t devIndex, ANeuralNetworksDevice** device"),  # noqa: B950
    ("int", "ANeuralNetworksDevice_getName", "const ANeuralNetworksDevice* device, const char** name"),  # noqa: B950
    ("int", "ANeuralNetworksDevice_getVersion", "const ANeuralNetworksDevice* device, const char** version"),  # noqa: B950
    ("int", "ANeuralNetworksDevice_getFeatureLevel", "const ANeuralNetworksDevice* device, int64_t* featureLevel"),  # noqa: B950
    ("int", "ANeuralNetworksModel_getSupportedOperationsForDevices", " const ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, bool* supportedOps"),  # noqa: B950
    ("int", "ANeuralNetworksCompilation_createForDevices", "ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, ANeuralNetworksCompilation** compilation"),  # noqa: B950
    ("int", "ANeuralNetworksExecution_compute", "ANeuralNetworksExecution* execution"),  # noqa: B950
    ("int", "ANeuralNetworksMemory_createFromFd", "size_t size, int protect, int fd, size_t offset, ANeuralNetworksMemory** memory"),  # noqa: B950

            

Reported by Pylint.

Line too long (129/100)
Error

Line: 42 Column: 1

                  ("int", "ANeuralNetworks_getDevice", "uint32_t devIndex, ANeuralNetworksDevice** device"),  # noqa: B950
    ("int", "ANeuralNetworksDevice_getName", "const ANeuralNetworksDevice* device, const char** name"),  # noqa: B950
    ("int", "ANeuralNetworksDevice_getVersion", "const ANeuralNetworksDevice* device, const char** version"),  # noqa: B950
    ("int", "ANeuralNetworksDevice_getFeatureLevel", "const ANeuralNetworksDevice* device, int64_t* featureLevel"),  # noqa: B950
    ("int", "ANeuralNetworksModel_getSupportedOperationsForDevices", " const ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, bool* supportedOps"),  # noqa: B950
    ("int", "ANeuralNetworksCompilation_createForDevices", "ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, ANeuralNetworksCompilation** compilation"),  # noqa: B950
    ("int", "ANeuralNetworksExecution_compute", "ANeuralNetworksExecution* execution"),  # noqa: B950
    ("int", "ANeuralNetworksMemory_createFromFd", "size_t size, int protect, int fd, size_t offset, ANeuralNetworksMemory** memory"),  # noqa: B950
    ("void", "ANeuralNetworksMemory_free", "ANeuralNetworksMemory* memory"),  # noqa: B950

            

Reported by Pylint.

Line too long (207/100)
Error

Line: 43 Column: 1

                  ("int", "ANeuralNetworksDevice_getName", "const ANeuralNetworksDevice* device, const char** name"),  # noqa: B950
    ("int", "ANeuralNetworksDevice_getVersion", "const ANeuralNetworksDevice* device, const char** version"),  # noqa: B950
    ("int", "ANeuralNetworksDevice_getFeatureLevel", "const ANeuralNetworksDevice* device, int64_t* featureLevel"),  # noqa: B950
    ("int", "ANeuralNetworksModel_getSupportedOperationsForDevices", " const ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, bool* supportedOps"),  # noqa: B950
    ("int", "ANeuralNetworksCompilation_createForDevices", "ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, ANeuralNetworksCompilation** compilation"),  # noqa: B950
    ("int", "ANeuralNetworksExecution_compute", "ANeuralNetworksExecution* execution"),  # noqa: B950
    ("int", "ANeuralNetworksMemory_createFromFd", "size_t size, int protect, int fd, size_t offset, ANeuralNetworksMemory** memory"),  # noqa: B950
    ("void", "ANeuralNetworksMemory_free", "ANeuralNetworksMemory* memory"),  # noqa: B950
    ("int", "ANeuralNetworksModel_create", "ANeuralNetworksModel** model"),  # noqa: B950

            

Reported by Pylint.

Line too long (212/100)
Error

Line: 44 Column: 1

                  ("int", "ANeuralNetworksDevice_getVersion", "const ANeuralNetworksDevice* device, const char** version"),  # noqa: B950
    ("int", "ANeuralNetworksDevice_getFeatureLevel", "const ANeuralNetworksDevice* device, int64_t* featureLevel"),  # noqa: B950
    ("int", "ANeuralNetworksModel_getSupportedOperationsForDevices", " const ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, bool* supportedOps"),  # noqa: B950
    ("int", "ANeuralNetworksCompilation_createForDevices", "ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, ANeuralNetworksCompilation** compilation"),  # noqa: B950
    ("int", "ANeuralNetworksExecution_compute", "ANeuralNetworksExecution* execution"),  # noqa: B950
    ("int", "ANeuralNetworksMemory_createFromFd", "size_t size, int protect, int fd, size_t offset, ANeuralNetworksMemory** memory"),  # noqa: B950
    ("void", "ANeuralNetworksMemory_free", "ANeuralNetworksMemory* memory"),  # noqa: B950
    ("int", "ANeuralNetworksModel_create", "ANeuralNetworksModel** model"),  # noqa: B950
    ("void", "ANeuralNetworksModel_free", "ANeuralNetworksModel* model"),  # noqa: B950

            

Reported by Pylint.

Line too long (101/100)
Error

Line: 45 Column: 1

                  ("int", "ANeuralNetworksDevice_getFeatureLevel", "const ANeuralNetworksDevice* device, int64_t* featureLevel"),  # noqa: B950
    ("int", "ANeuralNetworksModel_getSupportedOperationsForDevices", " const ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, bool* supportedOps"),  # noqa: B950
    ("int", "ANeuralNetworksCompilation_createForDevices", "ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, ANeuralNetworksCompilation** compilation"),  # noqa: B950
    ("int", "ANeuralNetworksExecution_compute", "ANeuralNetworksExecution* execution"),  # noqa: B950
    ("int", "ANeuralNetworksMemory_createFromFd", "size_t size, int protect, int fd, size_t offset, ANeuralNetworksMemory** memory"),  # noqa: B950
    ("void", "ANeuralNetworksMemory_free", "ANeuralNetworksMemory* memory"),  # noqa: B950
    ("int", "ANeuralNetworksModel_create", "ANeuralNetworksModel** model"),  # noqa: B950
    ("void", "ANeuralNetworksModel_free", "ANeuralNetworksModel* model"),  # noqa: B950
    ("int", "ANeuralNetworksModel_finish", "ANeuralNetworksModel* model"),  # noqa: B950

            

Reported by Pylint.

Line too long (147/100)
Error

Line: 46 Column: 1

                  ("int", "ANeuralNetworksModel_getSupportedOperationsForDevices", " const ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, bool* supportedOps"),  # noqa: B950
    ("int", "ANeuralNetworksCompilation_createForDevices", "ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, ANeuralNetworksCompilation** compilation"),  # noqa: B950
    ("int", "ANeuralNetworksExecution_compute", "ANeuralNetworksExecution* execution"),  # noqa: B950
    ("int", "ANeuralNetworksMemory_createFromFd", "size_t size, int protect, int fd, size_t offset, ANeuralNetworksMemory** memory"),  # noqa: B950
    ("void", "ANeuralNetworksMemory_free", "ANeuralNetworksMemory* memory"),  # noqa: B950
    ("int", "ANeuralNetworksModel_create", "ANeuralNetworksModel** model"),  # noqa: B950
    ("void", "ANeuralNetworksModel_free", "ANeuralNetworksModel* model"),  # noqa: B950
    ("int", "ANeuralNetworksModel_finish", "ANeuralNetworksModel* model"),  # noqa: B950
    ("int", "ANeuralNetworksModel_addOperand", "ANeuralNetworksModel* model, const ANeuralNetworksOperandType* type"),  # noqa: B950

            

Reported by Pylint.

Line too long (132/100)
Error

Line: 51 Column: 1

                  ("int", "ANeuralNetworksModel_create", "ANeuralNetworksModel** model"),  # noqa: B950
    ("void", "ANeuralNetworksModel_free", "ANeuralNetworksModel* model"),  # noqa: B950
    ("int", "ANeuralNetworksModel_finish", "ANeuralNetworksModel* model"),  # noqa: B950
    ("int", "ANeuralNetworksModel_addOperand", "ANeuralNetworksModel* model, const ANeuralNetworksOperandType* type"),  # noqa: B950
    ("int", "ANeuralNetworksModel_setOperandValue", "ANeuralNetworksModel* model, int32_t index, const void* buffer, size_t length"),  # noqa: B950
    ("int", "ANeuralNetworksModel_setOperandValueFromMemory", "ANeuralNetworksModel* model, int32_t index, const ANeuralNetworksMemory* memory, size_t offset, size_t length"),  # noqa: B950
    ("int", "ANeuralNetworksModel_addOperation", "ANeuralNetworksModel* model, ANeuralNetworksOperationType type, uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount, const uint32_t* outputs"),  # noqa: B950
    ("int", "ANeuralNetworksModel_identifyInputsAndOutputs", "ANeuralNetworksModel* model, uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount, const uint32_t* outputs"),  # noqa: B950
    ("int", "ANeuralNetworksModel_relaxComputationFloat32toFloat16", "ANeuralNetworksModel* model, bool allow"),  # noqa: B950

            

Reported by Pylint.
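
Possible fix (sketch): these declaration strings are intentionally long and already carry # noqa: B950 for flake8; Pylint applies its own length check, so one option (a project-preference assumption) is a scoped Pylint suppression rather than wrapping every entry. Note that a bare disable comment at module level applies to the rest of the file.

# pylint: disable=line-too-long
NNAPI_FUNCTIONS = [
    ("int", "ANeuralNetworks_getDeviceCount", "uint32_t* numDevices"),  # noqa: B950
    ("int", "ANeuralNetworks_getDevice", "uint32_t devIndex, ANeuralNetworksDevice** device"),  # noqa: B950
]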

caffe2/quantization/server/resize_nearest_dnnlowp_op_test.py
28 issues
Unable to import 'hypothesis.strategies'
Error

Line: 4 Column: 1

              

import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from hypothesis import given, settings



            

Reported by Pylint.

Unable to import 'hypothesis'
Error

Line: 7 Column: 1

              import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from hypothesis import given, settings


dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])


            

Reported by Pylint.

Unused argument 'gc'
Error

Line: 25 Column: 65

                      **hu.gcs_cpu_only
    )
    @settings(deadline=None, max_examples=50)
    def test_resize_nearest(self, N, H, W, C, scale_w, scale_h, gc, dc):
        X = np.round(np.random.rand(N, H, W, C) * 255).astype(np.float32)

        quantize = core.CreateOperator("Quantize", ["X"], ["X_q"], engine="DNNLOWP")
        resize_nearest = core.CreateOperator(
            "Int8ResizeNearest",

            

Reported by Pylint.

Unused argument 'dc'
Error

Line: 25 Column: 69

                      **hu.gcs_cpu_only
    )
    @settings(deadline=None, max_examples=50)
    def test_resize_nearest(self, N, H, W, C, scale_w, scale_h, gc, dc):
        X = np.round(np.random.rand(N, H, W, C) * 255).astype(np.float32)

        quantize = core.CreateOperator("Quantize", ["X"], ["X_q"], engine="DNNLOWP")
        resize_nearest = core.CreateOperator(
            "Int8ResizeNearest",

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              

import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from hypothesis import given, settings



            

Reported by Pylint.

Missing class docstring
Error

Line: 14 Column: 1

              workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])


class DNNLowPResizeNearestOpTest(hu.HypothesisTestCase):
    @given(
        N=st.integers(0, 3),
        H=st.integers(10, 300),
        W=st.integers(10, 300),
        C=st.integers(1, 32),

            

Reported by Pylint.

Argument name "H" doesn't conform to snake_case naming style
Error

Line: 24 Column: 5

                      scale_h=st.floats(0.25, 4.0) | st.just(2.0),
        **hu.gcs_cpu_only
    )
    @settings(deadline=None, max_examples=50)
    def test_resize_nearest(self, N, H, W, C, scale_w, scale_h, gc, dc):
        X = np.round(np.random.rand(N, H, W, C) * 255).astype(np.float32)

        quantize = core.CreateOperator("Quantize", ["X"], ["X_q"], engine="DNNLOWP")
        resize_nearest = core.CreateOperator(

            

Reported by Pylint.

Argument name "W" doesn't conform to snake_case naming style
Error

Line: 24 Column: 5

                      scale_h=st.floats(0.25, 4.0) | st.just(2.0),
        **hu.gcs_cpu_only
    )
    @settings(deadline=None, max_examples=50)
    def test_resize_nearest(self, N, H, W, C, scale_w, scale_h, gc, dc):
        X = np.round(np.random.rand(N, H, W, C) * 255).astype(np.float32)

        quantize = core.CreateOperator("Quantize", ["X"], ["X_q"], engine="DNNLOWP")
        resize_nearest = core.CreateOperator(

            

Reported by Pylint.

Argument name "C" doesn't conform to snake_case naming style
Error

Line: 24 Column: 5

                      scale_h=st.floats(0.25, 4.0) | st.just(2.0),
        **hu.gcs_cpu_only
    )
    @settings(deadline=None, max_examples=50)
    def test_resize_nearest(self, N, H, W, C, scale_w, scale_h, gc, dc):
        X = np.round(np.random.rand(N, H, W, C) * 255).astype(np.float32)

        quantize = core.CreateOperator("Quantize", ["X"], ["X_q"], engine="DNNLOWP")
        resize_nearest = core.CreateOperator(

            

Reported by Pylint.

Argument name "gc" doesn't conform to snake_case naming style
Error

Line: 24 Column: 5

                      scale_h=st.floats(0.25, 4.0) | st.just(2.0),
        **hu.gcs_cpu_only
    )
    @settings(deadline=None, max_examples=50)
    def test_resize_nearest(self, N, H, W, C, scale_w, scale_h, gc, dc):
        X = np.round(np.random.rand(N, H, W, C) * 255).astype(np.float32)

        quantize = core.CreateOperator("Quantize", ["X"], ["X_q"], engine="DNNLOWP")
        resize_nearest = core.CreateOperator(

            

Reported by Pylint.
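
Possible fix (sketch): as with gc and dc above, N, H, W and C are bound by keyword from the @given strategies, so renaming them is not free; adding them to good-names in the Pylint configuration (assuming one exists) or suppressing invalid-name on the test is the practical route.

    @settings(deadline=None, max_examples=50)
    def test_resize_nearest(self, N, H, W, C, scale_w, scale_h, gc, dc):  # pylint: disable=invalid-name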

torch/distributions/dirichlet.py
28 issues
Module 'torch' has no '_dirichlet_grad' member
Error

Line: 11 Column: 12

              # This helper is exposed for testing.
def _Dirichlet_backward(x, concentration, grad_output):
    total = concentration.sum(-1, True).expand_as(concentration)
    grad = torch._dirichlet_grad(x, concentration, total)
    return grad * (grad_output - (x * grad_output).sum(-1, True))


class _Dirichlet(Function):
    @staticmethod

            

Reported by Pylint.

Module 'torch' has no '_sample_dirichlet' member
Error

Line: 18 Column: 13

              class _Dirichlet(Function):
    @staticmethod
    def forward(ctx, concentration):
        x = torch._sample_dirichlet(concentration)
        ctx.save_for_backward(x, concentration)
        return x

    @staticmethod
    @once_differentiable

            

Reported by Pylint.

Module 'torch' has no 'Size' member
Error

Line: 56 Column: 23

              
    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(Dirichlet, _instance)
        batch_shape = torch.Size(batch_shape)
        new.concentration = self.concentration.expand(batch_shape + self.event_shape)
        super(Dirichlet, new).__init__(batch_shape, self.event_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new


            

Reported by Pylint.

Module 'torch' has no 'log' member
Error

Line: 70 Column: 18

                  def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        return ((torch.log(value) * (self.concentration - 1.0)).sum(-1) +
                torch.lgamma(self.concentration.sum(-1)) -
                torch.lgamma(self.concentration).sum(-1))

    @property
    def mean(self):

            

Reported by Pylint.

Module 'torch' has no 'lgamma' member
Error

Line: 71 Column: 17

                      if self._validate_args:
            self._validate_sample(value)
        return ((torch.log(value) * (self.concentration - 1.0)).sum(-1) +
                torch.lgamma(self.concentration.sum(-1)) -
                torch.lgamma(self.concentration).sum(-1))

    @property
    def mean(self):
        return self.concentration / self.concentration.sum(-1, True)

            

Reported by Pylint.

Module 'torch' has no 'lgamma' member
Error

Line: 72 Column: 17

                          self._validate_sample(value)
        return ((torch.log(value) * (self.concentration - 1.0)).sum(-1) +
                torch.lgamma(self.concentration.sum(-1)) -
                torch.lgamma(self.concentration).sum(-1))

    @property
    def mean(self):
        return self.concentration / self.concentration.sum(-1, True)


            

Reported by Pylint.

Module 'torch' has no 'lgamma' member
Error

Line: 86 Column: 17

                  def entropy(self):
        k = self.concentration.size(-1)
        a0 = self.concentration.sum(-1)
        return (torch.lgamma(self.concentration).sum(-1) - torch.lgamma(a0) -
                (k - a0) * torch.digamma(a0) -
                ((self.concentration - 1.0) * torch.digamma(self.concentration)).sum(-1))

    @property
    def _natural_params(self):

            

Reported by Pylint.

Module 'torch' has no 'lgamma' member
Error

Line: 86 Column: 60

                  def entropy(self):
        k = self.concentration.size(-1)
        a0 = self.concentration.sum(-1)
        return (torch.lgamma(self.concentration).sum(-1) - torch.lgamma(a0) -
                (k - a0) * torch.digamma(a0) -
                ((self.concentration - 1.0) * torch.digamma(self.concentration)).sum(-1))

    @property
    def _natural_params(self):

            

Reported by Pylint.

Module 'torch' has no 'digamma' member
Error

Line: 87 Column: 28

                      k = self.concentration.size(-1)
        a0 = self.concentration.sum(-1)
        return (torch.lgamma(self.concentration).sum(-1) - torch.lgamma(a0) -
                (k - a0) * torch.digamma(a0) -
                ((self.concentration - 1.0) * torch.digamma(self.concentration)).sum(-1))

    @property
    def _natural_params(self):
        return (self.concentration, )

            

Reported by Pylint.

Module 'torch' has no 'digamma' member
Error

Line: 88 Column: 47

                      a0 = self.concentration.sum(-1)
        return (torch.lgamma(self.concentration).sum(-1) - torch.lgamma(a0) -
                (k - a0) * torch.digamma(a0) -
                ((self.concentration - 1.0) * torch.digamma(self.concentration)).sum(-1))

    @property
    def _natural_params(self):
        return (self.concentration, )


            

Reported by Pylint.
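
Possible fix (sketch): these look like false positives; many torch functions (lgamma, digamma, _sample_dirichlet, ...) are created at runtime by the C extension, so Pylint's static analysis cannot see them. The usual mitigations are a generated-members pattern in the Pylint configuration (assuming the project keeps one) or a targeted inline suppression.

    # .pylintrc (assumption: the project uses one):
    #   [TYPECHECK]
    #   generated-members=torch.*
    # Inline alternative, scoped to a single line:
    grad = torch._dirichlet_grad(x, concentration, total)  # pylint: disable=no-member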

caffe2/python/layer_parameter_sharing_test.py
28 issues
Using deprecated method assertEquals()
Error

Line: 23 Column: 13

                              self.model.input_feature_schema.float_features,
                output_dims
            )
            self.assertEquals(self.model.layers[-1].w, 'global_scope/fc/w')
            self.assertEquals(fc1_output(), 'global_scope/fc/output')

            with scope.NameScope('nested_scope'):
                fc2_output = self.model.FC(
                    fc1_output,

            

Reported by Pylint.

Using deprecated method assertEquals()
Error

Line: 24 Column: 13

                              output_dims
            )
            self.assertEquals(self.model.layers[-1].w, 'global_scope/fc/w')
            self.assertEquals(fc1_output(), 'global_scope/fc/output')

            with scope.NameScope('nested_scope'):
                fc2_output = self.model.FC(
                    fc1_output,
                    output_dims

            

Reported by Pylint.

Using deprecated method assertEquals()
Error

Line: 31 Column: 17

                                  fc1_output,
                    output_dims
                )
                self.assertEquals(self.model.layers[-1].w,
                                  'global_scope/nested_scope/fc/w')
                self.assertEquals(fc2_output(),
                                  'global_scope/nested_scope/fc/output')

                fc3_output = self.model.FC(

            

Reported by Pylint.

Using deprecated method assertEquals()
Error

Line: 33 Column: 17

                              )
                self.assertEquals(self.model.layers[-1].w,
                                  'global_scope/nested_scope/fc/w')
                self.assertEquals(fc2_output(),
                                  'global_scope/nested_scope/fc/output')

                fc3_output = self.model.FC(
                    fc1_output,
                    output_dims

            

Reported by Pylint.

Using deprecated method assertEquals()
Error

Line: 40 Column: 17

                                  fc1_output,
                    output_dims
                )
                self.assertEquals(self.model.layers[-1].w,
                                  'global_scope/nested_scope/fc_auto_0/w')
                self.assertEquals(fc3_output(),
                                  'global_scope/nested_scope/fc_auto_0/output')

    def test_layer_shared_parameter_name_different_namescopes(self):

            

Reported by Pylint.

Using deprecated method assertEquals()
Error

Line: 42 Column: 17

                              )
                self.assertEquals(self.model.layers[-1].w,
                                  'global_scope/nested_scope/fc_auto_0/w')
                self.assertEquals(fc3_output(),
                                  'global_scope/nested_scope/fc_auto_0/output')

    def test_layer_shared_parameter_name_different_namescopes(self):
        output_dims = 2
        with scope.NameScope('global_scope'):

            

Reported by Pylint.

Using deprecated method assertEquals()
Error

Line: 54 Column: 21

                                      self.model.input_feature_schema.float_features,
                        output_dims
                    )
                    self.assertEquals(self.model.layers[-1].w,
                                      'global_scope/scope_0/fc/w')
                    self.assertEquals(fc1_output(),
                                      'global_scope/scope_0/fc/output')

                with scope.NameScope('scope_1'):

            

Reported by Pylint.

Using deprecated method assertEquals()
Error

Line: 56 Column: 21

                                  )
                    self.assertEquals(self.model.layers[-1].w,
                                      'global_scope/scope_0/fc/w')
                    self.assertEquals(fc1_output(),
                                      'global_scope/scope_0/fc/output')

                with scope.NameScope('scope_1'):
                    fc2_output = self.model.FC(
                        self.model.input_feature_schema.float_features,

            

Reported by Pylint.

Using deprecated method assertEquals()
Error

Line: 64 Column: 21

                                      self.model.input_feature_schema.float_features,
                        output_dims
                    )
                    self.assertEquals(self.model.layers[-1].w,
                                      'global_scope/scope_0/fc/w')
                    self.assertEquals(fc2_output(),
                                      'global_scope/scope_1/fc/output')

    def test_layer_shared_parameter_name_within_same_namescope(self):

            

Reported by Pylint.

Using deprecated method assertEquals()
Error

Line: 66 Column: 21

                                  )
                    self.assertEquals(self.model.layers[-1].w,
                                      'global_scope/scope_0/fc/w')
                    self.assertEquals(fc2_output(),
                                      'global_scope/scope_1/fc/output')

    def test_layer_shared_parameter_name_within_same_namescope(self):
        output_dims = 2
        with scope.NameScope('global_scope'):

            

Reported by Pylint.
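
Possible fix (sketch): assertEquals is a deprecated alias of assertEqual; a mechanical rename clears all of these warnings.

            self.assertEqual(self.model.layers[-1].w, 'global_scope/fc/w')
            self.assertEqual(fc1_output(), 'global_scope/fc/output')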

.circleci/cimodel/data/binary_build_data.py
28 issues
XXX devtoolset7 on CUDA 9.0 is temporarily disabled
Error

Line: 148 Column: 3

                  def get_children(self):
        gpu_versions = self.find_prop("gpu_versions")

        # XXX devtoolset7 on CUDA 9.0 is temporarily disabled
        # see https://github.com/pytorch/pytorch/issues/20066
        if self.find_prop("gcc_config_variant") == 'devtoolset7':
            gpu_versions = filter(lambda x: x != "cuda_90", gpu_versions)

        # XXX disabling conda rocm build since docker images are not there

            

Reported by Pylint.

XXX disabling conda rocm build since docker images are not there
Error

Line: 153 Column: 3

                      if self.find_prop("gcc_config_variant") == 'devtoolset7':
            gpu_versions = filter(lambda x: x != "cuda_90", gpu_versions)

        # XXX disabling conda rocm build since docker images are not there
        if self.find_prop("package_format") == 'conda':
            gpu_versions = filter(lambda x: x not in dimensions.ROCM_VERSION_LABELS, gpu_versions)

        # XXX libtorch rocm build  is temporarily disabled
        if self.find_prop("package_format") == 'libtorch':

            

Reported by Pylint.

XXX libtorch rocm build is temporarily disabled
Error

Line: 157 Column: 3

                      if self.find_prop("package_format") == 'conda':
            gpu_versions = filter(lambda x: x not in dimensions.ROCM_VERSION_LABELS, gpu_versions)

        # XXX libtorch rocm build  is temporarily disabled
        if self.find_prop("package_format") == 'libtorch':
            gpu_versions = filter(lambda x: x not in dimensions.ROCM_VERSION_LABELS, gpu_versions)

        return [ArchConfigNode(self, v) for v in gpu_versions]


            

Reported by Pylint.

Useless super delegation in method '__init__'
Error

Line: 201 Column: 5

              

class LinkingVariantConfigNode(ConfigNode):
    def __init__(self, parent, linking_variant):
        super(LinkingVariantConfigNode, self).__init__(parent, linking_variant)

    def get_children(self):
        return [DependencyInclusionConfigNode(self, v) for v in DEPS_INCLUSION_DIMENSIONS]


            

Reported by Pylint.
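
Possible fix (sketch): the __init__ only forwards its arguments unchanged to ConfigNode.__init__, so it can be removed; the inherited constructor behaves identically.

class LinkingVariantConfigNode(ConfigNode):
    def get_children(self):
        return [DependencyInclusionConfigNode(self, v) for v in DEPS_INCLUSION_DIMENSIONS]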

Missing function or method docstring
Error

Line: 28 Column: 1

              ]


def get_processor_arch_name(gpu_version):
    return "cpu" if not gpu_version else (
        "cu" + gpu_version.strip("cuda") if gpu_version.startswith("cuda") else gpu_version
    )

LINUX_PACKAGE_VARIANTS = OrderedDict(

            

Reported by Pylint.

Missing class docstring
Error

Line: 99 Column: 1

              ]


class TopLevelNode(ConfigNode):
    def __init__(self, node_name, config_tree_data, smoke):
        super(TopLevelNode, self).__init__(None, node_name)

        self.config_tree_data = config_tree_data
        self.props["smoke"] = smoke

            

Reported by Pylint.

Consider using Python 3 style super() without arguments
Error

Line: 101 Column: 9

              
class TopLevelNode(ConfigNode):
    def __init__(self, node_name, config_tree_data, smoke):
        super(TopLevelNode, self).__init__(None, node_name)

        self.config_tree_data = config_tree_data
        self.props["smoke"] = smoke

    def get_children(self):

            

Reported by Pylint.
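
Possible fix (sketch): on Python 3 the explicit class and instance arguments to super() are unnecessary.

class TopLevelNode(ConfigNode):
    def __init__(self, node_name, config_tree_data, smoke):
        super().__init__(None, node_name)

        self.config_tree_data = config_tree_data
        self.props["smoke"] = smoke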

Missing class docstring
Error

Line: 110 Column: 1

                      return [OSConfigNode(self, x, c, p) for (x, (c, p)) in self.config_tree_data.items()]


class OSConfigNode(ConfigNode):
    def __init__(self, parent, os_name, gpu_versions, py_tree):
        super(OSConfigNode, self).__init__(parent, os_name)

        self.py_tree = py_tree
        self.props["os_name"] = os_name

            

Reported by Pylint.

Consider using Python 3 style super() without arguments
Error

Line: 112 Column: 9

              
class OSConfigNode(ConfigNode):
    def __init__(self, parent, os_name, gpu_versions, py_tree):
        super(OSConfigNode, self).__init__(parent, os_name)

        self.py_tree = py_tree
        self.props["os_name"] = os_name
        self.props["gpu_versions"] = gpu_versions


            

Reported by Pylint.

Missing class docstring
Error

Line: 122 Column: 1

                      return [PackageFormatConfigNode(self, k, v) for k, v in self.py_tree.items()]


class PackageFormatConfigNode(ConfigNode):
    def __init__(self, parent, package_format, python_versions):
        super(PackageFormatConfigNode, self).__init__(parent, package_format)

        self.props["python_versions"] = python_versions
        self.props["package_format"] = package_format

            

Reported by Pylint.

torch/distributed/optim/functional_adamw.py
28 issues
Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 70 Column: 29

                      if param not in self.state:
            self.state[param] = {}
            state = self.state[param]
            state['step'] = torch.tensor(0.0)
            # Exponential moving average of gradient values
            state['exp_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
            # Exponential moving average of squared gradient values
            state['exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)
            if self.amsgrad:

            

Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 72 Column: 70

                          state = self.state[param]
            state['step'] = torch.tensor(0.0)
            # Exponential moving average of gradient values
            state['exp_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
            # Exponential moving average of squared gradient values
            state['exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)
            if self.amsgrad:
                # Maintains max of all exp. moving avg. of sq. grad. values
                state['max_exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)

            

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 72 Column: 32

                          state = self.state[param]
            state['step'] = torch.tensor(0.0)
            # Exponential moving average of gradient values
            state['exp_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
            # Exponential moving average of squared gradient values
            state['exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)
            if self.amsgrad:
                # Maintains max of all exp. moving avg. of sq. grad. values
                state['max_exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)

            

Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 74 Column: 73

                          # Exponential moving average of gradient values
            state['exp_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
            # Exponential moving average of squared gradient values
            state['exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)
            if self.amsgrad:
                # Maintains max of all exp. moving avg. of sq. grad. values
                state['max_exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)

        state = self.state[param]

            

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 74 Column: 35

                          # Exponential moving average of gradient values
            state['exp_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
            # Exponential moving average of squared gradient values
            state['exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)
            if self.amsgrad:
                # Maintains max of all exp. moving avg. of sq. grad. values
                state['max_exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)

        state = self.state[param]

            

Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 77 Column: 81

                          state['exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)
            if self.amsgrad:
                # Maintains max of all exp. moving avg. of sq. grad. values
                state['max_exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)

        state = self.state[param]

        exp_avgs.append(state['exp_avg'])
        exp_avg_sqs.append(state['exp_avg_sq'])

            

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 77 Column: 43

                          state['exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)
            if self.amsgrad:
                # Maintains max of all exp. moving avg. of sq. grad. values
                state['max_exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)

        state = self.state[param]

        exp_avgs.append(state['exp_avg'])
        exp_avg_sqs.append(state['exp_avg_sq'])

            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 129 Column: 37

                              if param not in self.state:
                    self.state[param] = {}
                    state = self.state[param]
                    state['step'] = torch.tensor(0.0)
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)
                    if self.amsgrad:

            

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 131 Column: 40

                                  state = self.state[param]
                    state['step'] = torch.tensor(0.0)
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)
                    if self.amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)

            

Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 131 Column: 78

                                  state = self.state[param]
                    state['step'] = torch.tensor(0.0)
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)
                    if self.amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)

            

Reported by Pylint.

torch/optim/swa_utils.py
28 issues
Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 93 Column: 30

                      if device is not None:
            self.module = self.module.to(device)
        self.register_buffer('n_averaged',
                             torch.tensor(0, dtype=torch.long, device=device))
        if avg_fn is None:
            def avg_fn(averaged_model_parameter, model_parameter, num_averaged):
                return averaged_model_parameter + \
                    (model_parameter - averaged_model_parameter) / (num_averaged + 1)
        self.avg_fn = avg_fn

            

Reported by Pylint.

Module 'torch' has no 'long' member
Error

Line: 93 Column: 52

                      if device is not None:
            self.module = self.module.to(device)
        self.register_buffer('n_averaged',
                             torch.tensor(0, dtype=torch.long, device=device))
        if avg_fn is None:
            def avg_fn(averaged_model_parameter, model_parameter, num_averaged):
                return averaged_model_parameter + \
                    (model_parameter - averaged_model_parameter) / (num_averaged + 1)
        self.avg_fn = avg_fn

            

Reported by Pylint.

Instance of 'AveragedModel' has no 'n_averaged' member
Error

Line: 107 Column: 16

                      for p_swa, p_model in zip(self.parameters(), model.parameters()):
            device = p_swa.device
            p_model_ = p_model.detach().to(device)
            if self.n_averaged == 0:
                p_swa.detach().copy_(p_model_)
            else:
                p_swa.detach().copy_(self.avg_fn(p_swa.detach(), p_model_,
                                                 self.n_averaged.to(device)))
        self.n_averaged += 1

            

Reported by Pylint.

Instance of 'AveragedModel' has no 'n_averaged' member
Error

Line: 111 Column: 50

                              p_swa.detach().copy_(p_model_)
            else:
                p_swa.detach().copy_(self.avg_fn(p_swa.detach(), p_model_,
                                                 self.n_averaged.to(device)))
        self.n_averaged += 1


@torch.no_grad()
def update_bn(loader, model, device=None):

            

Reported by Pylint.

Instance of 'AveragedModel' has no 'n_averaged' member
Error

Line: 112 Column: 9

                          else:
                p_swa.detach().copy_(self.avg_fn(p_swa.detach(), p_model_,
                                                 self.n_averaged.to(device)))
        self.n_averaged += 1


@torch.no_grad()
def update_bn(loader, model, device=None):
    r"""Updates BatchNorm running_mean, running_var buffers in the model.

            

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 144 Column: 35

                  momenta = {}
    for module in model.modules():
        if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
            module.running_mean = torch.zeros_like(module.running_mean)
            module.running_var = torch.ones_like(module.running_var)
            momenta[module] = module.momentum

    if not momenta:
        return

            

Reported by Pylint.

Module 'torch' has no 'ones_like' member
Error

Line: 145 Column: 34

                  for module in model.modules():
        if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
            module.running_mean = torch.zeros_like(module.running_mean)
            module.running_var = torch.ones_like(module.running_var)
            momenta[module] = module.momentum

    if not momenta:
        return


            

Reported by Pylint.

Instance of 'SWALR' has no '_get_lr_called_within_step' member
Error

Line: 257 Column: 16

                      return (lr - alpha * swa_lr) / (1 - alpha)

    def get_lr(self):
        if not self._get_lr_called_within_step:
            warnings.warn("To get the last learning rate computed by the scheduler, "
                          "please use `get_last_lr()`.", UserWarning)
        step = self._step_count - 1
        if self.anneal_epochs == 0:
            step = max(1, step)

            

Reported by Pylint.

Undefined variable 'warnings'
Error

Line: 258 Column: 13

              
    def get_lr(self):
        if not self._get_lr_called_within_step:
            warnings.warn("To get the last learning rate computed by the scheduler, "
                          "please use `get_last_lr()`.", UserWarning)
        step = self._step_count - 1
        if self.anneal_epochs == 0:
            step = max(1, step)
        prev_t = max(0, min(1, (step - 1) / max(1, self.anneal_epochs)))

            

Reported by Pylint.
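
Possible fix (sketch): get_lr calls warnings.warn, and Pylint reporting warnings as undefined suggests the module never imports it (an assumption; the name could also be bound conditionally elsewhere). Adding the standard-library import at the top of torch/optim/swa_utils.py would resolve it.

import warnings

import torch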

Access to a protected member _BatchNorm of a client class
Error

Line: 143 Column: 31

                  """
    momenta = {}
    for module in model.modules():
        if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
            module.running_mean = torch.zeros_like(module.running_mean)
            module.running_var = torch.ones_like(module.running_var)
            momenta[module] = module.momentum

    if not momenta:

            

Reported by Pylint.
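
Possible fix (sketch): to my knowledge there is no public base class covering every BatchNorm variant, so the practical options are checking against the concrete public classes or suppressing the warning where the protected name is used deliberately.

        if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):  # pylint: disable=protected-access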

caffe2/python/helpers/conv.py
28 issues
Unused variable 'concat_dims'
Error

Line: 356 Column: 13

                              **kwargs
            )
        )
    concat, concat_dims = model.net.Concat(
        conv_blobs,
        [blob_out,
         "_" + blob_out + "_concat_dims"],
        order=order
    )

            

Reported by Pylint.
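
Possible fix (sketch): the Concat op still needs both output blob names, but the Python-side binding for the split-info output can use the conventional underscore since it is never read.

    concat, _ = model.net.Concat(
        conv_blobs,
        [blob_out,
         "_" + blob_out + "_concat_dims"],
        order=order
    )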

Missing module docstring
Error

Line: 1 Column: 1

              ## @package conv
# Module caffe2.python.helpers.conv





from caffe2.python import core
from caffe2.python.modeling import initializers

            

Reported by Pylint.

Function name "_ConvBase" doesn't conform to snake_case naming style
Error

Line: 12 Column: 1

              from caffe2.python.modeling import initializers
from caffe2.python.modeling.parameter_info import ParameterTags

def _ConvBase(
    model,
    is_nd,
    blob_in,
    blob_out,
    dim_in,

            

Reported by Pylint.

Too many arguments (18/5)
Error

Line: 12 Column: 1

              from caffe2.python.modeling import initializers
from caffe2.python.modeling.parameter_info import ParameterTags

def _ConvBase(
    model,
    is_nd,
    blob_in,
    blob_out,
    dim_in,

            

Reported by Pylint.

Argument name "BiasInitializer" doesn't conform to snake_case naming style
Error

Line: 12 Column: 1

              from caffe2.python.modeling import initializers
from caffe2.python.modeling.parameter_info import ParameterTags

def _ConvBase(
    model,
    is_nd,
    blob_in,
    blob_out,
    dim_in,

            

Reported by Pylint.

Argument name "WeightInitializer" doesn't conform to snake_case naming style
Error

Line: 12 Column: 1

              from caffe2.python.modeling import initializers
from caffe2.python.modeling.parameter_info import ParameterTags

def _ConvBase(
    model,
    is_nd,
    blob_in,
    blob_out,
    dim_in,

            

Reported by Pylint.

Too many branches (23/12)
Error

Line: 12 Column: 1

              from caffe2.python.modeling import initializers
from caffe2.python.modeling.parameter_info import ParameterTags

def _ConvBase(
    model,
    is_nd,
    blob_in,
    blob_out,
    dim_in,

            

Reported by Pylint.

Too many statements (55/50)
Error

Line: 12 Column: 1

              from caffe2.python.modeling import initializers
from caffe2.python.modeling.parameter_info import ParameterTags

def _ConvBase(
    model,
    is_nd,
    blob_in,
    blob_out,
    dim_in,

            

Reported by Pylint.

Too many local variables (26/15)
Error

Line: 12 Column: 1

              from caffe2.python.modeling import initializers
from caffe2.python.modeling.parameter_info import ParameterTags

def _ConvBase(
    model,
    is_nd,
    blob_in,
    blob_out,
    dim_in,

            

Reported by Pylint.

Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.
Security

Line: 41
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html

                          kernels = kernel
    else:
        if isinstance(kernel, list):
            assert len(kernel) == 2, "Conv support only a 2D kernel."
            kernels = kernel
        else:
            kernels = [kernel] * 2

    requested_engine = kwargs.get('engine')

            

Reported by Bandit.