The following issues were found:

test/distributed/pipeline/sync/skip/test_portal.py
47 issues
Unable to import 'pytest'
Error

Line: 7 Column: 1

              #
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch

from torch.distributed.pipeline.sync.dependency import fork, join
from torch.distributed.pipeline.sync.skip.portal import Portal
from torch.distributed.pipeline.sync.stream import default_stream

            

Reported by Pylint.

Unable to import 'torch'
Error

Line: 8 Column: 1

              # This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch

from torch.distributed.pipeline.sync.dependency import fork, join
from torch.distributed.pipeline.sync.skip.portal import Portal
from torch.distributed.pipeline.sync.stream import default_stream


            

Reported by Pylint.

Unable to import 'torch.distributed.pipeline.sync.dependency'
Error

Line: 10 Column: 1

              import pytest
import torch

from torch.distributed.pipeline.sync.dependency import fork, join
from torch.distributed.pipeline.sync.skip.portal import Portal
from torch.distributed.pipeline.sync.stream import default_stream


@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")

            

Reported by Pylint.

Unable to import 'torch.distributed.pipeline.sync.skip.portal'
Error

Line: 11 Column: 1

              import torch

from torch.distributed.pipeline.sync.dependency import fork, join
from torch.distributed.pipeline.sync.skip.portal import Portal
from torch.distributed.pipeline.sync.stream import default_stream


@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def test_copy_returns_on_next_device():

            

Reported by Pylint.

Unable to import 'torch.distributed.pipeline.sync.stream'
Error

Line: 12 Column: 1

              
from torch.distributed.pipeline.sync.dependency import fork, join
from torch.distributed.pipeline.sync.skip.portal import Portal
from torch.distributed.pipeline.sync.stream import default_stream


@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def test_copy_returns_on_next_device():
    portal = Portal(torch.rand(1), tensor_life=1)

            

Reported by Pylint.

Unused variable 'tensor'
Error

Line: 112 Column: 17

                      assert portal.tensor is None

    def test_tensor_life_0(self, new_portal):
        portal, tensor = new_portal(0)
        assert portal.tensor is None

    def test_tensor_life_1(self, new_portal):
        portal, tensor = new_portal(1)
        assert portal.tensor is tensor

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              # Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch


            

Reported by Pylint.

Missing function or method docstring
Error

Line: 16 Column: 1

              

@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def test_copy_returns_on_next_device():
    portal = Portal(torch.rand(1), tensor_life=1)

    prev_stream = default_stream(torch.device("cpu"))
    next_stream = default_stream(torch.device("cuda"))


            

Reported by Pylint.

Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.
Security

Line: 23
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html

                  next_stream = default_stream(torch.device("cuda"))

    phony = torch.zeros(0, requires_grad=True)
    assert phony.device.type == "cpu"

    phony = portal.copy(prev_stream, next_stream, phony)
    assert phony.device.type == "cuda"



            

Reported by Bandit.

Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.
Security

Line: 26
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html

                  assert phony.device.type == "cpu"

    phony = portal.copy(prev_stream, next_stream, phony)
    assert phony.device.type == "cuda"


def test_blue_orange():
    tensor1 = torch.rand(1, requires_grad=True)
    tensor2 = torch.rand(1, requires_grad=True)

            

Reported by Bandit.

test/test_module_init.py
47 issues
Unable to import 'torch'
Error

Line: 2 Column: 1

              import inspect
import torch
from unittest import mock
from unittest.mock import MagicMock, patch
from torch.testing import floating_types
from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.testing._internal.common_utils import TestCase, run_tests


            

Reported by Pylint.

Unable to import 'torch.testing'
Error

Line: 5 Column: 1

              import torch
from unittest import mock
from unittest.mock import MagicMock, patch
from torch.testing import floating_types
from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.testing._internal.common_utils import TestCase, run_tests



            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_device_type'
Error

Line: 6 Column: 1

              from unittest import mock
from unittest.mock import MagicMock, patch
from torch.testing import floating_types
from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.testing._internal.common_utils import TestCase, run_tests


# Returns a database of args & kwargs that can be used to construct each module.

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_quantization'
Error

Line: 7 Column: 1

              from unittest.mock import MagicMock, patch
from torch.testing import floating_types
from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.testing._internal.common_utils import TestCase, run_tests


# Returns a database of args & kwargs that can be used to construct each module.
# Each entry is in class -> (args, kwargs) format.

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 8 Column: 1

              from torch.testing import floating_types
from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.testing._internal.common_utils import TestCase, run_tests


# Returns a database of args & kwargs that can be used to construct each module.
# Each entry is in class -> (args, kwargs) format.
# Example: torch.nn.Linear -> ([10, 5], {})

            

Reported by Pylint.

TODO: Merge this in with the initial ModuleInfo implementation.
Error

Line: 14 Column: 3

              # Returns a database of args & kwargs that can be used to construct each module.
# Each entry is in class -> (args, kwargs) format.
# Example: torch.nn.Linear -> ([10, 5], {})
# TODO: Merge this in with the initial ModuleInfo implementation.
def build_constructor_arg_db():
    return {
        torch.nn.AdaptiveAvgPool1d: ((5,), {}),
        torch.nn.AdaptiveAvgPool2d: ((5,), {}),
        torch.nn.AdaptiveAvgPool3d: ((5,), {}),

            

Reported by Pylint.

Redefining name 'mock' from outer scope (line 3)
Error

Line: 237 Column: 5

              # Returns a function that calls the real implementation of a method
# in addition to passing args to a mock object.
def mock_wrapper(method):
    mock = MagicMock()

    def wrapper(self, *args, **kwargs):
        mock(*args, **kwargs)
        return method(self, *args, **kwargs)
    wrapper.mock = mock

            

Reported by Pylint.

Unused argument 'test_cls'
Error

Line: 264 Column: 24

                  return args, kwargs


def generate_test_func(test_cls, module_cls, constructor_arg_db,
                       verify_kwargs=True, module_is_lazy=False, check_nonexistent_arg=True):
    # Generate a function for testing the given module.
    @dtypes(*floating_types())
    def run_test(test_cls, device, dtype, module_cls=module_cls):
        # Check if this module creates parameters or registers buffers.

            

Reported by Pylint.

Access to a protected member _ConvNd of a client class
Error

Line: 356 Column: 9

                      torch.nn.Module,
        torch.nn.Container,  # deprecated
        torch.nn.NLLLoss2d,  # deprecated
        torch.nn.quantized._ConvNd,  # base class in __all__ for some reason
        # TODO: Remove these 2 from this list once the ASan issue is fixed.
        # See https://github.com/pytorch/pytorch/issues/55396
        torch.nn.quantized.Embedding,
        torch.nn.quantized.EmbeddingBag,
    }

            

Reported by Pylint.

TODO: Remove these 2 from this list once the ASan issue is fixed.
Error

Line: 357 Column: 3

                      torch.nn.Container,  # deprecated
        torch.nn.NLLLoss2d,  # deprecated
        torch.nn.quantized._ConvNd,  # base class in __all__ for some reason
        # TODO: Remove these 2 from this list once the ASan issue is fixed.
        # See https://github.com/pytorch/pytorch/issues/55396
        torch.nn.quantized.Embedding,
        torch.nn.quantized.EmbeddingBag,
    }
    # no need to support kwargs for these modules even though

            

Reported by Pylint.

caffe2/contrib/playground/ModuleRegister.py
47 issues
Use lazy % formatting in logging functions
Error

Line: 17 Column: 14

              
def registerModuleMap(module_map):
    MODULE_MAPS.append(module_map)
    log.info("ModuleRegister get modules from  ModuleMap content: {}".
             format(inspect.getsource(module_map)))


def constructTrainerClass(myTrainerClass, opts):


            

Reported by Pylint.

Use lazy % formatting in logging functions
Error

Line: 23 Column: 14

              
def constructTrainerClass(myTrainerClass, opts):

    log.info("ModuleRegister, myTrainerClass name is {}".
             format(myTrainerClass.__name__))
    log.info("ModuleRegister, myTrainerClass type is {}".
             format(type(myTrainerClass)))
    log.info("ModuleRegister, myTrainerClass dir is {}".
             format(dir(myTrainerClass)))

            

Reported by Pylint.

Use lazy % formatting in logging functions
Error

Line: 25 Column: 14

              
    log.info("ModuleRegister, myTrainerClass name is {}".
             format(myTrainerClass.__name__))
    log.info("ModuleRegister, myTrainerClass type is {}".
             format(type(myTrainerClass)))
    log.info("ModuleRegister, myTrainerClass dir is {}".
             format(dir(myTrainerClass)))

    myInitializeModelModule = getModule(opts['model']['model_name_py'])

            

Reported by Pylint.

Use lazy % formatting in logging functions
Error

Line: 27 Column: 14

                           format(myTrainerClass.__name__))
    log.info("ModuleRegister, myTrainerClass type is {}".
             format(type(myTrainerClass)))
    log.info("ModuleRegister, myTrainerClass dir is {}".
             format(dir(myTrainerClass)))

    myInitializeModelModule = getModule(opts['model']['model_name_py'])
    log.info("ModuleRegister, myInitializeModelModule dir is {}".
             format(dir(myInitializeModelModule)))

            

Reported by Pylint.

Use lazy % formatting in logging functions
Error

Line: 31 Column: 14

                           format(dir(myTrainerClass)))

    myInitializeModelModule = getModule(opts['model']['model_name_py'])
    log.info("ModuleRegister, myInitializeModelModule dir is {}".
             format(dir(myInitializeModelModule)))

    myTrainerClass.init_model = myInitializeModelModule.init_model
    myTrainerClass.run_training_net = myInitializeModelModule.run_training_net
    myTrainerClass.fun_per_iter_b4RunNet = \

            

Reported by Pylint.

Use lazy % formatting in logging functions
Error

Line: 42 Column: 14

                      myInitializeModelModule.fun_per_epoch_b4RunNet

    myInputModule = getModule(opts['input']['input_name_py'])
    log.info("ModuleRegister, myInputModule {} dir is {}".
             format(opts['input']['input_name_py'], myInputModule.__name__))

    # Override input methods of the myTrainerClass class
    myTrainerClass.get_input_dataset = myInputModule.get_input_dataset
    myTrainerClass.get_model_input_fun = myInputModule.get_model_input_fun

            

Reported by Pylint.

Use lazy % formatting in logging functions
Error

Line: 73 Column: 14

                  # override output module
    myOutputModule = getModule(opts['output']['gen_output_py'])

    log.info("ModuleRegister, myOutputModule is {}".
             format(myOutputModule.__name__))
    myTrainerClass.fun_conclude_operator = myOutputModule.fun_conclude_operator
    myTrainerClass.assembleAllOutputs = myOutputModule.assembleAllOutputs

    return myTrainerClass

            

Reported by Pylint.

Use lazy % formatting in logging functions
Error

Line: 82 Column: 14

              

def overrideAdditionalMethods(myTrainerClass, opts):
    log.info("B4 additional override myTrainerClass source {}".
        format(inspect.getsource(myTrainerClass)))
    # override any additional modules
    myAdditionalOverride = getModule(opts['model']['additional_override_py'])
    if myAdditionalOverride is not None:
        for funcName, funcValue in inspect.getmembers(myAdditionalOverride,

            

Reported by Pylint.

Use lazy % formatting in logging functions
Error

Line: 90 Column: 14

                      for funcName, funcValue in inspect.getmembers(myAdditionalOverride,
                                                      inspect.isfunction):
            setattr(myTrainerClass, funcName, funcValue)
    log.info("Aft additional override myTrainerClass's source {}".
        format(inspect.getsource(myTrainerClass)))
    return myTrainerClass


def getModule(moduleName):

            

Reported by Pylint.

Use lazy % formatting in logging functions
Error

Line: 96 Column: 14

              

def getModule(moduleName):
    log.info("get module {} from MODULE_MAPS content {}".format(moduleName, str(MODULE_MAPS)))
    myModule = None
    for ModuleMap in MODULE_MAPS:
        log.info("iterate through MODULE_MAPS content {}".
                 format(str(ModuleMap)))
        for name, obj in inspect.getmembers(ModuleMap):

            

Reported by Pylint.

torch/distributed/run.py
46 issues
Use lazy % formatting in logging functions
Error

Line: 541 Column: 9

              
def determine_local_world_size(nproc_per_node: str):
    try:
        logging.info(f"Using nproc_per_node={nproc_per_node}.")
        return int(nproc_per_node)
    except ValueError:
        if nproc_per_node == "cpu":
            num_proc = os.cpu_count()
            device_type = "cpu"

            

Reported by Pylint.

Consider explicitly re-raising using the 'from' keyword
Error

Line: 549 Column: 17

                          device_type = "cpu"
        elif nproc_per_node == "gpu":
            if not torch.cuda.is_available():
                raise ValueError("Cuda is not available.")
            device_type = "gpu"
            num_proc = torch.cuda.device_count()
        elif nproc_per_node == "auto":
            if torch.cuda.is_available():
                num_proc = torch.cuda.device_count()

            

Reported by Pylint.

Consider explicitly re-raising using the 'from' keyword
Error

Line: 560 Column: 13

                              num_proc = os.cpu_count()
                device_type = "cpu"
        else:
            raise ValueError(f"Unsupported nproc_per_node value: {nproc_per_node}")

        log.info(
            f"Using nproc_per_node={nproc_per_node},"
            f" seting to {num_proc} since the instance "
            f"has {os.cpu_count()} {device_type}"

            

Reported by Pylint.

Use lazy % formatting in logging functions
Error

Line: 562 Column: 9

                      else:
            raise ValueError(f"Unsupported nproc_per_node value: {nproc_per_node}")

        log.info(
            f"Using nproc_per_node={nproc_per_node},"
            f" seting to {num_proc} since the instance "
            f"has {os.cpu_count()} {device_type}"
        )
        return num_proc

            

Reported by Pylint.

Redefining name 'sys' from outer scope (line 310)
Error

Line: 667 Column: 5

                  Usage: `script_as_function("/abs/path/to/script.py", "--arg1", "val1")`
    """
    import runpy
    import sys

    sys.argv = [training_script] + [*training_script_args]
    runpy.run_path(sys.argv[0], run_name="__main__")



            

Reported by Pylint.

Reimport 'sys' (imported line 310)
Error

Line: 667 Column: 5

                  Usage: `script_as_function("/abs/path/to/script.py", "--arg1", "val1")`
    """
    import runpy
    import sys

    sys.argv = [training_script] + [*training_script_args]
    runpy.run_path(sys.argv[0], run_name="__main__")



            

Reported by Pylint.

Use lazy % formatting in logging functions
Error

Line: 678 Column: 9

                      args.rdzv_backend = "c10d"
        args.rdzv_endpoint = "localhost:29400"
        args.rdzv_id = str(uuid.uuid4())
        log.info(
            f"\n**************************************\n"
            f"Rendezvous info:\n"
            f"--rdzv_backend={args.rdzv_backend} "
            f"--rdzv_endpoint={args.rdzv_endpoint} "
            f"--rdzv_id={args.rdzv_id}\n"

            

Reported by Pylint.

Line too long (107/100)
Error

Line: 29 Column: 1

              for ``--use_env`` which is now deprecated. To migrate from ``torch.distributed.launch``
to ``torch.distributed.run`` follow these steps:

1.  If your training script is already reading ``local_rank`` from the ``LOCAL_RANK`` environment variable.
    Then you need simply omit the ``--use_env`` flag, e.g.:

    +--------------------------------------------------------------------+------------------------------------------------------+
    |         ``torch.distributed.launch``                               |            ``torch.distributed.run``                 |
    +====================================================================+======================================================+

            

Reported by Pylint.

Line too long (129/100)
Error

Line: 32 Column: 1

              1.  If your training script is already reading ``local_rank`` from the ``LOCAL_RANK`` environment variable.
    Then you need simply omit the ``--use_env`` flag, e.g.:

    +--------------------------------------------------------------------+------------------------------------------------------+
    |         ``torch.distributed.launch``                               |            ``torch.distributed.run``                 |
    +====================================================================+======================================================+
    |                                                                    |                                                      |
    | .. code-block:: shell-session                                      | .. code-block:: shell-session                        |
    |                                                                    |                                                      |

            

Reported by Pylint.

Line too long (129/100)
Error

Line: 33 Column: 1

                  Then you need simply omit the ``--use_env`` flag, e.g.:

    +--------------------------------------------------------------------+------------------------------------------------------+
    |         ``torch.distributed.launch``                               |            ``torch.distributed.run``                 |
    +====================================================================+======================================================+
    |                                                                    |                                                      |
    | .. code-block:: shell-session                                      | .. code-block:: shell-session                        |
    |                                                                    |                                                      |
    |    $ python -m torch.distributed.launch --use_env train_script.py  |    $ python -m torch.distributed.run train_script.py |

            

Reported by Pylint.

torch/testing/_internal/common_modules.py
46 issues
Module 'torch' has no 'mm' member
Error

Line: 161 Column: 50

                  module_inputs = [
        ModuleInput(constructor_input=FunctionInput(10, 8),
                    forward_input=FunctionInput(make_input((4, 10))),
                    reference_fn=lambda m, p, i: torch.mm(i, p[0].t()) + p[1].view(1, -1).expand(4, 8)),
        ModuleInput(constructor_input=FunctionInput(10, 8, bias=False),
                    forward_input=FunctionInput(make_input((4, 10))),
                    desc='no_bias',
                    reference_fn=lambda m, p, i: torch.mm(i, p[0].t())),
        ModuleInput(constructor_input=FunctionInput(3, 5),

            

Reported by Pylint.

Module 'torch' has no 'mm' member
Error

Line: 165 Column: 50

                      ModuleInput(constructor_input=FunctionInput(10, 8, bias=False),
                    forward_input=FunctionInput(make_input((4, 10))),
                    desc='no_bias',
                    reference_fn=lambda m, p, i: torch.mm(i, p[0].t())),
        ModuleInput(constructor_input=FunctionInput(3, 5),
                    forward_input=FunctionInput(make_input(3)),
                    desc='no_batch_dim',
                    reference_fn=lambda m, p, i: torch.mm(i.view(1, -1), p[0].t()).view(-1) + p[1])
    ]

            

Reported by Pylint.

Module 'torch' has no 'mm' member
Error

Line: 169 Column: 50

                      ModuleInput(constructor_input=FunctionInput(3, 5),
                    forward_input=FunctionInput(make_input(3)),
                    desc='no_batch_dim',
                    reference_fn=lambda m, p, i: torch.mm(i.view(1, -1), p[0].t()).view(-1) + p[1])
    ]

    return module_inputs



            

Reported by Pylint.

Module 'torch' has no 'empty' member
Error

Line: 194 Column: 53

                      module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        forward_input=FunctionInput(make_input((15, 10)).log_softmax(dim=1),
                                                    torch.empty(15, device=device).uniform_().mul(10).floor().long()),
                        desc=desc,
                        reference_fn=reference_fn)
        )

    return module_inputs

            

Reported by Pylint.

Access to a protected member _ConvNd of a client class
Error

Line: 27 Column: 5

                  torch.nn.Module,  # abstract base class
    torch.nn.Container,  # deprecated
    torch.nn.NLLLoss2d,  # deprecated
    torch.nn.quantized.modules._ConvNd,  # abstract base class
    torch.nn.quantized.MaxPool2d,  # aliases to nn.MaxPool2d
}

# List of all module classes to test.
MODULE_CLASSES: List[Type] = list(chain(*[

            

Reported by Pylint.

TODO: Factor some of this out since it's similar to OpInfo.
Error

Line: 54 Column: 3

              
    def _parametrize_test(self, test, generic_cls, device_cls):
        for module_info in self.module_info_list:
            # TODO: Factor some of this out since it's similar to OpInfo.
            for dtype in floating_types():
                # Construct the test name.
                test_name = '{}_{}_{}{}'.format(test.__name__,
                                                module_info.name.replace('.', '_'),
                                                device_cls.device_type,

            

Reported by Pylint.

Redefining name 'module_cls' from outer scope (line 42)
Error

Line: 95 Column: 27

                                  raise ex


def formatted_module_name(module_cls):
    """ Returns the common name of the module class formatted for use in test names. """
    return MODULE_CLASS_NAMES[module_cls].replace('.', '_')


class FunctionInput(object):

            

Reported by Pylint.

Redefining name 'module_cls' from outer scope (line 42)
Error

Line: 136 Column: 18

                  """ Module information to be used in testing. """

    def __init__(self,
                 module_cls,  # Class object for the module under test
                 *,
                 module_inputs_func,  # Function to generate module inputs
                 skips=(),  # Indicates which tests to skip
                 decorators=None,  # Additional decorators to apply to generated tests
                 ):

            

Reported by Pylint.

Unused argument 'kwargs'
Error

Line: 155 Column: 1

                      return formatted_module_name(self.module_cls)


def module_inputs_torch_nn_Linear(module_info, device, dtype, requires_grad, **kwargs):
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    module_inputs = [
        ModuleInput(constructor_input=FunctionInput(10, 8),
                    forward_input=FunctionInput(make_input((4, 10))),

            

Reported by Pylint.

Unused argument 'module_info'
Error

Line: 155 Column: 35

                      return formatted_module_name(self.module_cls)


def module_inputs_torch_nn_Linear(module_info, device, dtype, requires_grad, **kwargs):
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    module_inputs = [
        ModuleInput(constructor_input=FunctionInput(10, 8),
                    forward_input=FunctionInput(make_input((4, 10))),

            

Reported by Pylint.

test/onnx/export_onnx_tests_generator.py
46 issues
Unable to import 'torch.autograd'
Error

Line: 1 Column: 1

              from torch.autograd import Variable
from onnx import numpy_helper

import io
import onnx
import os
import shutil
import torch
import traceback

            

Reported by Pylint.

Unable to import 'onnx'
Error

Line: 2 Column: 1

              from torch.autograd import Variable
from onnx import numpy_helper

import io
import onnx
import os
import shutil
import torch
import traceback

            

Reported by Pylint.

Unable to import 'onnx'
Error

Line: 5 Column: 1

              from onnx import numpy_helper

import io
import onnx
import os
import shutil
import torch
import traceback


            

Reported by Pylint.

Unable to import 'torch'
Error

Line: 8 Column: 1

              import onnx
import os
import shutil
import torch
import traceback

import test_onnx_common
from torch.testing._internal.common_nn import module_tests
from test_nn import new_module_tests

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_nn'
Error

Line: 12 Column: 1

              import traceback

import test_onnx_common
from torch.testing._internal.common_nn import module_tests
from test_nn import new_module_tests


# Take a test case (a dict) as input, return the test name.
def get_test_name(testcase):

            

Reported by Pylint.

Unable to import 'test_nn'
Error

Line: 13 Column: 1

              
import test_onnx_common
from torch.testing._internal.common_nn import module_tests
from test_nn import new_module_tests


# Take a test case (a dict) as input, return the test name.
def get_test_name(testcase):
    if "fullname" in testcase:

            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 34 Column: 9

                          testcase["input_size"] = (1,)
        return Variable(torch.randn(*testcase["input_size"]))
    elif "input_fn" in testcase:
        input = testcase["input_fn"]()
        if isinstance(input, Variable):
            return input
        return Variable(testcase["input_fn"]())



            

Reported by Pylint.

Redefining name 'testcases' from outer scope (line 139)
Error

Line: 81 Column: 19

                      fun(info, l)


def convert_tests(testcases, sets=1):
    print("Collect {} test cases from PyTorch.".format(len(testcases)))
    failed = 0
    FunctionalModule_nums = 0
    nn_module = {}
    for t in testcases:

            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 96 Column: 13

                          if (module_name not in nn_module):
                nn_module[module_name] = 0
        try:
            input = gen_input(t)
            f = io.BytesIO()
            torch.onnx._export(module, input, f,
                               operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)
            onnx_model = onnx.load_from_string(f.getvalue())
            onnx.checker.check_model(onnx_model)

            

Reported by Pylint.

Access to a protected member _export of a client class
Error

Line: 98 Column: 13

                      try:
            input = gen_input(t)
            f = io.BytesIO()
            torch.onnx._export(module, input, f,
                               operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)
            onnx_model = onnx.load_from_string(f.getvalue())
            onnx.checker.check_model(onnx_model)
            onnx.helper.strip_doc_string(onnx_model)
            output_dir = os.path.join(test_onnx_common.pytorch_converted_dir, test_name)

            

Reported by Pylint.

caffe2/python/models/seq2seq/seq2seq_util.py
46 issues
Unable to import 'caffe2.proto.caffe2_pb2'
Error

Line: 13 Column: 1

              import collections
from future.utils import viewitems

import caffe2.proto.caffe2_pb2 as caffe2_pb2
from caffe2.python import attention, core, rnn_cell, brew


PAD_ID = 0
PAD = '<PAD>'

            

Reported by Pylint.

Statement seems to have no effect
Error

Line: 31 Column: 5

                  vocab = collections.defaultdict(lambda: len(vocab))
    freqs = collections.defaultdict(lambda: 0)
    # Adding padding tokens to the vocabulary to maintain consistency with IDs
    vocab[PAD]
    vocab[GO]
    vocab[EOS]
    vocab[UNK]

    with open(corpus) as f:

            

Reported by Pylint.

Statement seems to have no effect
Error

Line: 32 Column: 5

                  freqs = collections.defaultdict(lambda: 0)
    # Adding padding tokens to the vocabulary to maintain consistency with IDs
    vocab[PAD]
    vocab[GO]
    vocab[EOS]
    vocab[UNK]

    with open(corpus) as f:
        for sentence in f:

            

Reported by Pylint.

Statement seems to have no effect
Error

Line: 33 Column: 5

                  # Adding padding tokens to the vocabulary to maintain consistency with IDs
    vocab[PAD]
    vocab[GO]
    vocab[EOS]
    vocab[UNK]

    with open(corpus) as f:
        for sentence in f:
            tokens = sentence.strip().split()

            

Reported by Pylint.

Statement seems to have no effect
Error

Line: 34 Column: 5

                  vocab[PAD]
    vocab[GO]
    vocab[EOS]
    vocab[UNK]

    with open(corpus) as f:
        for sentence in f:
            tokens = sentence.strip().split()
            for token in tokens:

            

Reported by Pylint.

Statement seems to have no effect
Error

Line: 43 Column: 13

                              freqs[token] += 1
    for token, freq in viewitems(freqs):
        if freq > unk_threshold:
            vocab[token]

    return vocab


def get_numberized_sentence(sentence, vocab):

            

Reported by Pylint.

Unused argument 'vocab_size'
Error

Line: 230 Column: 5

                  num_decoder_layers,
    inputs,
    input_lengths,
    vocab_size,
    embeddings,
    embedding_size,
    use_attention,
    num_gpus=0,
    forward_only=False,

            

Reported by Pylint.

Unused argument 'vocab_size'
Error

Line: 338 Column: 9

                      encoder_outputs,
        encoder_output_dim,
        encoder_lengths,
        vocab_size,
        attention_type,
        embedding_size,
        decoder_num_units,
        decoder_cells,
        residual_output_layers=None,

            

Reported by Pylint.

Unused argument 'embedding_size'
Error

Line: 340 Column: 9

                      encoder_lengths,
        vocab_size,
        attention_type,
        embedding_size,
        decoder_num_units,
        decoder_cells,
        residual_output_layers=None,
        name=None,
        weighted_encoder_outputs=None,

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 27 Column: 1

              UNK = '<UNK>'


def gen_vocab(corpus, unk_threshold):
    vocab = collections.defaultdict(lambda: len(vocab))
    freqs = collections.defaultdict(lambda: 0)
    # Adding padding tokens to the vocabulary to maintain consistency with IDs
    vocab[PAD]
    vocab[GO]

            

Reported by Pylint.

test/scripts/run_cuda_memcheck.py
46 issues
Unable to import 'torch'
Error

Line: 16 Column: 1

              """

import asyncio
import torch
import multiprocessing
import argparse
import subprocess
import tqdm
import os

            

Reported by Pylint.

Unable to import 'tqdm'
Error

Line: 20 Column: 1

              import multiprocessing
import argparse
import subprocess
import tqdm
import os
import sys
import cuda_memcheck_common as cmc

ALL_TESTS = []

            

Reported by Pylint.

TODO (@zasdfgbnm): When can we remove this? Will cublas/cudnn run error-free under cuda-memcheck?
Error

Line: 49 Column: 3

              args = parser.parse_args()

# Filters that ignores cublas/cudnn errors
# TODO (@zasdfgbnm): When can we remove this? Will cublas/cudnn run error-free under cuda-memcheck?
def is_ignored_only(output):
    try:
        report = cmc.parse(output)
    except cmc.ParseError:
        # in case the simple parser fails parsing the output of cuda memcheck

            

Reported by Pylint.

Redefining name 'args' from outer scope (line 46)
Error

Line: 111 Column: 1

              
    # create a fake progress bar that does not display anything
    class ProgressbarStub:
        def update(self, *args):
            return
    progressbar = ProgressbarStub()

async def run1(coroutine_id):
    global progress

            

Reported by Pylint.

Unused argument 'args'
Error

Line: 111 Column: 1

              
    # create a fake progress bar that does not display anything
    class ProgressbarStub:
        def update(self, *args):
            return
    progressbar = ProgressbarStub()

async def run1(coroutine_id):
    global progress

            

Reported by Pylint.

Using the global statement
Error

Line: 116 Column: 5

                  progressbar = ProgressbarStub()

async def run1(coroutine_id):
    global progress

    if args.gpus == 'all':
        gpuid = coroutine_id % GPUS
    else:
        gpu_assignments = args.gpus.split(':')

            

Reported by Pylint.

Redefining name 'proc' from outer scope (line 70)
Error

Line: 129 Column: 9

                      test = ALL_TESTS[progress]
        progress += 1
        cmd = f'CUDA_VISIBLE_DEVICES={gpuid} cuda-memcheck --error-exitcode 1 python {args.filename} {test}'
        proc = await asyncio.create_subprocess_shell(cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
        try:
            stdout, stderr = await asyncio.wait_for(proc.communicate(), args.timeout)
        except asyncio.TimeoutError:
            print('Timeout:', test, file=logfile)
            proc.kill()

            

Reported by Pylint.

Redefining name 'stdout' from outer scope (line 71)
Error

Line: 131 Column: 13

                      cmd = f'CUDA_VISIBLE_DEVICES={gpuid} cuda-memcheck --error-exitcode 1 python {args.filename} {test}'
        proc = await asyncio.create_subprocess_shell(cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
        try:
            stdout, stderr = await asyncio.wait_for(proc.communicate(), args.timeout)
        except asyncio.TimeoutError:
            print('Timeout:', test, file=logfile)
            proc.kill()
            if args.ci and not args.nohang:
                sys.exit("Hang detected on cuda-memcheck")

            

Reported by Pylint.

Redefining name 'stderr' from outer scope (line 71)
Error

Line: 131 Column: 21

                      cmd = f'CUDA_VISIBLE_DEVICES={gpuid} cuda-memcheck --error-exitcode 1 python {args.filename} {test}'
        proc = await asyncio.create_subprocess_shell(cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
        try:
            stdout, stderr = await asyncio.wait_for(proc.communicate(), args.timeout)
        except asyncio.TimeoutError:
            print('Timeout:', test, file=logfile)
            proc.kill()
            if args.ci and not args.nohang:
                sys.exit("Hang detected on cuda-memcheck")

            

Reported by Pylint.

standard import "import multiprocessing" should be placed before "import torch"
Error

Line: 17 Column: 1

              
import asyncio
import torch
import multiprocessing
import argparse
import subprocess
import tqdm
import os
import sys

            

Reported by Pylint.

caffe2/python/operator_test/box_with_nms_limit_op_test.py
46 issues
Unable to import 'hypothesis'
Error

Line: 9 Column: 1

              from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import unittest
import numpy as np



            

Reported by Pylint.

Unable to import 'hypothesis.strategies'
Error

Line: 10 Column: 1

              import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import unittest
import numpy as np


def get_op(input_len, output_len, args):

            

Reported by Pylint.

Redefining built-in 'len'
Error

Line: 39 Column: 5

              

def gen_boxes(count, center):
    len = 10
    len_half = len / 2.0
    ret = np.tile(
        np.array(
            [center[0] - len_half, center[1] - len_half,
            center[0] + len_half, center[1] + len_half]

            

Reported by Pylint.

Unused argument 'kwargs'
Error

Line: 80 Column: 1

              
        op = get_op(2, 3, {"score_thresh": 0.5, "nms": 0.9})

        def ref(*args, **kwargs):
            return (gt_scores.flatten(), gt_boxes, gt_classes)

        self.assertReferenceChecks(gc, op, [scores, boxes], ref)

    @given(**HU_CONFIG)

            

Reported by Pylint.

Unused argument 'args'
Error

Line: 80 Column: 1

              
        op = get_op(2, 3, {"score_thresh": 0.5, "nms": 0.9})

        def ref(*args, **kwargs):
            return (gt_scores.flatten(), gt_boxes, gt_classes)

        self.assertReferenceChecks(gc, op, [scores, boxes], ref)

    @given(**HU_CONFIG)

            

Reported by Pylint.

Unused argument 'kwargs'
Error

Line: 99 Column: 1

              
        op = get_op(2, 3, {"score_thresh": 0.8, "nms": 0.9})

        def ref(*args, **kwargs):
            return (gt_scores.flatten(), gt_boxes, gt_classes)

        self.assertReferenceChecks(gc, op, [scores, boxes], ref)

    @given(det_per_im=st.integers(1, 3), **HU_CONFIG)

            

Reported by Pylint.

Unused argument 'args'
Error

Line: 99 Column: 1

              
        op = get_op(2, 3, {"score_thresh": 0.8, "nms": 0.9})

        def ref(*args, **kwargs):
            return (gt_scores.flatten(), gt_boxes, gt_classes)

        self.assertReferenceChecks(gc, op, [scores, boxes], ref)

    @given(det_per_im=st.integers(1, 3), **HU_CONFIG)

            

Reported by Pylint.

Unused argument 'kwargs'
Error

Line: 121 Column: 1

                          {"score_thresh": 0.5, "nms": 0.9, "detections_per_im": det_per_im}
        )

        def ref(*args, **kwargs):
            return (gt_scores.flatten(), gt_boxes, gt_classes)

        self.assertReferenceChecks(gc, op, [scores, boxes], ref)

    @given(

            

Reported by Pylint.

Unused argument 'args'
Error

Line: 121 Column: 1

                          {"score_thresh": 0.5, "nms": 0.9, "detections_per_im": det_per_im}
        )

        def ref(*args, **kwargs):
            return (gt_scores.flatten(), gt_boxes, gt_classes)

        self.assertReferenceChecks(gc, op, [scores, boxes], ref)

    @given(

            

Reported by Pylint.

Unused argument 'args'
Error

Line: 183 Column: 1

                          }
        )

        def ref(*args, **kwargs):
            return (gt_scores, gt_boxes, gt_classes)

        self.assertReferenceChecks(gc, op, [scores, boxes], ref)

    @given(det_per_im=st.integers(1, 3), **HU_CONFIG)

            

Reported by Pylint.

caffe2/core/nomnigraph/op_gen.py
46 issues
Redefining name 'lines' from outer scope (line 223)
Error

Line: 13 Column: 17

              from subprocess import call


def parse_lines(lines):
    # States
    EMPTY = 0
    OP = 1
    MACRO = 2
    parse_state = EMPTY

            

Reported by Pylint.

Redefining name 'ops' from outer scope (line 228)
Error

Line: 63 Column: 5

                  curr_op = ""
    # dict of the form
    #  opName : { attributes: [], ... }
    ops = {}
    # To preserve parsing order for dependencies (for things like init_from)
    op_list = []

    for line in lines:
        if not len(line):

            

Reported by Pylint.

Redefining name 'op_list' from outer scope (line 228)
Error

Line: 65 Column: 5

                  #  opName : { attributes: [], ... }
    ops = {}
    # To preserve parsing order for dependencies (for things like init_from)
    op_list = []

    for line in lines:
        if not len(line):
            continue
        if line[0] == "-":

            

Reported by Pylint.

Redefining name 'op_list' from outer scope (line 228)
Error

Line: 190 Column: 22

                  )


def gen_classes(ops, op_list):
    f = ""
    for op in op_list:
        f += gen_class(op, ops[op])
    return f


            

Reported by Pylint.

Redefining name 'ops' from outer scope (line 228)
Error

Line: 190 Column: 17

                  )


def gen_classes(ops, op_list):
    f = ""
    for op in op_list:
        f += gen_class(op, ops[op])
    return f


            

Reported by Pylint.

Redefining name 'f' from outer scope (line 225)
Error

Line: 191 Column: 5

              

def gen_classes(ops, op_list):
    f = ""
    for op in op_list:
        f += gen_class(op, ops[op])
    return f



            

Reported by Pylint.

Redefining name 'op_list' from outer scope (line 228)
Error

Line: 197 Column: 14

                  return f


def gen_enum(op_list):
    return ",\n".join([op for op in op_list]) + "\n"


def gen_names(op_list):
    f = ""

            

Reported by Pylint.

Redefining name 'op_list' from outer scope (line 228)
Error

Line: 201 Column: 15

                  return ",\n".join([op for op in op_list]) + "\n"


def gen_names(op_list):
    f = ""
    for op in op_list:
        f += dedent(
            """
            case NNKind::{name}:

            

Reported by Pylint.

Redefining name 'f' from outer scope (line 225)
Error

Line: 202 Column: 5

              

def gen_names(op_list):
    f = ""
    for op in op_list:
        f += dedent(
            """
            case NNKind::{name}:
                return \"{name}\";

            

Reported by Pylint.

Catching too general exception Exception
Error

Line: 244 Column: 12

                      call(cmd)
        cmd = ["clang-format", "-i", install_dir + "/OpEnum.h"]
        call(cmd)
    except Exception:
        pass

            

Reported by Pylint.