The following issues were found:

test/test_optim.py
479 issues
Unable to import 'torch'
Error

Line: 6 Column: 1

              import unittest
import functools
from copy import deepcopy
import torch
from torch._six import inf
import torch.optim as optim
import torch.optim._multi_tensor as optim_mt
import torch.nn.functional as F
from torch.optim import SGD

            

Reported by Pylint.
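
These import-error findings (Pylint code E0401) almost always mean that Pylint is running in an interpreter environment where torch is not installed or not importable, not that the test file itself is broken. A hedged sketch of how a known-good import can be waived per line while the lint environment is sorted out (the directive only affects linting, never runtime):

# Sanity check, run with the same interpreter Pylint uses:
#     python -c "import torch; print(torch.__version__)"
# If torch imports fine there but Pylint still reports E0401, the import
# can be whitelisted line by line:
import torch  # pylint: disable=import-error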

Unable to import 'torch._six'
Error

Line: 7 Column: 1

              import functools
from copy import deepcopy
import torch
from torch._six import inf
import torch.optim as optim
import torch.optim._multi_tensor as optim_mt
import torch.nn.functional as F
from torch.optim import SGD
from torch.autograd import Variable

            

Reported by Pylint.

Unable to import 'torch.optim'
Error

Line: 8 Column: 1

              from copy import deepcopy
import torch
from torch._six import inf
import torch.optim as optim
import torch.optim._multi_tensor as optim_mt
import torch.nn.functional as F
from torch.optim import SGD
from torch.autograd import Variable
from torch import sparse

            

Reported by Pylint.

Unable to import 'torch.optim._multi_tensor'
Error

Line: 9 Column: 1

              import torch
from torch._six import inf
import torch.optim as optim
import torch.optim._multi_tensor as optim_mt
import torch.nn.functional as F
from torch.optim import SGD
from torch.autograd import Variable
from torch import sparse
from torch.optim.lr_scheduler import LambdaLR, MultiplicativeLR, StepLR, \

            

Reported by Pylint.

Unable to import 'torch.nn.functional'
Error

Line: 10 Column: 1

              from torch._six import inf
import torch.optim as optim
import torch.optim._multi_tensor as optim_mt
import torch.nn.functional as F
from torch.optim import SGD
from torch.autograd import Variable
from torch import sparse
from torch.optim.lr_scheduler import LambdaLR, MultiplicativeLR, StepLR, \
    MultiStepLR, WarmUpLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, \

            

Reported by Pylint.

Unable to import 'torch.optim'
Error

Line: 11 Column: 1

              import torch.optim as optim
import torch.optim._multi_tensor as optim_mt
import torch.nn.functional as F
from torch.optim import SGD
from torch.autograd import Variable
from torch import sparse
from torch.optim.lr_scheduler import LambdaLR, MultiplicativeLR, StepLR, \
    MultiStepLR, WarmUpLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, \
    _LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR

            

Reported by Pylint.

Unable to import 'torch.autograd'
Error

Line: 12 Column: 1

              import torch.optim._multi_tensor as optim_mt
import torch.nn.functional as F
from torch.optim import SGD
from torch.autograd import Variable
from torch import sparse
from torch.optim.lr_scheduler import LambdaLR, MultiplicativeLR, StepLR, \
    MultiStepLR, WarmUpLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, \
    _LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn

            

Reported by Pylint.

Unable to import 'torch'
Error

Line: 13 Column: 1

              import torch.nn.functional as F
from torch.optim import SGD
from torch.autograd import Variable
from torch import sparse
from torch.optim.lr_scheduler import LambdaLR, MultiplicativeLR, StepLR, \
    MultiStepLR, WarmUpLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, \
    _LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_UBSAN, load_tests, \

            

Reported by Pylint.

Unable to import 'torch.optim.lr_scheduler'
Error

Line: 14 Column: 1

              from torch.optim import SGD
from torch.autograd import Variable
from torch import sparse
from torch.optim.lr_scheduler import LambdaLR, MultiplicativeLR, StepLR, \
    MultiStepLR, WarmUpLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, \
    _LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_UBSAN, load_tests, \
    skipIfRocm

            

Reported by Pylint.

Unable to import 'torch.optim.swa_utils'
Error

Line: 17 Column: 1

              from torch.optim.lr_scheduler import LambdaLR, MultiplicativeLR, StepLR, \
    MultiStepLR, WarmUpLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, \
    _LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_UBSAN, load_tests, \
    skipIfRocm

# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings

            

Reported by Pylint.

test/test_fx_experimental.py
463 issues
Unable to import 'torch'
Error

Line: 8 Column: 1

              import unittest
from typing import Callable, Dict, Union, List, Optional

import torch
import torch.fx.experimental.optimization as optimization
from torch.fx._symbolic_trace import symbolic_trace
from torch.fx.experimental import graph_manipulation
from torch.fx.experimental import merge_matmul
from torch.fx.experimental.accelerator_partitioner import Partitioner

            

Reported by Pylint.

Unable to import 'torch.fx.experimental.optimization'
Error

Line: 9 Column: 1

              from typing import Callable, Dict, Union, List, Optional

import torch
import torch.fx.experimental.optimization as optimization
from torch.fx._symbolic_trace import symbolic_trace
from torch.fx.experimental import graph_manipulation
from torch.fx.experimental import merge_matmul
from torch.fx.experimental.accelerator_partitioner import Partitioner
from torch.fx.experimental.normalize import NormalizeOperators, NormalizeArgs

            

Reported by Pylint.

Unable to import 'torch.fx._symbolic_trace'
Error

Line: 10 Column: 1

              
import torch
import torch.fx.experimental.optimization as optimization
from torch.fx._symbolic_trace import symbolic_trace
from torch.fx.experimental import graph_manipulation
from torch.fx.experimental import merge_matmul
from torch.fx.experimental.accelerator_partitioner import Partitioner
from torch.fx.experimental.normalize import NormalizeOperators, NormalizeArgs
from torch.fx.experimental.param_fetch import lift_lowering_attrs_to_nodes

            

Reported by Pylint.

Unable to import 'torch.fx.experimental'
Error

Line: 11 Column: 1

              import torch
import torch.fx.experimental.optimization as optimization
from torch.fx._symbolic_trace import symbolic_trace
from torch.fx.experimental import graph_manipulation
from torch.fx.experimental import merge_matmul
from torch.fx.experimental.accelerator_partitioner import Partitioner
from torch.fx.experimental.normalize import NormalizeOperators, NormalizeArgs
from torch.fx.experimental.param_fetch import lift_lowering_attrs_to_nodes
from torch.fx.experimental.partitioner_utils import (

            

Reported by Pylint.

Unable to import 'torch.fx.experimental'
Error

Line: 12 Column: 1

              import torch.fx.experimental.optimization as optimization
from torch.fx._symbolic_trace import symbolic_trace
from torch.fx.experimental import graph_manipulation
from torch.fx.experimental import merge_matmul
from torch.fx.experimental.accelerator_partitioner import Partitioner
from torch.fx.experimental.normalize import NormalizeOperators, NormalizeArgs
from torch.fx.experimental.param_fetch import lift_lowering_attrs_to_nodes
from torch.fx.experimental.partitioner_utils import (
    NodeLatency,

            

Reported by Pylint.

Unable to import 'torch.fx.experimental.accelerator_partitioner'
Error

Line: 13 Column: 1

              from torch.fx._symbolic_trace import symbolic_trace
from torch.fx.experimental import graph_manipulation
from torch.fx.experimental import merge_matmul
from torch.fx.experimental.accelerator_partitioner import Partitioner
from torch.fx.experimental.normalize import NormalizeOperators, NormalizeArgs
from torch.fx.experimental.param_fetch import lift_lowering_attrs_to_nodes
from torch.fx.experimental.partitioner_utils import (
    NodeLatency,
    get_partition_to_latency_mapping,

            

Reported by Pylint.

Unable to import 'torch.fx.experimental.normalize'
Error

Line: 14 Column: 1

              from torch.fx.experimental import graph_manipulation
from torch.fx.experimental import merge_matmul
from torch.fx.experimental.accelerator_partitioner import Partitioner
from torch.fx.experimental.normalize import NormalizeOperators, NormalizeArgs
from torch.fx.experimental.param_fetch import lift_lowering_attrs_to_nodes
from torch.fx.experimental.partitioner_utils import (
    NodeLatency,
    get_partition_to_latency_mapping,
    get_latency_of_partitioned_graph,

            

Reported by Pylint.

Unable to import 'torch.fx.experimental.param_fetch'
Error

Line: 15 Column: 1

              from torch.fx.experimental import merge_matmul
from torch.fx.experimental.accelerator_partitioner import Partitioner
from torch.fx.experimental.normalize import NormalizeOperators, NormalizeArgs
from torch.fx.experimental.param_fetch import lift_lowering_attrs_to_nodes
from torch.fx.experimental.partitioner_utils import (
    NodeLatency,
    get_partition_to_latency_mapping,
    get_latency_of_partitioned_graph,
    Device,

            

Reported by Pylint.

Unable to import 'torch.fx.experimental.partitioner_utils'
Error

Line: 16 Column: 1

              from torch.fx.experimental.accelerator_partitioner import Partitioner
from torch.fx.experimental.normalize import NormalizeOperators, NormalizeArgs
from torch.fx.experimental.param_fetch import lift_lowering_attrs_to_nodes
from torch.fx.experimental.partitioner_utils import (
    NodeLatency,
    get_partition_to_latency_mapping,
    get_latency_of_partitioned_graph,
    Device,
    PartitionerConfig,

            

Reported by Pylint.

Unable to import 'torch.fx.experimental.rewriter'
Error

Line: 24 Column: 1

                  PartitionerConfig,
    PartitionMode,
)
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx.experimental.schema_type_annotation import AnnotateTypesWithSchema
from torch.fx.graph_module import GraphModule
from torch.fx.node import Node
from torch.fx.operator_schemas import (
    _torchscript_type_to_python_type,

            

Reported by Pylint.

test/jit/test_save_load.py
453 issues
Unable to import 'torch'
Error

Line: 9 Column: 1

              import random
import sys

from torch import Tensor
from torch.testing._internal.common_utils import TemporaryFileName
import torch

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 10 Column: 1

              import sys

from torch import Tensor
from torch.testing._internal.common_utils import TemporaryFileName
import torch

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)

            

Reported by Pylint.

Unable to import 'torch'
Error

Line: 11 Column: 1

              
from torch import Tensor
from torch.testing._internal.common_utils import TemporaryFileName
import torch

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import (JitTestCase,

            

Reported by Pylint.

Unable to import 'torch.testing._internal.jit_utils'
Error

Line: 16 Column: 1

              # Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import (JitTestCase,
                                               clear_class_registry)

if __name__ == "__main__":
    raise RuntimeError(
        "This test file is not meant to be run directly, use:\n\n"

            

Reported by Pylint.

Bad first argument 'Foo' given to super()
Error

Line: 570 Column: 17

                      """
        class Foo(torch.nn.Module):
            def __init__(self):
                super(Foo, self).__init__()
                self.foo = torch.nn.Linear(2, 2)
                self.bar = torch.nn.Linear(2, 2)

            def forward(self, x):
                x = self.foo(x)

            

Reported by Pylint.
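
The bad-super-call comes from this test defining two classes named Foo in the same scope (see the redefinition finding below), so the name Foo in super(Foo, self) may no longer refer to the enclosing class by the time __init__ runs. The zero-argument form avoids the name lookup entirely; a minimal sketch of the pattern, not the actual test code:

import torch

class Foo(torch.nn.Module):
    def __init__(self):
        # Zero-argument super() resolves via the __class__ cell of this class
        # statement, so a later rebinding of the name Foo cannot break it.
        super().__init__()
        self.foo = torch.nn.Linear(2, 2)
        self.bar = torch.nn.Linear(2, 2)

    def forward(self, x):
        return self.bar(self.foo(x))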

class already defined line 568
Error

Line: 586 Column: 9

              
        clear_class_registry()

        class Foo(torch.nn.Module):
            def __init__(self):
                super(Foo, self).__init__()
                self.foo = torch.nn.Linear(2, 2)

            def forward(self, x):

            

Reported by Pylint.

function already defined line 628
Error

Line: 642 Column: 9

              
        clear_class_registry()

        def lol(x):  # noqa: F811
            return "hello"

        class Foo(torch.nn.Module):
            def forward(self, x):
                return lol(x)

            

Reported by Pylint.

class already defined line 631
Error

Line: 645 Column: 9

                      def lol(x):  # noqa: F811
            return "hello"

        class Foo(torch.nn.Module):
            def forward(self, x):
                return lol(x)

        second_script_module = torch.jit.script(Foo())
        second_saved_module = io.BytesIO()

            

Reported by Pylint.

Instance of 'ImplementInterface' has no 'bar' member
Error

Line: 703 Column: 24

                              self.interface = ImplementInterface()

            def forward(self, x):
                return self.interface.bar(x)

        first_script_module = torch.jit.script(Foo())
        first_saved_module = io.BytesIO()
        torch.jit.save(first_script_module, first_saved_module)
        first_saved_module.seek(0)

            

Reported by Pylint.

class already defined line 683
Error

Line: 713 Column: 9

                      clear_class_registry()

        @torch.jit.interface
        class MyInterface(object):
            def not_bar(self, x: Tensor) -> Tensor:
                pass

        @torch.jit.script  # noqa: F811
        class ImplementInterface(object):  # noqa: F811

            

Reported by Pylint.
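
The already-defined, function-already-defined, and no-member findings in this file stem from the test deliberately re-registering classes and functions after clear_class_registry(); the source already marks this for flake8 with # noqa: F811. If the same intent should be conveyed to Pylint, the equivalent inline directive (assumed message name: function-redefined) would look like this sketch of a hypothetical redefinition:

def lol(x):
    return x

def lol(x):  # noqa: F811  # pylint: disable=function-redefined
    # The redefinition is intentional in the test; both linters are told so.
    return "hello"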

test/jit/test_module_interface.py
447 issues
Unable to import 'torch'
Error

Line: 5 Column: 1

              # TODO: enable linting check for this file

from typing import List, Any
import torch
import torch.nn as nn
import os
import sys
from torch import Tensor
from torch.testing._internal.jit_utils import JitTestCase, make_global

            

Reported by Pylint.

Unable to import 'torch.nn'
Error

Line: 6 Column: 1

              
from typing import List, Any
import torch
import torch.nn as nn
import os
import sys
from torch import Tensor
from torch.testing._internal.jit_utils import JitTestCase, make_global


            

Reported by Pylint.

Unable to import 'torch'
Error

Line: 9 Column: 1

              import torch.nn as nn
import os
import sys
from torch import Tensor
from torch.testing._internal.jit_utils import JitTestCase, make_global

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)

            

Reported by Pylint.

Unable to import 'torch.testing._internal.jit_utils'
Error

Line: 10 Column: 1

              import os
import sys
from torch import Tensor
from torch.testing._internal.jit_utils import JitTestCase, make_global

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, execWrapper

            

Reported by Pylint.

Unable to import 'torch.testing._internal.jit_utils'
Error

Line: 15 Column: 1

              # Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, execWrapper

if __name__ == '__main__':
    raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
                       "\tpython test/test_jit.py TESTNAME\n\n"
                       "instead.")

            

Reported by Pylint.

TODO: enable linting check for this file
Error

Line: 2 Column: 3

              # flake8: noqa
# TODO: enable linting check for this file

from typing import List, Any
import torch
import torch.nn as nn
import os
import sys
from torch import Tensor

            

Reported by Pylint.

Unused execWrapper imported from torch.testing._internal.jit_utils
Error

Line: 15 Column: 1

              # Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, execWrapper

if __name__ == '__main__':
    raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
                       "\tpython test/test_jit.py TESTNAME\n\n"
                       "instead.")

            

Reported by Pylint.

Reimport 'JitTestCase' (imported line 10)
Error

Line: 15 Column: 1

              # Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, execWrapper

if __name__ == '__main__':
    raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
                       "\tpython test/test_jit.py TESTNAME\n\n"
                       "instead.")

            

Reported by Pylint.
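
The unused-import and reimport findings point at the same statement: JitTestCase is imported a second time and execWrapper is never used. Assuming only JitTestCase and make_global are actually needed, a single consolidated import would clear both warnings:

# One import of the internal JIT test helpers is enough; execWrapper is unused.
from torch.testing._internal.jit_utils import JitTestCase, make_global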

Useless super delegation in method '__init__'
Error

Line: 23 Column: 5

                                     "instead.")

class OrigModule(nn.Module):
    def __init__(self):
        super(OrigModule, self).__init__()

    def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
        return inp1 + inp2 + 1


            

Reported by Pylint.
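
useless-super-delegation flags an __init__ whose only statement is the call to the parent's __init__; deleting it changes nothing, because Python falls back to nn.Module.__init__ automatically. A minimal sketch:

import torch.nn as nn
from torch import Tensor

class OrigModule(nn.Module):
    # No explicit __init__ needed: nn.Module.__init__ runs by default.

    def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
        return inp1 + inp2 + 1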

Redefining built-in 'input'
Error

Line: 29 Column: 19

                  def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
        return inp1 + inp2 + 1

    def two(self, input: Tensor) -> Tensor:
        return input + 2

    def forward(self, input: Tensor) -> Tensor:
        return input + self.one(input, input) + 1


            

Reported by Pylint.
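
redefined-builtin is a readability warning: the parameter named input shadows the built-in input() inside these methods. A sketch with the parameter renamed (note that any @torch.jit.interface declaration matching these methods may need the same parameter name, since TorchScript compares method signatures):

import torch.nn as nn
from torch import Tensor

class OrigModule(nn.Module):
    def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
        return inp1 + inp2 + 1

    def two(self, inp: Tensor) -> Tensor:
        # 'inp' instead of 'input' no longer shadows the built-in input().
        return inp + 2

    def forward(self, inp: Tensor) -> Tensor:
        return inp + self.one(inp, inp) + 1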

test/fx/test_gradual_type.py
446 issues
Unable to import 'torch'
Error

Line: 2 Column: 1

              import unittest
import torch
from torch.fx import symbolic_trace
from torch.fx.experimental.unify_refinements import infer_symbolic_types
from torch.fx.experimental.refinement_types import Equality
from torch.fx.tensor_type import TensorType, Dyn, is_consistent, is_more_precise
from torch.fx.annotate import annotate
from torch.fx.experimental.graph_gradual_typechecker import GraphTypeChecker, broadcast_types, Refine
from torch.fx.experimental.rewriter import RewritingTracer

            

Reported by Pylint.

Unable to import 'torch.fx'
Error

Line: 3 Column: 1

              import unittest
import torch
from torch.fx import symbolic_trace
from torch.fx.experimental.unify_refinements import infer_symbolic_types
from torch.fx.experimental.refinement_types import Equality
from torch.fx.tensor_type import TensorType, Dyn, is_consistent, is_more_precise
from torch.fx.annotate import annotate
from torch.fx.experimental.graph_gradual_typechecker import GraphTypeChecker, broadcast_types, Refine
from torch.fx.experimental.rewriter import RewritingTracer

            

Reported by Pylint.

Unable to import 'torch.fx.experimental.unify_refinements'
Error

Line: 4 Column: 1

              import unittest
import torch
from torch.fx import symbolic_trace
from torch.fx.experimental.unify_refinements import infer_symbolic_types
from torch.fx.experimental.refinement_types import Equality
from torch.fx.tensor_type import TensorType, Dyn, is_consistent, is_more_precise
from torch.fx.annotate import annotate
from torch.fx.experimental.graph_gradual_typechecker import GraphTypeChecker, broadcast_types, Refine
from torch.fx.experimental.rewriter import RewritingTracer

            

Reported by Pylint.

Unable to import 'torch.fx.experimental.refinement_types'
Error

Line: 5 Column: 1

              import torch
from torch.fx import symbolic_trace
from torch.fx.experimental.unify_refinements import infer_symbolic_types
from torch.fx.experimental.refinement_types import Equality
from torch.fx.tensor_type import TensorType, Dyn, is_consistent, is_more_precise
from torch.fx.annotate import annotate
from torch.fx.experimental.graph_gradual_typechecker import GraphTypeChecker, broadcast_types, Refine
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx import GraphModule

            

Reported by Pylint.

Unable to import 'torch.fx.tensor_type'
Error

Line: 6 Column: 1

              from torch.fx import symbolic_trace
from torch.fx.experimental.unify_refinements import infer_symbolic_types
from torch.fx.experimental.refinement_types import Equality
from torch.fx.tensor_type import TensorType, Dyn, is_consistent, is_more_precise
from torch.fx.annotate import annotate
from torch.fx.experimental.graph_gradual_typechecker import GraphTypeChecker, broadcast_types, Refine
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx import GraphModule
from torch.fx.passes.shape_prop import ShapeProp

            

Reported by Pylint.

Unable to import 'torch.fx.annotate'
Error

Line: 7 Column: 1

              from torch.fx.experimental.unify_refinements import infer_symbolic_types
from torch.fx.experimental.refinement_types import Equality
from torch.fx.tensor_type import TensorType, Dyn, is_consistent, is_more_precise
from torch.fx.annotate import annotate
from torch.fx.experimental.graph_gradual_typechecker import GraphTypeChecker, broadcast_types, Refine
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx import GraphModule
from torch.fx.passes.shape_prop import ShapeProp
from torch.fx.experimental.unification import Var

            

Reported by Pylint.

Unable to import 'torch.fx.experimental.graph_gradual_typechecker'
Error

Line: 8 Column: 1

              from torch.fx.experimental.refinement_types import Equality
from torch.fx.tensor_type import TensorType, Dyn, is_consistent, is_more_precise
from torch.fx.annotate import annotate
from torch.fx.experimental.graph_gradual_typechecker import GraphTypeChecker, broadcast_types, Refine
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx import GraphModule
from torch.fx.passes.shape_prop import ShapeProp
from torch.fx.experimental.unification import Var


            

Reported by Pylint.

Unable to import 'torch.fx.experimental.rewriter'
Error

Line: 9 Column: 1

              from torch.fx.tensor_type import TensorType, Dyn, is_consistent, is_more_precise
from torch.fx.annotate import annotate
from torch.fx.experimental.graph_gradual_typechecker import GraphTypeChecker, broadcast_types, Refine
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx import GraphModule
from torch.fx.passes.shape_prop import ShapeProp
from torch.fx.experimental.unification import Var

try:

            

Reported by Pylint.

Unable to import 'torch.fx'
Error

Line: 10 Column: 1

              from torch.fx.annotate import annotate
from torch.fx.experimental.graph_gradual_typechecker import GraphTypeChecker, broadcast_types, Refine
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx import GraphModule
from torch.fx.passes.shape_prop import ShapeProp
from torch.fx.experimental.unification import Var

try:
    from torchvision.models import resnet50

            

Reported by Pylint.

Unable to import 'torch.fx.passes.shape_prop'
Error

Line: 11 Column: 1

              from torch.fx.experimental.graph_gradual_typechecker import GraphTypeChecker, broadcast_types, Refine
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx import GraphModule
from torch.fx.passes.shape_prop import ShapeProp
from torch.fx.experimental.unification import Var

try:
    from torchvision.models import resnet50


            

Reported by Pylint.

torch/onnx/symbolic_opset11.py
443 issues
Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 29 Column: 40

                      dtype = 6  # float
    else:
        dtype = sym_help.scalar_type_to_onnx.index(sym_help.cast_pytorch_to_onnx[dtype])
    min_val = g.op("Constant", value_t=torch.tensor(min_val, dtype=sym_help.scalar_type_to_pytorch_type[dtype]))
    max_val = g.op("Constant", value_t=torch.tensor(max_val, dtype=sym_help.scalar_type_to_pytorch_type[dtype]))
    return g.op("Clip", self, min_val, max_val)


def clamp(g, self, min, max):

            

Reported by Pylint.
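
torch.tensor, along with the torch.int, torch.LongTensor and similar no-member reports below, are false positives: these attributes are created by PyTorch's C extension at import time, so Pylint's static analysis cannot see them. They can be waived where they occur with the no-member directive (sketched below), or project-wide via Pylint's generated-members option:

import torch

# torch.tensor exists at runtime but is provided by the C extension, so it is
# invisible to static analysis; the waiver applies to this line only.
zero = torch.tensor(0.0)  # pylint: disable=no-member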

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 30 Column: 40

                  else:
        dtype = sym_help.scalar_type_to_onnx.index(sym_help.cast_pytorch_to_onnx[dtype])
    min_val = g.op("Constant", value_t=torch.tensor(min_val, dtype=sym_help.scalar_type_to_pytorch_type[dtype]))
    max_val = g.op("Constant", value_t=torch.tensor(max_val, dtype=sym_help.scalar_type_to_pytorch_type[dtype]))
    return g.op("Clip", self, min_val, max_val)


def clamp(g, self, min, max):
    dtype = self.type().scalarType()

            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 87 Column: 40

                      dtype = 6  # float
    else:
        dtype = sym_help.scalar_type_to_onnx.index(sym_help.cast_pytorch_to_onnx[dtype])
    min_val = g.op("Constant", value_t=torch.tensor(0, dtype=sym_help.scalar_type_to_pytorch_type[dtype]))
    max_val = g.op("Constant", value_t=torch.tensor(6, dtype=sym_help.scalar_type_to_pytorch_type[dtype]))
    return clamp(g, relu, min_val, max_val)


# Opset 11 gather accepts negative indices

            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 88 Column: 40

                  else:
        dtype = sym_help.scalar_type_to_onnx.index(sym_help.cast_pytorch_to_onnx[dtype])
    min_val = g.op("Constant", value_t=torch.tensor(0, dtype=sym_help.scalar_type_to_pytorch_type[dtype]))
    max_val = g.op("Constant", value_t=torch.tensor(6, dtype=sym_help.scalar_type_to_pytorch_type[dtype]))
    return clamp(g, relu, min_val, max_val)


# Opset 11 gather accepts negative indices
@parse_args("v", "i", "v")

            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 191 Column: 70

                  dtype = sym_help.scalar_type_to_pytorch_type[dtype]

    if accumulate:
        zeros = g.op("ConstantOfShape", g.op("Shape", self), value_t=torch.tensor([0], dtype=dtype))
        result = g.op("ScatterND", zeros, index, values)
        result = add(g, self, result)
    else:
        result = g.op("ScatterND", self, index, values)


            

Reported by Pylint.

Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 252 Column: 43

              
@parse_args("v", "i", "none")
def cumsum(g, self, dim, dtype=None):
    dim_tensor = g.op("Constant", value_t=torch.tensor(dim, dtype=torch.int))
    if dtype and dtype.node().kind() != "prim::Constant":
        parsed_dtype = sym_help._get_const(dtype, "i", "dtype")
        cast = g.op("Cast", self, to_i=sym_help.scalar_type_to_onnx[parsed_dtype])
    else:
        cast = self

            

Reported by Pylint.

Module 'torch' has no 'int' member; maybe 'inf'?
Error

Line: 252 Column: 67

              
@parse_args("v", "i", "none")
def cumsum(g, self, dim, dtype=None):
    dim_tensor = g.op("Constant", value_t=torch.tensor(dim, dtype=torch.int))
    if dtype and dtype.node().kind() != "prim::Constant":
        parsed_dtype = sym_help._get_const(dtype, "i", "dtype")
        cast = g.op("Cast", self, to_i=sym_help.scalar_type_to_onnx[parsed_dtype])
    else:
        cast = self

            

Reported by Pylint.

Module 'torch' has no 'LongTensor' member
Error

Line: 274 Column: 30

                  # NOTE: source can have more elements than needed.
    # It could also have arbitrary shape.
    # This is not supported by ONNX::ScatterND, so we need to flatten and slice source tensor.
    source = view(g, source, torch.LongTensor([-1]))
    source = sym_help._slice_helper(g, source,
                                    axes=torch.LongTensor([0]),
                                    starts=torch.LongTensor([0]),
                                    ends=size(g, index, torch.LongTensor([0])),
                                    dynamic_slice=True)

            

Reported by Pylint.

Module 'torch' has no 'LongTensor' member
Error

Line: 276 Column: 42

                  # This is not supported by ONNX::ScatterND, so we need to flatten and slice source tensor.
    source = view(g, source, torch.LongTensor([-1]))
    source = sym_help._slice_helper(g, source,
                                    axes=torch.LongTensor([0]),
                                    starts=torch.LongTensor([0]),
                                    ends=size(g, index, torch.LongTensor([0])),
                                    dynamic_slice=True)
    return g.op("ScatterND", self, index, source)


            

Reported by Pylint.

Module 'torch' has no 'LongTensor' member
Error

Line: 277 Column: 44

                  source = view(g, source, torch.LongTensor([-1]))
    source = sym_help._slice_helper(g, source,
                                    axes=torch.LongTensor([0]),
                                    starts=torch.LongTensor([0]),
                                    ends=size(g, index, torch.LongTensor([0])),
                                    dynamic_slice=True)
    return g.op("ScatterND", self, index, source)



            

Reported by Pylint.

caffe2/python/data_parallel_model.py
443 issues
Use lazy % formatting in logging functions
Error

Line: 156 Column: 29

                  if not (cpu_device or ideep):
        for gpu in devices:
            if gpu >= workspace.NumGpuDevices():
                log.warning("** Only {} GPUs available, GPUs {} requested".format(
                    workspace.NumGpuDevices(), devices))
                break
        model_helper_obj._device_type = workspace.GpuDeviceType
        model_helper_obj._device_prefix = "gpu"
        model_helper_obj._shared_model = False

            

Reported by Pylint.
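
logging-format-interpolation asks that the message be interpolated by the logging framework itself, so the string formatting is skipped whenever the record is filtered out. A sketch of the flagged call rewritten with lazy %-formatting (names simplified from the snippet):

import logging

log = logging.getLogger(__name__)

def warn_only_n_gpus(num_available: int, devices) -> None:
    # Arguments are passed separately; logging interpolates only if the
    # warning is actually emitted.
    log.warning("** Only %d GPUs available, GPUs %s requested",
                num_available, devices)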

Access to a protected member _device_type of a client class
Error

Line: 159 Column: 9

                              log.warning("** Only {} GPUs available, GPUs {} requested".format(
                    workspace.NumGpuDevices(), devices))
                break
        model_helper_obj._device_type = workspace.GpuDeviceType
        model_helper_obj._device_prefix = "gpu"
        model_helper_obj._shared_model = False
        device_name = "GPU"
        assert shared_model is False, "Shared model only supported on CPU"
    elif ideep:

            

Reported by Pylint.

Access to a protected member _device_prefix of a client class
Error

Line: 160 Column: 9

                                  workspace.NumGpuDevices(), devices))
                break
        model_helper_obj._device_type = workspace.GpuDeviceType
        model_helper_obj._device_prefix = "gpu"
        model_helper_obj._shared_model = False
        device_name = "GPU"
        assert shared_model is False, "Shared model only supported on CPU"
    elif ideep:
        model_helper_obj._device_type = caffe2_pb2.IDEEP

            

Reported by Pylint.

Access to a protected member _shared_model of a client class
Error

Line: 161 Column: 9

                              break
        model_helper_obj._device_type = workspace.GpuDeviceType
        model_helper_obj._device_prefix = "gpu"
        model_helper_obj._shared_model = False
        device_name = "GPU"
        assert shared_model is False, "Shared model only supported on CPU"
    elif ideep:
        model_helper_obj._device_type = caffe2_pb2.IDEEP
        model_helper_obj._device_prefix = "ideep"

            

Reported by Pylint.

Access to a protected member _device_type of a client class
Error

Line: 165 Column: 9

                      device_name = "GPU"
        assert shared_model is False, "Shared model only supported on CPU"
    elif ideep:
        model_helper_obj._device_type = caffe2_pb2.IDEEP
        model_helper_obj._device_prefix = "ideep"
        device_name = "IDEEP"
        model_helper_obj._shared_model = shared_model
        if shared_model and rendezvous is not None:
            assert "Shared model only supported on single-node currently"

            

Reported by Pylint.

Access to a protected member _device_prefix of a client class
Error

Line: 166 Column: 9

                      assert shared_model is False, "Shared model only supported on CPU"
    elif ideep:
        model_helper_obj._device_type = caffe2_pb2.IDEEP
        model_helper_obj._device_prefix = "ideep"
        device_name = "IDEEP"
        model_helper_obj._shared_model = shared_model
        if shared_model and rendezvous is not None:
            assert "Shared model only supported on single-node currently"
    else:

            

Reported by Pylint.

Access to a protected member _shared_model of a client class
Error

Line: 168 Column: 9

                      model_helper_obj._device_type = caffe2_pb2.IDEEP
        model_helper_obj._device_prefix = "ideep"
        device_name = "IDEEP"
        model_helper_obj._shared_model = shared_model
        if shared_model and rendezvous is not None:
            assert "Shared model only supported on single-node currently"
    else:
        model_helper_obj._device_type = caffe2_pb2.CPU
        model_helper_obj._device_prefix = "cpu"

            

Reported by Pylint.

Assert statement has a string literal as its first argument. The assert will never fail.
Error

Line: 170 Column: 13

                      device_name = "IDEEP"
        model_helper_obj._shared_model = shared_model
        if shared_model and rendezvous is not None:
            assert "Shared model only supported on single-node currently"
    else:
        model_helper_obj._device_type = caffe2_pb2.CPU
        model_helper_obj._device_prefix = "cpu"
        device_name = "CPU"
        model_helper_obj._shared_model = shared_model

            

Reported by Pylint.
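
Unlike most findings in this file, the string-literal asserts are genuine no-ops: assert "..." tests the truthiness of a non-empty string, which is always True, so the guard can never fire. The presumable intent was a condition plus a message, sketched here (an assumption about intent, not the project's actual fix):

def check_shared_model(shared_model: bool, rendezvous) -> None:
    # Replaces the pattern `if shared_model and rendezvous is not None:
    #     assert "Shared model only supported on single-node currently"`,
    # which never fails because its argument is a non-empty string literal.
    assert not (shared_model and rendezvous is not None), \
        "Shared model only supported on single-node currently"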

Access to a protected member _device_type of a client class
Error

Line: 172 Column: 9

                      if shared_model and rendezvous is not None:
            assert "Shared model only supported on single-node currently"
    else:
        model_helper_obj._device_type = caffe2_pb2.CPU
        model_helper_obj._device_prefix = "cpu"
        device_name = "CPU"
        model_helper_obj._shared_model = shared_model
        if shared_model and rendezvous is not None:
            assert "Shared model only supported on single-node currently"

            

Reported by Pylint.

Access to a protected member _device_prefix of a client class
Error

Line: 173 Column: 9

                          assert "Shared model only supported on single-node currently"
    else:
        model_helper_obj._device_type = caffe2_pb2.CPU
        model_helper_obj._device_prefix = "cpu"
        device_name = "CPU"
        model_helper_obj._shared_model = shared_model
        if shared_model and rendezvous is not None:
            assert "Shared model only supported on single-node currently"


            

Reported by Pylint.

test/test_jit_cuda_fuser.py
441 issues
Unable to import 'torch'
Error

Line: 4 Column: 1

              import unittest
import os

import torch

from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR
from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed

from test_jit import JitTestCase, RUN_CUDA

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 6 Column: 1

              
import torch

from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR
from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed

from test_jit import JitTestCase, RUN_CUDA

from jit.test_fuser_common import TestFuserCommon  # noqa: F401

            

Reported by Pylint.

Unable to import 'torch.testing._internal.codegen.random_topo_test'
Error

Line: 7 Column: 1

              import torch

from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR
from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed

from test_jit import JitTestCase, RUN_CUDA

from jit.test_fuser_common import TestFuserCommon  # noqa: F401


            

Reported by Pylint.

Cannot import 'test_jit' due to syntax error 'expected an indented block (<unknown>, line 12722)'
Error

Line: 9 Column: 1

              from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR
from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed

from test_jit import JitTestCase, RUN_CUDA

from jit.test_fuser_common import TestFuserCommon  # noqa: F401

import itertools
import numpy as np

            

Reported by Pylint.

Unused TestFuserCommon imported from jit.test_fuser_common
Error

Line: 11 Column: 1

              
from test_jit import JitTestCase, RUN_CUDA

from jit.test_fuser_common import TestFuserCommon  # noqa: F401

import itertools
import numpy as np

os.environ['PYTORCH_CUDA_FUSER_DISABLE_FALLBACK'] = '1'

            

Reported by Pylint.

Access to a protected member _jit_set_texpr_fuser_enabled of a client class
Error

Line: 21 Column: 5

              os.environ['PYTORCH_CUDA_FUSER_JIT_OPT_LEVEL'] = '0'

if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
    torch._C._jit_set_texpr_fuser_enabled(False)
    torch._C._jit_set_profiling_executor(True)
    torch._C._jit_set_profiling_mode(True)

FUSION_GROUP = 'prim::CudaFusionGroup'
FUSION_GUARD = 'prim::CudaFusionGuard'

            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 21 Column: 5

              os.environ['PYTORCH_CUDA_FUSER_JIT_OPT_LEVEL'] = '0'

if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
    torch._C._jit_set_texpr_fuser_enabled(False)
    torch._C._jit_set_profiling_executor(True)
    torch._C._jit_set_profiling_mode(True)

FUSION_GROUP = 'prim::CudaFusionGroup'
FUSION_GUARD = 'prim::CudaFusionGuard'

            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 22 Column: 5

              
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
    torch._C._jit_set_texpr_fuser_enabled(False)
    torch._C._jit_set_profiling_executor(True)
    torch._C._jit_set_profiling_mode(True)

FUSION_GROUP = 'prim::CudaFusionGroup'
FUSION_GUARD = 'prim::CudaFusionGuard'


            

Reported by Pylint.

Access to a protected member _jit_set_profiling_executor of a client class
Error

Line: 22 Column: 5

              
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
    torch._C._jit_set_texpr_fuser_enabled(False)
    torch._C._jit_set_profiling_executor(True)
    torch._C._jit_set_profiling_mode(True)

FUSION_GROUP = 'prim::CudaFusionGroup'
FUSION_GUARD = 'prim::CudaFusionGuard'


            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 23 Column: 5

              if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
    torch._C._jit_set_texpr_fuser_enabled(False)
    torch._C._jit_set_profiling_executor(True)
    torch._C._jit_set_profiling_mode(True)

FUSION_GROUP = 'prim::CudaFusionGroup'
FUSION_GUARD = 'prim::CudaFusionGuard'

class TestCudaFuser(JitTestCase):

            

Reported by Pylint.
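
The torch._C._jit_set_* calls are internal knobs that this test suite flips on purpose, so protected-access here is noise rather than a defect. If it should be silenced at the call sites, a sketch (assuming the knobs keep their current names):

import torch
from torch.testing._internal.common_utils import ProfilingMode, GRAPH_EXECUTOR

if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
    # Deliberate use of internal APIs in test setup; tell Pylint so.
    torch._C._jit_set_texpr_fuser_enabled(False)  # pylint: disable=protected-access
    torch._C._jit_set_profiling_executor(True)    # pylint: disable=protected-access
    torch._C._jit_set_profiling_mode(True)        # pylint: disable=protected-access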

torch/quantization/fx/quantization_patterns.py
427 issues
Attempted relative import beyond top-level package
Error

Line: 12 Column: 1

                  default_symmetric_fixed_qparams_fake_quant,
)

from ..quantization_mappings import (
    get_static_quant_module_class,
    get_dynamic_quant_module_class,
    get_quantized_operator,
)
from ..utils import (

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 17 Column: 1

                  get_dynamic_quant_module_class,
    get_quantized_operator,
)
from ..utils import (
    get_swapped_custom_module_class,
    activation_is_statically_quantized,
    activation_is_int8_quantized,
    weight_is_statically_quantized,
    get_qconfig_dtypes,

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 27 Column: 1

                  get_qparam_dict,
)

from ..quantize import (
    is_activation_post_process,
)

from .pattern_utils import (
    register_quant_pattern,

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 31 Column: 1

                  is_activation_post_process,
)

from .pattern_utils import (
    register_quant_pattern,
    get_default_output_activation_post_process_map,
    Pattern,
)


            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 37 Column: 1

                  Pattern,
)

from .utils import (
    _parent_name,
    all_node_args_have_no_tensors,
    quantize_node,
    get_per_tensor_qparams,
    get_linear_prepack_op_for_dtype,

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 48 Column: 1

                  get_qconv_op,
)

from ..qconfig import QConfigAny

from abc import ABC, abstractmethod
import operator
import warnings


            

Reported by Pylint.
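
relative-beyond-top-level usually means Pylint resolved this file as a standalone module rather than as torch.quantization.fx.quantization_patterns, so the two leading dots appear to climb past the package root. Linting the package from the repository root normally clears it; alternatively the imports can be written absolutely, as in this sketch (the absolute path is simply the mechanical resolution of the relative one):

# Relative form in the file:
#     from ..quantization_mappings import get_static_quant_module_class
# Absolute equivalent, assuming the file sits at
# torch/quantization/fx/quantization_patterns.py:
from torch.quantization.quantization_mappings import (
    get_static_quant_module_class,
    get_dynamic_quant_module_class,
    get_quantized_operator,
)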

Module 'torch' has no 'quint8' member
Error

Line: 184 Column: 6

              # tuple (activation_dtype, weight_dtype, compute_dtype)
# these are supported types for common binary ops like add/mul etc.
binary_op_all_dtypes = [
    (torch.quint8, torch.qint8, None),
    (torch.float16, torch.float16, None),
]
binary_op_float16_dtypes = [
    (torch.float16, torch.float16, None)
]

            

Reported by Pylint.

Module 'torch' has no 'qint8' member
Error

Line: 184 Column: 20

              # tuple (activation_dtype, weight_dtype, compute_dtype)
# these are supported types for common binary ops like add/mul etc.
binary_op_all_dtypes = [
    (torch.quint8, torch.qint8, None),
    (torch.float16, torch.float16, None),
]
binary_op_float16_dtypes = [
    (torch.float16, torch.float16, None)
]

            

Reported by Pylint.

Module 'torch' has no 'float16' member
Error

Line: 185 Column: 6

              # these are supported types for common binary ops like add/mul etc.
binary_op_all_dtypes = [
    (torch.quint8, torch.qint8, None),
    (torch.float16, torch.float16, None),
]
binary_op_float16_dtypes = [
    (torch.float16, torch.float16, None)
]
binary_op_int8_dtypes = [

            

Reported by Pylint.

Module 'torch' has no 'float16' member
Error

Line: 185 Column: 21

              # these are supported types for common binary ops like add/mul etc.
binary_op_all_dtypes = [
    (torch.quint8, torch.qint8, None),
    (torch.float16, torch.float16, None),
]
binary_op_float16_dtypes = [
    (torch.float16, torch.float16, None)
]
binary_op_int8_dtypes = [

            

Reported by Pylint.

torch/onnx/utils.py
423 issues
Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 1007 Column: 42

                                  return g.op("Constant", value_s=n["value"])
                elif n.output().type().isSubtypeOf(ListType.ofInts()) or n.output().type().isSubtypeOf(ListType.ofFloats()):
                    vals = n.output().toIValue()
                    value = torch.stack([torch.tensor(v) for v in vals]) if len(vals) else []
                    return g.op("Constant", value_t=value)
                elif n.output().type().kind() == "DeviceObjType":
                    return None
                else:
                    raise RuntimeError("Unsupported prim::Constant kind: `{}`. Send a bug report.".format(

            

Reported by Pylint.

Module 'torch' has no 'stack' member
Error

Line: 1007 Column: 29

                                  return g.op("Constant", value_s=n["value"])
                elif n.output().type().isSubtypeOf(ListType.ofInts()) or n.output().type().isSubtypeOf(ListType.ofFloats()):
                    vals = n.output().toIValue()
                    value = torch.stack([torch.tensor(v) for v in vals]) if len(vals) else []
                    return g.op("Constant", value_t=value)
                elif n.output().type().kind() == "DeviceObjType":
                    return None
                else:
                    raise RuntimeError("Unsupported prim::Constant kind: `{}`. Send a bug report.".format(

            

Reported by Pylint.

Module 'torch' has no 'CharTensor' member
Error

Line: 1162 Column: 19

                      dims = [1]
        isscalar = True
    type = type.lower()
    tensor: Union[torch.CharTensor, torch.ShortTensor,
                  torch.IntTensor, torch.LongTensor,
                  torch.HalfTensor, torch.FloatTensor,
                  torch.DoubleTensor]
    if type == "char":
        tensor = torch.CharTensor(*dims)

            

Reported by Pylint.

Module 'torch' has no 'ShortTensor' member
Error

Line: 1162 Column: 37

                      dims = [1]
        isscalar = True
    type = type.lower()
    tensor: Union[torch.CharTensor, torch.ShortTensor,
                  torch.IntTensor, torch.LongTensor,
                  torch.HalfTensor, torch.FloatTensor,
                  torch.DoubleTensor]
    if type == "char":
        tensor = torch.CharTensor(*dims)

            

Reported by Pylint.

Module 'torch' has no 'LongTensor' member
Error

Line: 1163 Column: 36

                      isscalar = True
    type = type.lower()
    tensor: Union[torch.CharTensor, torch.ShortTensor,
                  torch.IntTensor, torch.LongTensor,
                  torch.HalfTensor, torch.FloatTensor,
                  torch.DoubleTensor]
    if type == "char":
        tensor = torch.CharTensor(*dims)
    elif type == "short":

            

Reported by Pylint.

Module 'torch' has no 'IntTensor' member
Error

Line: 1163 Column: 19

                      isscalar = True
    type = type.lower()
    tensor: Union[torch.CharTensor, torch.ShortTensor,
                  torch.IntTensor, torch.LongTensor,
                  torch.HalfTensor, torch.FloatTensor,
                  torch.DoubleTensor]
    if type == "char":
        tensor = torch.CharTensor(*dims)
    elif type == "short":

            

Reported by Pylint.

Module 'torch' has no 'HalfTensor' member
Error

Line: 1164 Column: 19

                  type = type.lower()
    tensor: Union[torch.CharTensor, torch.ShortTensor,
                  torch.IntTensor, torch.LongTensor,
                  torch.HalfTensor, torch.FloatTensor,
                  torch.DoubleTensor]
    if type == "char":
        tensor = torch.CharTensor(*dims)
    elif type == "short":
        tensor = torch.ShortTensor(*dims)

            

Reported by Pylint.

Module 'torch' has no 'FloatTensor' member
Error

Line: 1164 Column: 37

                  type = type.lower()
    tensor: Union[torch.CharTensor, torch.ShortTensor,
                  torch.IntTensor, torch.LongTensor,
                  torch.HalfTensor, torch.FloatTensor,
                  torch.DoubleTensor]
    if type == "char":
        tensor = torch.CharTensor(*dims)
    elif type == "short":
        tensor = torch.ShortTensor(*dims)

            

Reported by Pylint.

Module 'torch' has no 'DoubleTensor' member
Error

Line: 1165 Column: 19

                  tensor: Union[torch.CharTensor, torch.ShortTensor,
                  torch.IntTensor, torch.LongTensor,
                  torch.HalfTensor, torch.FloatTensor,
                  torch.DoubleTensor]
    if type == "char":
        tensor = torch.CharTensor(*dims)
    elif type == "short":
        tensor = torch.ShortTensor(*dims)
    elif type == "int":

            

Reported by Pylint.

Module 'torch' has no 'CharTensor' member
Error

Line: 1167 Column: 18

                                torch.HalfTensor, torch.FloatTensor,
                  torch.DoubleTensor]
    if type == "char":
        tensor = torch.CharTensor(*dims)
    elif type == "short":
        tensor = torch.ShortTensor(*dims)
    elif type == "int":
        tensor = torch.IntTensor(*dims)
    elif type == "long":

            

Reported by Pylint.