The following issues were found:

benchmarks/instruction_counts/core/expand.py
23 issues
Unable to import 'torch'
Error

Line: 14 Column: 1

              from typing import cast, List, Optional, Tuple, TYPE_CHECKING
import uuid

import torch

if TYPE_CHECKING:
    # See the note in api.py for why this is necessary.
    from torch.utils.benchmark.utils.timer import Language
else:

            

Reported by Pylint.

Unable to import 'torch.utils.benchmark.utils.timer'
Error

Line: 18 Column: 5

              
if TYPE_CHECKING:
    # See the note in api.py for why this is necessary.
    from torch.utils.benchmark.utils.timer import Language
else:
    from torch.utils.benchmark import Language

from core.api import AutogradMode, AutoLabels, GroupedBenchmark, RuntimeMode, TimerArgs
from core.types import FlatDefinition, FlatIntermediateDefinition, Label

            

Reported by Pylint.

Unable to import 'torch.utils.benchmark'
Error

Line: 20 Column: 5

                  # See the note in api.py for why this is necessary.
    from torch.utils.benchmark.utils.timer import Language
else:
    from torch.utils.benchmark import Language

from core.api import AutogradMode, AutoLabels, GroupedBenchmark, RuntimeMode, TimerArgs
from core.types import FlatDefinition, FlatIntermediateDefinition, Label
from core.utils import get_temp_dir


            

Reported by Pylint.

Using variable 'Language' before assignment
Error

Line: 30 Column: 5

              _ALL_MODES = tuple(it.product(
    RuntimeMode,
    AutogradMode,
    Language,
))


def _generate_torchscript_file(model_src: str, name: str) -> Optional[str]:
    """Returns the path a saved model if one can be constructed from `spec`.

            

Reported by Pylint.

Access to a protected member _make_model_invocation of a client class
Error

Line: 108 Column: 17

                  else:
        assert runtime == RuntimeMode.JIT
        assert benchmark.signature_args is not None
        stmts = GroupedBenchmark._make_model_invocation(
            benchmark.signature_args, benchmark.signature_output, RuntimeMode.JIT)

    stmt = stmts[0 if is_python else 1]

    if autograd == AutogradMode.FORWARD_BACKWARD and stmt is not None:

            

Reported by Pylint.

Import "from core.api import AutogradMode, AutoLabels, GroupedBenchmark, RuntimeMode, TimerArgs" should be placed at the top of the module
Error

Line: 22 Column: 1

              else:
    from torch.utils.benchmark import Language

from core.api import AutogradMode, AutoLabels, GroupedBenchmark, RuntimeMode, TimerArgs
from core.types import FlatDefinition, FlatIntermediateDefinition, Label
from core.utils import get_temp_dir


_ALL_MODES = tuple(it.product(

            

Reported by Pylint.

Import "from core.types import FlatDefinition, FlatIntermediateDefinition, Label" should be placed at the top of the module
Error

Line: 23 Column: 1

                  from torch.utils.benchmark import Language

from core.api import AutogradMode, AutoLabels, GroupedBenchmark, RuntimeMode, TimerArgs
from core.types import FlatDefinition, FlatIntermediateDefinition, Label
from core.utils import get_temp_dir


_ALL_MODES = tuple(it.product(
    RuntimeMode,

            

Reported by Pylint.

Import "from core.utils import get_temp_dir" should be placed at the top of the module
Error

Line: 24 Column: 1

              
from core.api import AutogradMode, AutoLabels, GroupedBenchmark, RuntimeMode, TimerArgs
from core.types import FlatDefinition, FlatIntermediateDefinition, Label
from core.utils import get_temp_dir


_ALL_MODES = tuple(it.product(
    RuntimeMode,
    AutogradMode,

            

Reported by Pylint.

Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.
Security

Line: 45
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html

                  `model_src` must contain `jit_model = ...`, which `materialize` will supply.
    """
    # Double check.
    assert "jit_model = " in model_src, f"Missing jit_model definition:\n{model_src}"

    # `torch.utils.benchmark.Timer` will automatically import torch, so we
    # need to match that convention.
    model_src = f"import torch\n{model_src}"


            

Reported by Bandit.

Variable name "f" doesn't conform to snake_case naming style
Error

Line: 61 Column: 37

                      # to confirm.
        raise ValueError(f"File {module_path} already exists.")

    with open(module_path, "wt") as f:
        f.write(model_src)

    # Import magic to actually load our function.
    module_spec = importlib.util.spec_from_file_location(f"torchscript__{name}", module_path)
    module = importlib.util.module_from_spec(module_spec)

            

Reported by Pylint.

caffe2/python/_import_c_extension.py
23 issues
Undefined variable 'num_cuda_devices'
Error

Line: 18 Column: 12

              
    try:
        from caffe2.python.caffe2_pybind11_state_gpu import *  # noqa
        if num_cuda_devices():  # noqa
            has_gpu_support = has_cuda_support = True
    except ImportError as gpu_e:
        logging.info('Failed to import cuda module: {}'.format(gpu_e))
        try:
            from caffe2.python.caffe2_pybind11_state_hip import *  # noqa

            

Reported by Pylint.

Undefined variable 'on_module_exit'
Error

Line: 45 Column: 17

              
# libcaffe2_python contains a global Workspace that we need to properly delete
# when exiting. Otherwise, cudart will cause segfaults sometimes.
atexit.register(on_module_exit)  # noqa


# Add functionalities for the TensorCPU interface.
def _TensorCPU_shape(self):
    return tuple(self._shape)

            

Reported by Pylint.

Undefined variable 'TensorCPU'
Error

Line: 56 Column: 1

              def _TensorCPU_reshape(self, shape):
    return self._reshape(list(shape))

TensorCPU.shape = property(_TensorCPU_shape)  # noqa
TensorCPU.reshape = _TensorCPU_reshape  # noqa

            

Reported by Pylint.

Undefined variable 'TensorCPU'
Error

Line: 57 Column: 1

                  return self._reshape(list(shape))

TensorCPU.shape = property(_TensorCPU_shape)  # noqa
TensorCPU.reshape = _TensorCPU_reshape  # noqa

            

Reported by Pylint.

Wildcard import caffe2.python.caffe2_pybind11_state_gpu
Error

Line: 17 Column: 9

                  has_gpu_support = False

    try:
        from caffe2.python.caffe2_pybind11_state_gpu import *  # noqa
        if num_cuda_devices():  # noqa
            has_gpu_support = has_cuda_support = True
    except ImportError as gpu_e:
        logging.info('Failed to import cuda module: {}'.format(gpu_e))
        try:

            

Reported by Pylint.

Use lazy % formatting in logging functions
Error

Line: 21 Column: 22

                      if num_cuda_devices():  # noqa
            has_gpu_support = has_cuda_support = True
    except ImportError as gpu_e:
        logging.info('Failed to import cuda module: {}'.format(gpu_e))
        try:
            from caffe2.python.caffe2_pybind11_state_hip import *  # noqa
            # we stop checking whether we have AMD GPU devices on the host,
            # because we may be constructing a net on a machine without GPU,
            # and run the net on another one with GPU

            

Reported by Pylint.

Wildcard import caffe2.python.caffe2_pybind11_state_hip
Error

Line: 23 Column: 13

                  except ImportError as gpu_e:
        logging.info('Failed to import cuda module: {}'.format(gpu_e))
        try:
            from caffe2.python.caffe2_pybind11_state_hip import *  # noqa
            # we stop checking whether we have AMD GPU devices on the host,
            # because we may be constructing a net on a machine without GPU,
            # and run the net on another one with GPU
            has_gpu_support = has_hip_support = True
            logging.info('This caffe2 python run has AMD GPU support!')

            

Reported by Pylint.

Use lazy % formatting in logging functions
Error

Line: 30 Column: 26

                          has_gpu_support = has_hip_support = True
            logging.info('This caffe2 python run has AMD GPU support!')
        except ImportError as hip_e:
            logging.info('Failed to import AMD hip module: {}'.format(hip_e))

            logging.warning(
                'This caffe2 python run failed to load cuda module:{},'
                'and AMD hip module:{}.'
                'Will run in CPU only mode.'.format(gpu_e, hip_e))

            

Reported by Pylint.

Use lazy % formatting in logging functions
Error

Line: 33 Column: 17

                          logging.info('Failed to import AMD hip module: {}'.format(hip_e))

            logging.warning(
                'This caffe2 python run failed to load cuda module:{},'
                'and AMD hip module:{}.'
                'Will run in CPU only mode.'.format(gpu_e, hip_e))
            try:
                from caffe2.python.caffe2_pybind11_state import *  # noqa
            except ImportError as cpu_e:

            

Reported by Pylint.

Wildcard import caffe2.python.caffe2_pybind11_state
Error

Line: 37 Column: 17

                              'and AMD hip module:{}.'
                'Will run in CPU only mode.'.format(gpu_e, hip_e))
            try:
                from caffe2.python.caffe2_pybind11_state import *  # noqa
            except ImportError as cpu_e:
                logging.critical(
                    'Cannot load caffe2.python. Error: {0}'.format(str(cpu_e)))
                sys.exit(1)


            

Reported by Pylint.

benchmarks/operator_benchmark/pt/qactivation_test.py
23 issues
Unable to import 'torch'
Error

Line: 1 Column: 1

              import torch
import torch.nn.quantized as nnq

import operator_benchmark as op_bench

r"""Microbenchmarks for the quantized activations."""

qactivation_long_configs = op_bench.cross_product_configs(
    dims=(

            

Reported by Pylint.

Unable to import 'torch.nn.quantized'
Error

Line: 2 Column: 1

              import torch
import torch.nn.quantized as nnq

import operator_benchmark as op_bench

r"""Microbenchmarks for the quantized activations."""

qactivation_long_configs = op_bench.cross_product_configs(
    dims=(

            

Reported by Pylint.

Module 'operator_benchmark' has no 'cross_product_configs' member
Error

Line: 8 Column: 28

              
r"""Microbenchmarks for the quantized activations."""

qactivation_long_configs = op_bench.cross_product_configs(
    dims=(
        # VGG-16 relu's with original shape: (-1, 3, 224, 224)
        ( 64, 224, 224),  # ReLU-1   # noqa: E201
        (128, 112, 112),  # ReLU-6
        (256,  56,  56),  # ReLU-11  # noqa: E241

            

Reported by Pylint.

Module 'operator_benchmark' has no 'cross_product_configs' member
Error

Line: 29 Column: 29

                  tags=('long',)
)

qactivation_short_configs = op_bench.cross_product_configs(
    dims=(
        (3, 4, 5),      # Rank=3
        (2, 3, 4, 5),    # Rank=4,
        # Dimensions from the floating point benchmarks
        (512, 512),

            

Reported by Pylint.

Module 'operator_benchmark' has no 'op_list' member
Error

Line: 43 Column: 19

                  tags=('short',)
)

qactivation_ops = op_bench.op_list(
    attrs=(
        ('relu', torch.nn.ReLU()),
        ('relu6', torch.ops.quantized.relu6),
        ('functional.hardtanh', nnq.functional.hardtanh),
        ('functional.hardsigmoid', nnq.functional.hardsigmoid),

            

Reported by Pylint.

Module 'operator_benchmark' has no 'TorchBenchmarkBase' member
Error

Line: 57 Column: 32

              )


class QActivationBenchmarkBase(op_bench.TorchBenchmarkBase):
    r"""Base class for all the activations."""
    def _setup(self, dims, contig, dtype):
        # Input
        f_input = (torch.rand(*dims) - 0.5) * 256
        self.scale = 1.0

            

Reported by Pylint.

Module 'operator_benchmark' has no 'generate_pt_tests_from_op_list' member
Error

Line: 88 Column: 1

                      return self.qop(q_input)


op_bench.generate_pt_tests_from_op_list(qactivation_ops,
                                        qactivation_short_configs + qactivation_long_configs,
                                        QActivationBenchmark)


qactivation_scale_zero_point_ops = op_bench.op_list(

            

Reported by Pylint.

Module 'operator_benchmark' has no 'op_list' member
Error

Line: 93 Column: 36

                                                      QActivationBenchmark)


qactivation_scale_zero_point_ops = op_bench.op_list(
    attrs=(
        ('functional.hardswish', nnq.functional.hardswish),
        ('functional.elu', nnq.functional.elu),
        ('functional.celu', nnq.functional.celu),
    ),

            

Reported by Pylint.

Module 'operator_benchmark' has no 'generate_pt_tests_from_op_list' member
Error

Line: 106 Column: 1

                  def forward(self, q_input):
        return self.qop(q_input, scale=self.scale, zero_point=self.zero_point)

op_bench.generate_pt_tests_from_op_list(qactivation_scale_zero_point_ops,
                                        qactivation_short_configs + qactivation_long_configs,
                                        QActivationScaleZeroPointBenchmark)

if __name__ == "__main__":
    op_bench.benchmark_runner.main()

            

Reported by Pylint.

String statement has no effect
Error

Line: 6 Column: 1

              
import operator_benchmark as op_bench

r"""Microbenchmarks for the quantized activations."""

qactivation_long_configs = op_bench.cross_product_configs(
    dims=(
        # VGG-16 relu's with original shape: (-1, 3, 224, 224)
        ( 64, 224, 224),  # ReLU-1   # noqa: E201

            

Reported by Pylint.

caffe2/contrib/aten/docs/sample.py
23 issues
Unable to import 'onnx'
Error

Line: 9 Column: 1

              from torch.autograd import Variable, Function
import torch.onnx

import onnx
import caffe2.python.onnx.backend

class MyFunction(Function):
    @staticmethod
    def forward(ctx, x, y):

            

Reported by Pylint.

Module 'torch' has no 'ones' member
Error

Line: 34 Column: 29

              
f = tempfile.NamedTemporaryFile()
torch.onnx.export(MyModule(),
                  (Variable(torch.ones(3, 4)), Variable(torch.ones(3, 4))),
                  f, verbose=True)

# prints the graph for debugging:
# graph(%input : Float(3, 4, strides=[4, 1], requires_grad=0, device=cpu),
#       %y : Float(3, 4, strides=[4, 1], requires_grad=0, device=cpu)):

            

Reported by Pylint.

Module 'torch' has no 'ones' member
Error

Line: 34 Column: 57

              
f = tempfile.NamedTemporaryFile()
torch.onnx.export(MyModule(),
                  (Variable(torch.ones(3, 4)), Variable(torch.ones(3, 4))),
                  f, verbose=True)

# prints the graph for debugging:
# graph(%input : Float(3, 4, strides=[4, 1], requires_grad=0, device=cpu),
#       %y : Float(3, 4, strides=[4, 1], requires_grad=0, device=cpu)):

            

Reported by Pylint.

Method 'backward' is abstract in class 'Function' but is not overridden
Error

Line: 12 Column: 1

              import onnx
import caffe2.python.onnx.backend

class MyFunction(Function):
    @staticmethod
    def forward(ctx, x, y):
        return x * x + y

    @staticmethod

            

Reported by Pylint.

Redefining name 'x' from outer scope (line 54)
Error

Line: 14 Column: 22

              
class MyFunction(Function):
    @staticmethod
    def forward(ctx, x, y):
        return x * x + y

    @staticmethod
    def symbolic(graph, x, y):
        x2 = graph.at("mul", x, x)

            

Reported by Pylint.

Parameters differ from overridden 'forward' method
Error

Line: 14 Column: 5

              
class MyFunction(Function):
    @staticmethod
    def forward(ctx, x, y):
        return x * x + y

    @staticmethod
    def symbolic(graph, x, y):
        x2 = graph.at("mul", x, x)

            

Reported by Pylint.

Redefining name 'graph' from outer scope (line 45)
Error

Line: 18 Column: 18

                      return x * x + y

    @staticmethod
    def symbolic(graph, x, y):
        x2 = graph.at("mul", x, x)
        r = graph.at("add", x2, y)
        # x, y, x2, and r are 'Node' objects
        # print(r) or print(graph) will print out a textual representation for debugging.
        # this representation will be converted to ONNX protobufs on export.

            

Reported by Pylint.

Redefining name 'x' from outer scope (line 54)
Error

Line: 18 Column: 25

                      return x * x + y

    @staticmethod
    def symbolic(graph, x, y):
        x2 = graph.at("mul", x, x)
        r = graph.at("add", x2, y)
        # x, y, x2, and r are 'Node' objects
        # print(r) or print(graph) will print out a textual representation for debugging.
        # this representation will be converted to ONNX protobufs on export.

            

Reported by Pylint.

Redefining name 'r' from outer scope (line 55)
Error

Line: 20 Column: 9

                  @staticmethod
    def symbolic(graph, x, y):
        x2 = graph.at("mul", x, x)
        r = graph.at("add", x2, y)
        # x, y, x2, and r are 'Node' objects
        # print(r) or print(graph) will print out a textual representation for debugging.
        # this representation will be converted to ONNX protobufs on export.
        return r


            

Reported by Pylint.

Redefining name 'x' from outer scope (line 54)
Error

Line: 27 Column: 23

                      return r

class MyModule(nn.Module):
    def forward(self, x, y):
        # you can combine your ATen ops with standard onnx ones
        x = nn.ReLU()(x)
        return MyFunction.apply(x, y)

f = tempfile.NamedTemporaryFile()

            

Reported by Pylint.

caffe2/python/modeling/parameter_sharing.py
23 issues
Access to a protected member _NAMESCOPE_SEPARATOR of a client class
Error

Line: 41 Column: 44

                      """
        best_scope = candidate_scope
        best_scope_idx = 0
        sub_scopes = candidate_scope.split(scope._NAMESCOPE_SEPARATOR)

        cur_scope = ''
        for idx, sub_scope in enumerate(sub_scopes):
            cur_scope = cur_scope + sub_scope + scope._NAMESCOPE_SEPARATOR
            if cur_scope in self._scope_overrides:

            

Reported by Pylint.

Access to a protected member _NAMESCOPE_SEPARATOR of a client class
Error

Line: 45 Column: 49

              
        cur_scope = ''
        for idx, sub_scope in enumerate(sub_scopes):
            cur_scope = cur_scope + sub_scope + scope._NAMESCOPE_SEPARATOR
            if cur_scope in self._scope_overrides:
                best_scope = self._scope_overrides[cur_scope]
                best_scope_idx = idx
        if best_scope == candidate_scope:
            return candidate_scope

            

Reported by Pylint.

Access to a protected member _NAMESCOPE_SEPARATOR of a client class
Error

Line: 53 Column: 21

                          return candidate_scope
        else:
            return (self._resolve_scope_overrides(best_scope) +
                    scope._NAMESCOPE_SEPARATOR.join(
                        sub_scopes[best_scope_idx + 1:]))

    def get_parameter_name(self, name):
        candidate_scope = scope.CurrentNameScope()
        best_scope = self._resolve_scope_overrides(candidate_scope)

            

Reported by Pylint.

Use lazy % formatting in logging functions
Error

Line: 60 Column: 25

                      candidate_scope = scope.CurrentNameScope()
        best_scope = self._resolve_scope_overrides(candidate_scope)
        if best_scope != candidate_scope:
            logger.info("Overwriting scope {0} with scope {1}".format(
                candidate_scope, best_scope))

        return best_scope + name

    def add_scope_overrides(self, shared_scopes):

            

Reported by Pylint.

Access to a protected member _NAMESCOPE_SEPARATOR of a client class
Error

Line: 81 Column: 39

              

def _normalize_namescope(namescope):
    if namescope and namescope[-1] != scope._NAMESCOPE_SEPARATOR:
        return namescope + scope._NAMESCOPE_SEPARATOR
    else:
        return namescope



            

Reported by Pylint.

Access to a protected member _NAMESCOPE_SEPARATOR of a client class
Error

Line: 82 Column: 28

              
def _normalize_namescope(namescope):
    if namescope and namescope[-1] != scope._NAMESCOPE_SEPARATOR:
        return namescope + scope._NAMESCOPE_SEPARATOR
    else:
        return namescope


@contextlib.contextmanager

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              




from caffe2.python import scope

import contextlib
import logging

            

Reported by Pylint.

standard import "import contextlib" should be placed before "from caffe2.python import scope"
Error

Line: 8 Column: 1

              
from caffe2.python import scope

import contextlib
import logging

logger = logging.getLogger(__name__)



            

Reported by Pylint.

standard import "import logging" should be placed before "from caffe2.python import scope"
Error

Line: 9 Column: 1

              from caffe2.python import scope

import contextlib
import logging

logger = logging.getLogger(__name__)


class ParameterSharingContext(object):

            

Reported by Pylint.

Class 'ParameterSharingContext' inherits from object, can be safely removed from bases in python3
Error

Line: 14 Column: 1

              logger = logging.getLogger(__name__)


class ParameterSharingContext(object):
    """
    This class manages scope driven way of parameter sharing across different
    NameScopes.
    """


            

Reported by Pylint.

android/test_app/app/src/main/java/org/pytorch/testapp/MainActivity.java
23 issues
Avoid instantiating FileInputStream, FileOutputStream, FileReader, or FileWriter
Performance

Line: 66

                  }

    try (InputStream is = context.getAssets().open(assetName)) {
      try (OutputStream os = new FileOutputStream(file)) {
        byte[] buffer = new byte[4 * 1024];
        int read;
        while ((read = is.read(buffer)) != -1) {
          os.write(buffer, 0, read);
        }

            

Reported by PMD.

Found non-transient, non-static member. Please mark as transient or provide accessors.
Error

Line: 32

                private static final String TAG = BuildConfig.LOGCAT_TAG;
  private static final int TEXT_TRIM_SIZE = 4096;

  private TextView mTextView;

  protected HandlerThread mBackgroundThread;
  protected Handler mBackgroundHandler;
  private Module mModule;
  private FloatBuffer mInputTensorBuffer;

            

Reported by PMD.

Found non-transient, non-static member. Please mark as transient or provide accessors.
Error

Line: 34

              
  private TextView mTextView;

  protected HandlerThread mBackgroundThread;
  protected Handler mBackgroundHandler;
  private Module mModule;
  private FloatBuffer mInputTensorBuffer;
  private Tensor mInputTensor;
  private StringBuilder mTextViewStringBuilder = new StringBuilder();

            

Reported by PMD.

Found non-transient, non-static member. Please mark as transient or provide accessors.
Error

Line: 35

                private TextView mTextView;

  protected HandlerThread mBackgroundThread;
  protected Handler mBackgroundHandler;
  private Module mModule;
  private FloatBuffer mInputTensorBuffer;
  private Tensor mInputTensor;
  private StringBuilder mTextViewStringBuilder = new StringBuilder();


            

Reported by PMD.

Found non-transient, non-static member. Please mark as transient or provide accessors.
Error

Line: 36

              
  protected HandlerThread mBackgroundThread;
  protected Handler mBackgroundHandler;
  private Module mModule;
  private FloatBuffer mInputTensorBuffer;
  private Tensor mInputTensor;
  private StringBuilder mTextViewStringBuilder = new StringBuilder();

  private final Runnable mModuleForwardRunnable =

            

Reported by PMD.

Found non-transient, non-static member. Please mark as transient or provide accessors.
Error

Line: 37

                protected HandlerThread mBackgroundThread;
  protected Handler mBackgroundHandler;
  private Module mModule;
  private FloatBuffer mInputTensorBuffer;
  private Tensor mInputTensor;
  private StringBuilder mTextViewStringBuilder = new StringBuilder();

  private final Runnable mModuleForwardRunnable =
      new Runnable() {

            

Reported by PMD.

Found non-transient, non-static member. Please mark as transient or provide accessors.
Error

Line: 38

                protected Handler mBackgroundHandler;
  private Module mModule;
  private FloatBuffer mInputTensorBuffer;
  private Tensor mInputTensor;
  private StringBuilder mTextViewStringBuilder = new StringBuilder();

  private final Runnable mModuleForwardRunnable =
      new Runnable() {
        @Override

            

Reported by PMD.

Found non-transient, non-static member. Please mark as transient or provide accessors.
Error

Line: 39

                private Module mModule;
  private FloatBuffer mInputTensorBuffer;
  private Tensor mInputTensor;
  private StringBuilder mTextViewStringBuilder = new StringBuilder();

  private final Runnable mModuleForwardRunnable =
      new Runnable() {
        @Override
        public void run() {

            

Reported by PMD.

StringBuffers can grow quite a lot, and so may become a source of memory leak (if the owning class has a long life time).
Design

Line: 39

                private Module mModule;
  private FloatBuffer mInputTensorBuffer;
  private Tensor mInputTensor;
  private StringBuilder mTextViewStringBuilder = new StringBuilder();

  private final Runnable mModuleForwardRunnable =
      new Runnable() {
        @Override
        public void run() {

            

Reported by PMD.

Private field 'mTextViewStringBuilder' could be made final; it is only initialized in the declaration or constructor.
Design

Line: 39

                private Module mModule;
  private FloatBuffer mInputTensorBuffer;
  private Tensor mInputTensor;
  private StringBuilder mTextViewStringBuilder = new StringBuilder();

  private final Runnable mModuleForwardRunnable =
      new Runnable() {
        @Override
        public void run() {

            

Reported by PMD.

caffe2/python/muji.py
23 issues
Function name "OnGPU" doesn't conform to snake_case naming style
Error

Line: 23 Column: 1

              from caffe2.python import workspace


def OnGPU(gpu_id):
    """A utility function that returns a device option protobuf of the
  specified gpu id.
  """
    device_option = caffe2_pb2.DeviceOption()
    device_option.device_type = workspace.GpuDeviceType

            

Reported by Pylint.

Function name "OnCPU" doesn't conform to snake_case naming style
Error

Line: 33 Column: 1

                  return device_option


def OnCPU():
    device_option = caffe2_pb2.DeviceOption()
    device_option.device_type = caffe2_pb2.CPU
    return device_option



            

Reported by Pylint.

Missing function or method docstring
Error

Line: 33 Column: 1

                  return device_option


def OnCPU():
    device_option = caffe2_pb2.DeviceOption()
    device_option.device_type = caffe2_pb2.CPU
    return device_option



            

Reported by Pylint.

Function name "Allreduce" doesn't conform to snake_case naming style
Error

Line: 39 Column: 1

                  return device_option


def Allreduce(net, blobs, reduced_affix="_reduced", gpu_indices=None):
    """The general Allreduce interface that reroutes the function calls.
    CPUs and AMD GPUs are not supported because
    GetGpuPeerAccessPattern is called to get gpu peer access pattern.
  """
    if gpu_indices is None:

            

Reported by Pylint.

Unnecessary "elif" after "return"
Error

Line: 52 Column: 5

                          (len(gpu_indices), len(blobs))
        )
    pattern = workspace.GetGpuPeerAccessPattern()
    if len(blobs) == 2 and pattern.shape[0] >= 2 and np.all(pattern[:2, :2]):
        return Allreduce2(net, blobs, reduced_affix, gpu_indices)
    elif len(blobs) == 4 and pattern.shape[0] >= 4 and np.all(pattern[:4, :4]):
        return Allreduce4(net, blobs, reduced_affix, gpu_indices)
    elif len(blobs) == 4 and pattern.shape[0] >= 4 and np.all(pattern[:2, :2]) and np.all(pattern[2:4, 2:4]):
        return Allreduce4Group2(net, blobs, reduced_affix, gpu_indices)

            

Reported by Pylint.

Line too long (109/100)
Error

Line: 56 Column: 1

                      return Allreduce2(net, blobs, reduced_affix, gpu_indices)
    elif len(blobs) == 4 and pattern.shape[0] >= 4 and np.all(pattern[:4, :4]):
        return Allreduce4(net, blobs, reduced_affix, gpu_indices)
    elif len(blobs) == 4 and pattern.shape[0] >= 4 and np.all(pattern[:2, :2]) and np.all(pattern[2:4, 2:4]):
        return Allreduce4Group2(net, blobs, reduced_affix, gpu_indices)
    elif len(blobs) == 8 and pattern.shape[0] >= 8 and np.all(pattern[:8, :8]):
        return Allreduce8(net, blobs, reduced_affix, gpu_indices)
    else:
        return AllreduceFallback(net, blobs, reduced_affix, gpu_indices)

            

Reported by Pylint.

Function name "Allreduce2" doesn't conform to snake_case naming style
Error

Line: 64 Column: 1

                      return AllreduceFallback(net, blobs, reduced_affix, gpu_indices)


def Allreduce2(net, blobs, reduced_affix, gpu_indices):
    """Allreduce for 2 gpus.

  Algorithm: 0r <- 0 + 1, 1r <- 0r, where r means "reduced"
  """
    a, b = blobs

            

Reported by Pylint.

Variable name "a" doesn't conform to snake_case naming style
Error

Line: 69 Column: 5

              
  Algorithm: 0r <- 0 + 1, 1r <- 0r, where r means "reduced"
  """
    a, b = blobs
    gpu_a, gpu_b = gpu_indices
    a_reduced = net.Add([a, b], a + reduced_affix, device_option=OnGPU(gpu_a))
    b_reduced = a_reduced.Copy(
        [],
        b + reduced_affix,

            

Reported by Pylint.

Variable name "b" doesn't conform to snake_case naming style
Error

Line: 69 Column: 8

              
  Algorithm: 0r <- 0 + 1, 1r <- 0r, where r means "reduced"
  """
    a, b = blobs
    gpu_a, gpu_b = gpu_indices
    a_reduced = net.Add([a, b], a + reduced_affix, device_option=OnGPU(gpu_a))
    b_reduced = a_reduced.Copy(
        [],
        b + reduced_affix,

            

Reported by Pylint.

Function name "Allreduce4" doesn't conform to snake_case naming style
Error

Line: 80 Column: 1

                  return a_reduced, b_reduced


def Allreduce4(net, blobs, reduced_affix, gpu_indices):
    """Allreduce for 4 gpus.

  Algorithm: 2 level reduction.
      0r <- 0 + 1, 2r <- 2 + 3
      0r <- 0r + 2r

            

Reported by Pylint.

.circleci/scripts/trigger_azure_pipeline.py
23 issues
Redefining name 'build_id' from outer scope (line 126)
Error

Line: 40 Column: 5

                      print("Failed to parse the response. Check if the Azure DevOps PAT is incorrect or expired.")
        sys.exit(-1)

    build_id = run_build_json['id']

    print("Submitted bulid: " + str(build_id))
    print("Bulid URL: " + run_build_json['url'])
    return build_id


            

Reported by Pylint.

Redefining name 'build_status' from outer scope (line 127)
Error

Line: 62 Column: 5

              
def wait_for_build(_id):
    build_detail = get_build(_id)
    build_status = build_detail['status']

    while build_status == 'notStarted':
        print('Waiting for run to start: ' + str(_id))
        sys.stdout.flush()
        try:

            

Reported by Pylint.

Catching too general exception Exception
Error

Line: 70 Column: 16

                      try:
            build_detail = get_build(_id)
            build_status = build_detail['status']
        except Exception as e:
            print("Error getting build")
            print(e)

        time.sleep(30)


            

Reported by Pylint.

Catching too general exception Exception
Error

Line: 83 Column: 16

                      try:
            print("Waiting for log: " + str(_id))
            logs = get_build_logs(_id)
        except Exception as e:
            print("Error fetching logs")
            print(e)
            time.sleep(30)
            continue


            

Reported by Pylint.

Catching too general exception Exception
Error

Line: 98 Column: 20

                          try:
                log_content = get_log_content(log['url'])
                print(log_content)
            except Exception as e:
                print("Error getting log content")
                print(e)
            sys.stdout.flush()
        build_detail = get_build(_id)
        build_status = build_detail['status']

            

Reported by Pylint.

Redefining name 'build_result' from outer scope (line 127)
Error

Line: 106 Column: 5

                      build_status = build_detail['status']
        time.sleep(30)

    build_result = build_detail['result']

    print("Bulid status: " + build_status)
    print("Bulid result: " + build_result)

    return build_status, build_result

            

Reported by Pylint.

Line too long (105/100)
Error

Line: 1 Column: 1

              # Documentation: https://docs.microsoft.com/en-us/rest/api/azure/devops/build/?view=azure-devops-rest-6.0

import re
import json
import os
import sys
import requests
import time


            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              # Documentation: https://docs.microsoft.com/en-us/rest/api/azure/devops/build/?view=azure-devops-rest-6.0

import re
import json
import os
import sys
import requests
import time


            

Reported by Pylint.

standard import "import time" should be placed before "import requests"
Error

Line: 8 Column: 1

              import os
import sys
import requests
import time

AZURE_PIPELINE_BASE_URL = "https://aiinfra.visualstudio.com/PyTorch/"
AZURE_DEVOPS_PAT_BASE64 = os.environ.get("AZURE_DEVOPS_PAT_BASE64_SECRET", "")
PIPELINE_ID = "911"
PROJECT_ID = "0628bce4-2d33-499e-bac5-530e12db160f"

            

Reported by Pylint.

Constant name "build_base_url" doesn't conform to UPPER_CASE naming style
Error

Line: 17 Column: 1

              TARGET_BRANCH = os.environ.get("CIRCLE_BRANCH", "master")
TARGET_COMMIT = os.environ.get("CIRCLE_SHA1", "")

build_base_url = AZURE_PIPELINE_BASE_URL + "_apis/build/builds?api-version=6.0"

s = requests.Session()
s.headers.update({"Authorization": "Basic " + AZURE_DEVOPS_PAT_BASE64})

def submit_build(pipeline_id, project_id, source_branch, source_version):

            

Reported by Pylint.

benchmarks/operator_benchmark/pt/qembedding_pack_test.py
22 issues
Unable to import 'torch'
Error

Line: 3 Column: 1

              
import operator_benchmark as op_bench
import torch
import numpy as np

embeddingbag_conversion_short_configs = op_bench.cross_product_configs(
    num_embeddings=(80,),
    embedding_dim=(128, 256, 512),
    tags=('short',)

            

Reported by Pylint.

Module 'operator_benchmark' has no 'cross_product_configs' member
Error

Line: 6 Column: 41

              import torch
import numpy as np

embeddingbag_conversion_short_configs = op_bench.cross_product_configs(
    num_embeddings=(80,),
    embedding_dim=(128, 256, 512),
    tags=('short',)
)


            

Reported by Pylint.

Module 'operator_benchmark' has no 'cross_product_configs' member
Error

Line: 12 Column: 40

                  tags=('short',)
)

embeddingbag_conversion_long_configs = op_bench.cross_product_configs(
    num_embeddings=(100, 120, 1000),
    embedding_dim=(16, 64, 128, 256, 512, 1024, 2048),
    tags=('long',)
)


            

Reported by Pylint.

Module 'operator_benchmark' has no 'op_list' member
Error

Line: 18 Column: 18

                  tags=('long',)
)

conversion_ops = op_bench.op_list(
    attrs=(
        ('qembeddingbag_byte_prepack', torch.ops.quantized.embedding_bag_byte_prepack),
        ('qembeddingbag_4bit_prepack', torch.ops.quantized.embedding_bag_4bit_prepack),
        ('qembeddingbag_2bit_prepack', torch.ops.quantized.embedding_bag_2bit_prepack),
    ),

            

Reported by Pylint.

Module 'operator_benchmark' has no 'op_list' member
Error

Line: 27 Column: 14

                  attr_names=('op_name', 'op_func'),
)

unpack_ops = op_bench.op_list(
    attrs=(
        ('qembeddingbag_byte_unpack', torch.ops.quantized.embedding_bag_byte_unpack),
        ('qembeddingbag_4bit_unpack', torch.ops.quantized.embedding_bag_4bit_unpack),
        ('qembeddingbag_2bit_unpack', torch.ops.quantized.embedding_bag_2bit_unpack),
    ),

            

Reported by Pylint.

Module 'operator_benchmark' has no 'TorchBenchmarkBase' member
Error

Line: 36 Column: 36

                  attr_names=('op_name', 'op_func'),
)

class EmbeddingBagFloatToFusedBase(op_bench.TorchBenchmarkBase):
    def init(self, num_embeddings, embedding_dim, op_func):
        self.inputs = {
            "weight": torch.from_numpy((np.random.random_sample((
                num_embeddings, embedding_dim)) + 1).astype(np.float32))
        }

            

Reported by Pylint.

Module 'operator_benchmark' has no 'TorchBenchmarkBase' member
Error

Line: 47 Column: 36

                  def forward(self, weight):
        return self.op_func(weight)

class EmbeddingBagFusedToFloatBase(op_bench.TorchBenchmarkBase):
    def init(self, num_embeddings, embedding_dim, op_func):
        weight = torch.randn(num_embeddings, embedding_dim + 8, dtype=torch.float)
        self.inputs = {
            "packed_weight": weight.to(torch.uint8)
        }

            

Reported by Pylint.

Module 'operator_benchmark' has no 'generate_pt_tests_from_op_list' member
Error

Line: 59 Column: 1

                      return self.op_func(packed_weight)


op_bench.generate_pt_tests_from_op_list(conversion_ops,
                                        embeddingbag_conversion_short_configs + embeddingbag_conversion_long_configs,
                                        EmbeddingBagFloatToFusedBase)
op_bench.generate_pt_tests_from_op_list(unpack_ops,
                                        embeddingbag_conversion_short_configs + embeddingbag_conversion_long_configs,
                                        EmbeddingBagFusedToFloatBase)

            

Reported by Pylint.

Module 'operator_benchmark' has no 'generate_pt_tests_from_op_list' member
Error

Line: 62 Column: 1

              op_bench.generate_pt_tests_from_op_list(conversion_ops,
                                        embeddingbag_conversion_short_configs + embeddingbag_conversion_long_configs,
                                        EmbeddingBagFloatToFusedBase)
op_bench.generate_pt_tests_from_op_list(unpack_ops,
                                        embeddingbag_conversion_short_configs + embeddingbag_conversion_long_configs,
                                        EmbeddingBagFusedToFloatBase)

if __name__ == "__main__":
    op_bench.benchmark_runner.main()

            

Reported by Pylint.

Attribute 'inputs' defined outside __init__
Error

Line: 38 Column: 9

              
class EmbeddingBagFloatToFusedBase(op_bench.TorchBenchmarkBase):
    def init(self, num_embeddings, embedding_dim, op_func):
        self.inputs = {
            "weight": torch.from_numpy((np.random.random_sample((
                num_embeddings, embedding_dim)) + 1).astype(np.float32))
        }
        self.op_func = op_func


            

Reported by Pylint.

caffe2/python/models/shufflenet.py
22 issues
String statement has no effect
Error

Line: 10 Column: 1

              
from caffe2.python import brew

"""
Utilitiy for creating ShuffleNet
"ShuffleNet V2: Practical Guidelines for EfficientCNN Architecture Design" by Ma et. al. 2018
"""

OUTPUT_CHANNELS = {

            

Reported by Pylint.

Attribute 'last_conv' defined outside __init__
Error

Line: 70 Column: 9

                              )
                self.add_basic_unit(prefix, in_channels)

        self.last_conv = brew.conv(self.model, self.prev_blob, 'conv5',
                                   in_channels, self.output_channels[4],
                                   kernel=1)
        self.avg_pool = self.model.AveragePool(self.last_conv, 'avg_pool',
                                               kernel=7)
        self.last_out = brew.fc(self.model,

            

Reported by Pylint.

Attribute 'avg_pool' defined outside __init__
Error

Line: 73 Column: 9

                      self.last_conv = brew.conv(self.model, self.prev_blob, 'conv5',
                                   in_channels, self.output_channels[4],
                                   kernel=1)
        self.avg_pool = self.model.AveragePool(self.last_conv, 'avg_pool',
                                               kernel=7)
        self.last_out = brew.fc(self.model,
                                self.avg_pool,
                                'last_out_L{}'.format(self.num_labels),
                                self.output_channels[4],

            

Reported by Pylint.

Attribute 'last_out' defined outside __init__
Error

Line: 75 Column: 9

                                                 kernel=1)
        self.avg_pool = self.model.AveragePool(self.last_conv, 'avg_pool',
                                               kernel=7)
        self.last_out = brew.fc(self.model,
                                self.avg_pool,
                                'last_out_L{}'.format(self.num_labels),
                                self.output_channels[4],
                                self.num_labels)


            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              # Module caffe2.python.models.shufflenet






from caffe2.python import brew


            

Reported by Pylint.

Too many instance attributes (13/7)
Error

Line: 23 Column: 1

              }


class ShuffleNetV2Builder():
    def __init__(
        self,
        model,
        data,
        num_input_channels,

            

Reported by Pylint.

Missing class docstring
Error

Line: 23 Column: 1

              }


class ShuffleNetV2Builder():
    def __init__(
        self,
        model,
        data,
        num_input_channels,

            

Reported by Pylint.

Too many arguments (10/5)
Error

Line: 24 Column: 5

              

class ShuffleNetV2Builder():
    def __init__(
        self,
        model,
        data,
        num_input_channels,
        num_labels,

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 47 Column: 5

                      self.detection = detection
        self.bn_epsilon = bn_epsilon

    def create(self):
        in_channels = self.output_channels[0]

        self.prev_blob = brew.conv(self.model, self.prev_blob, 'stage1_conv',
                                   self.num_input_channels, in_channels,
                                   weight_init=("MSRAFill", {}),

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 82 Column: 5

                                              self.num_labels)

    # spatial down sampling unit with stride=2
    def add_spatial_ds_unit(self, prefix, in_channels, out_channels, stride=2):
        right = left = self.prev_blob
        out_channels = out_channels // 2

        # Enlarge the receptive field for detection task
        if self.detection:

            

Reported by Pylint.