The following issues were found by static analysis (Pylint and Bandit):

torch/distributions/lowrank_multivariate_normal.py
49 issues
Module 'torch' has no 'matmul' member
Error

Line: 17 Column: 9

                  """
    m = W.size(-1)
    Wt_Dinv = W.transpose(-1, -2) / D.unsqueeze(-2)
    K = torch.matmul(Wt_Dinv, W).contiguous()
    K.view(-1, m * m)[:, ::m + 1] += 1  # add identity matrix to K
    return torch.linalg.cholesky(K)


def _batch_lowrank_logdet(W, D, capacitance_tril):

            

Reported by Pylint.
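
Most of the `no-member` / "has no 'X' member" findings on `torch` in this report are the well-known Pylint limitation with C-extension modules: the members exist at runtime but are not visible to static inspection. A minimal sketch of the usual per-line suppression (assuming these really are false positives and not genuine API errors):

    # Minimal sketch, assuming the findings are false positives on torch's
    # C-extension members rather than genuine API errors.
    # pylint: disable=no-member
    import torch

    a = torch.randn(2, 3)
    b = torch.randn(3, 4)
    print(torch.matmul(a, b).shape)  # torch.Size([2, 4])

Project-wide, the same effect is usually achieved by listing torch under Pylint's extension-pkg-allow-list (older name: extension-pkg-whitelist) or adding a pattern such as torch.* to generated-members, so per-line disables are not needed.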

Module 'torch' has no 'Size' member
Error

Line: 114 Column: 23

              
    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(LowRankMultivariateNormal, _instance)
        batch_shape = torch.Size(batch_shape)
        loc_shape = batch_shape + self.event_shape
        new.loc = self.loc.expand(loc_shape)
        new.cov_diag = self.cov_diag.expand(loc_shape)
        new.cov_factor = self.cov_factor.expand(loc_shape + self.cov_factor.shape[-1:])
        new._unbroadcasted_cov_factor = self._unbroadcasted_cov_factor

            

Reported by Pylint.

Module 'torch' has no 'matmul' member
Error

Line: 147 Column: 13

                      n = self._event_shape[0]
        cov_diag_sqrt_unsqueeze = self._unbroadcasted_cov_diag.sqrt().unsqueeze(-1)
        Dinvsqrt_W = self._unbroadcasted_cov_factor / cov_diag_sqrt_unsqueeze
        K = torch.matmul(Dinvsqrt_W, Dinvsqrt_W.transpose(-1, -2)).contiguous()
        K.view(-1, n * n)[:, ::n + 1] += 1  # add identity matrix to K
        scale_tril = cov_diag_sqrt_unsqueeze * torch.linalg.cholesky(K)
        return scale_tril.expand(self._batch_shape + self._event_shape + self._event_shape)

    @lazy_property

            

Reported by Pylint.

Module 'torch' has no 'matmul' member
Error

Line: 154 Column: 30

              
    @lazy_property
    def covariance_matrix(self):
        covariance_matrix = (torch.matmul(self._unbroadcasted_cov_factor,
                                          self._unbroadcasted_cov_factor.transpose(-1, -2))
                             + torch.diag_embed(self._unbroadcasted_cov_diag))
        return covariance_matrix.expand(self._batch_shape + self._event_shape +
                                        self._event_shape)


            

Reported by Pylint.

Module 'torch' has no 'diag_embed' member
Error

Line: 156 Column: 32

                  def covariance_matrix(self):
        covariance_matrix = (torch.matmul(self._unbroadcasted_cov_factor,
                                          self._unbroadcasted_cov_factor.transpose(-1, -2))
                             + torch.diag_embed(self._unbroadcasted_cov_diag))
        return covariance_matrix.expand(self._batch_shape + self._event_shape +
                                        self._event_shape)

    @lazy_property
    def precision_matrix(self):

            

Reported by Pylint.

Module 'torch' has no 'triangular_solve' member
Error

Line: 167 Column: 13

                      # where :math:`C` is the capacitance matrix.
        Wt_Dinv = (self._unbroadcasted_cov_factor.transpose(-1, -2)
                   / self._unbroadcasted_cov_diag.unsqueeze(-2))
        A = torch.triangular_solve(Wt_Dinv, self._capacitance_tril, upper=False)[0]
        precision_matrix = (torch.diag_embed(self._unbroadcasted_cov_diag.reciprocal())
                            - torch.matmul(A.transpose(-1, -2), A))
        return precision_matrix.expand(self._batch_shape + self._event_shape +
                                       self._event_shape)


            

Reported by Pylint.

Module 'torch' has no 'diag_embed' member
Error

Line: 168 Column: 29

                      Wt_Dinv = (self._unbroadcasted_cov_factor.transpose(-1, -2)
                   / self._unbroadcasted_cov_diag.unsqueeze(-2))
        A = torch.triangular_solve(Wt_Dinv, self._capacitance_tril, upper=False)[0]
        precision_matrix = (torch.diag_embed(self._unbroadcasted_cov_diag.reciprocal())
                            - torch.matmul(A.transpose(-1, -2), A))
        return precision_matrix.expand(self._batch_shape + self._event_shape +
                                       self._event_shape)

    def rsample(self, sample_shape=torch.Size()):

            

Reported by Pylint.

Module 'torch' has no 'matmul' member
Error

Line: 169 Column: 31

                                 / self._unbroadcasted_cov_diag.unsqueeze(-2))
        A = torch.triangular_solve(Wt_Dinv, self._capacitance_tril, upper=False)[0]
        precision_matrix = (torch.diag_embed(self._unbroadcasted_cov_diag.reciprocal())
                            - torch.matmul(A.transpose(-1, -2), A))
        return precision_matrix.expand(self._batch_shape + self._event_shape +
                                       self._event_shape)

    def rsample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)

            

Reported by Pylint.

Module 'torch' has no 'Size' member
Error

Line: 173 Column: 36

                      return precision_matrix.expand(self._batch_shape + self._event_shape +
                                       self._event_shape)

    def rsample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        W_shape = shape[:-1] + self.cov_factor.shape[-1:]
        eps_W = _standard_normal(W_shape, dtype=self.loc.dtype, device=self.loc.device)
        eps_D = _standard_normal(shape, dtype=self.loc.dtype, device=self.loc.device)
        return (self.loc + _batch_mv(self._unbroadcasted_cov_factor, eps_W)

            

Reported by Pylint.

Unused argument 'W'
Error

Line: 22 Column: 27

                  return torch.linalg.cholesky(K)


def _batch_lowrank_logdet(W, D, capacitance_tril):
    r"""
    Uses "matrix determinant lemma"::
        log|W @ W.T + D| = log|C| + log|D|,
    where :math:`C` is the capacitance matrix :math:`I + W.T @ inv(D) @ W`, to compute
    the log determinant.

            

Reported by Pylint.
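
If the `W` parameter is kept on purpose (for example, for signature symmetry with the sibling low-rank helpers), the usual remediation is to mark it as intentionally unused rather than drop it. A small sketch of the two common conventions:

    # Sketch of the two common ways to mark a deliberately unused parameter.
    def with_underscore(value, _unused):
        return value * 2

    def with_pragma(value, unused):  # pylint: disable=unused-argument
        return value * 2

    print(with_underscore(21, None), with_pragma(21, None))  # 42 42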

torch/cuda/memory.py
49 issues
Attempted relative import beyond top-level package
Error

Line: 7 Column: 1

              from typing import Any, Dict, Union

import torch
from . import is_initialized, _get_device_index, _lazy_init
from torch.types import Device

def _host_allocator():
    _lazy_init()
    return torch._C._cuda_cudaHostAllocator()

            

Reported by Pylint.
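
This finding usually means Pylint analyzed torch/cuda/memory.py without treating torch/cuda as a package (for example, when run on the file in isolation). If the relative import is valid at runtime, the warning-free equivalent is the absolute form; a sketch, assuming the same names resolve through the torch.cuda package:

    # Sketch, assuming these names resolve through the torch.cuda package at
    # runtime exactly as the relative import does.
    from torch.cuda import is_initialized, _get_device_index, _lazy_init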

Unable to import 'pynvml'
Error

Line: 549 Column: 9

                  """

    try:
        import pynvml  # type: ignore[import]
    except ModuleNotFoundError:
        return("pynvml module not found, please install pynvml")
    from pynvml import NVMLError_DriverNotLoaded
    try:
        pynvml.nvmlInit()

            

Reported by Pylint.

Unable to import 'pynvml'
Error

Line: 552 Column: 5

                      import pynvml  # type: ignore[import]
    except ModuleNotFoundError:
        return("pynvml module not found, please install pynvml")
    from pynvml import NVMLError_DriverNotLoaded
    try:
        pynvml.nvmlInit()
    except NVMLError_DriverNotLoaded:
        return ("cuda driver can't be loaded, is cuda enabled?")
    device = _get_device_index(device, optional=True)

            

Reported by Pylint.
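
pynvml is an optional dependency that the surrounding code already guards with try/except, so the import-error findings mostly reflect the linting environment. One way to keep the guard and silence the finding, sketched:

    # Sketch of the optional-dependency pattern already used in the file,
    # with an inline pragma for environments where pynvml is not installed.
    try:
        import pynvml  # pylint: disable=import-error
    except ModuleNotFoundError:
        pynvml = None

    if pynvml is None:
        print("pynvml module not found, please install pynvml")

Alternatively, pynvml can be listed in Pylint's ignored-modules so the check is suppressed project-wide.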

Access to a protected member _C of a client class
Error

Line: 12 Column: 12

              
def _host_allocator():
    _lazy_init()
    return torch._C._cuda_cudaHostAllocator()


@contextlib.contextmanager
def _free_mutex():
    torch._C._cuda_lock_mutex()

            

Reported by Pylint.

Access to a protected member _cuda_cudaHostAllocator of a client class
Error

Line: 12 Column: 12

              
def _host_allocator():
    _lazy_init()
    return torch._C._cuda_cudaHostAllocator()


@contextlib.contextmanager
def _free_mutex():
    torch._C._cuda_lock_mutex()

            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 17 Column: 5

              
@contextlib.contextmanager
def _free_mutex():
    torch._C._cuda_lock_mutex()
    try:
        yield
    finally:
        torch._C._cuda_unlock_mutex()


            

Reported by Pylint.

Access to a protected member _cuda_lock_mutex of a client class
Error

Line: 17 Column: 5

              
@contextlib.contextmanager
def _free_mutex():
    torch._C._cuda_lock_mutex()
    try:
        yield
    finally:
        torch._C._cuda_unlock_mutex()


            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 21 Column: 9

                  try:
        yield
    finally:
        torch._C._cuda_unlock_mutex()


def caching_allocator_alloc(size, device: Union[Device, int] = None, stream=None):
    r"""Performs a memory allocation using the CUDA memory allocator.


            

Reported by Pylint.

Access to a protected member _cuda_unlock_mutex of a client class
Error

Line: 21 Column: 9

                  try:
        yield
    finally:
        torch._C._cuda_unlock_mutex()


def caching_allocator_alloc(size, device: Union[Device, int] = None, stream=None):
    r"""Performs a memory allocation using the CUDA memory allocator.


            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 55 Column: 16

                                      '`torch.cuda.Stream` or `int` representing a pointer '
                        'to a exisiting stream')
    with torch.cuda.device(device):
        return torch._C._cuda_cudaCachingAllocator_raw_alloc(size, stream)


def caching_allocator_delete(mem_ptr):
    r"""Deletes memory allocated using the CUDA memory allocator.


            

Reported by Pylint.
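
The repeated protected-access findings on torch._C.* arise because torch/cuda/memory.py lives inside the torch package itself and calls its own C bindings; Pylint cannot distinguish internal code from client code. A minimal sketch of the local suppression, assuming the calls are meant to stay:

    # Sketch: acknowledge the intentional use of torch's private C bindings.
    import torch

    def _host_allocator():
        return torch._C._cuda_cudaHostAllocator()  # pylint: disable=protected-access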

test/custom_operator/test_custom_classes.py
49 issues
Unable to import 'torch'
Error

Line: 2 Column: 1

              import unittest
import torch
from torch import ops
import torch.jit as jit
import glob
import os

from torch.testing._internal.common_utils import TestCase, run_tests


            

Reported by Pylint.

Unable to import 'torch'
Error

Line: 3 Column: 1

              import unittest
import torch
from torch import ops
import torch.jit as jit
import glob
import os

from torch.testing._internal.common_utils import TestCase, run_tests


            

Reported by Pylint.

Unable to import 'torch.jit'
Error

Line: 4 Column: 1

              import unittest
import torch
from torch import ops
import torch.jit as jit
import glob
import os

from torch.testing._internal.common_utils import TestCase, run_tests


            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 8 Column: 1

              import glob
import os

from torch.testing._internal.common_utils import TestCase, run_tests


def get_custom_class_library_path():
    library_filename = glob.glob("build/*custom_class*")
    assert (len(library_filename) == 1)

            

Reported by Pylint.

Access to a protected member _TorchScriptTesting of a client class
Error

Line: 30 Column: 19

              
    def test_no_return_class(self):
        def f():
            val = torch.classes._TorchScriptTesting._Foo(5, 3)
            return val.info()
        self.assertEqual(*test_equality(f, lambda x: x))

    def test_constructor_with_args(self):
        def f():

            

Reported by Pylint.

Access to a protected member _Foo of a client class
Error

Line: 30 Column: 19

              
    def test_no_return_class(self):
        def f():
            val = torch.classes._TorchScriptTesting._Foo(5, 3)
            return val.info()
        self.assertEqual(*test_equality(f, lambda x: x))

    def test_constructor_with_args(self):
        def f():

            

Reported by Pylint.

Access to a protected member _Foo of a client class
Error

Line: 36 Column: 19

              
    def test_constructor_with_args(self):
        def f():
            val = torch.classes._TorchScriptTesting._Foo(5, 3)
            return val
        self.assertEqual(*test_equality(f, lambda x: x.info()))

    def test_function_call_with_args(self):
        def f():

            

Reported by Pylint.

Access to a protected member _TorchScriptTesting of a client class
Error

Line: 36 Column: 19

              
    def test_constructor_with_args(self):
        def f():
            val = torch.classes._TorchScriptTesting._Foo(5, 3)
            return val
        self.assertEqual(*test_equality(f, lambda x: x.info()))

    def test_function_call_with_args(self):
        def f():

            

Reported by Pylint.

Access to a protected member _Foo of a client class
Error

Line: 42 Column: 19

              
    def test_function_call_with_args(self):
        def f():
            val = torch.classes._TorchScriptTesting._Foo(5, 3)
            val.increment(1)
            return val

        self.assertEqual(*test_equality(f, lambda x: x.info()))


            

Reported by Pylint.

Access to a protected member _TorchScriptTesting of a client class
Error

Line: 42 Column: 19

              
    def test_function_call_with_args(self):
        def f():
            val = torch.classes._TorchScriptTesting._Foo(5, 3)
            val.increment(1)
            return val

        self.assertEqual(*test_equality(f, lambda x: x.info()))


            

Reported by Pylint.

test/test_jit.py
49 issues
expected an indented block (<unknown>, line 12722)
Error

Line: 12722 Column: 26

                                            c   # type: Tensor
                              ):
                # type: (int, int, int) -> Tensor
                # type: bad type line  # noqa: F723

                return a + b + c

        with self.assertRaisesRegex(RuntimeError, "Return type line"):
            @torch.jit.script

            

Reported by Pylint.

Use of possibly insecure function - consider using safer ast.literal_eval.
Security blacklist

Line: 2164
Suggestion: https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b307-eval

                      for i in range(len(constants)):
            # check_constant constructs the second dict with another Tensor
            # which fails the comparison
            if not isinstance(eval(constants[i]), (str, int, float)):
                continue
            for j in range(len(constants)):
                dict_constant = "{ " + constants[i] + ": " + constants[j] + "}"
                check_constant(dict_constant)
                dict_constants.append(dict_constant)

            

Reported by Bandit.
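
The flagged eval only needs to classify literal constants, which is exactly what ast.literal_eval handles; a sketch of the substitution Bandit suggests:

    # Sketch: ast.literal_eval parses literal expressions only, so it cannot
    # execute arbitrary code the way eval can.
    import ast

    constants = ["1", "2.5", "'a string'", "(1, 2)"]
    for constant in constants:
        value = ast.literal_eval(constant)
        if isinstance(value, (str, int, float)):
            print(constant, "->", type(value).__name__)

Constants that are not pure literals (such as tensor-constructing expressions elsewhere in the test) would raise an exception here and would still need eval plus an explicit nosec marker.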

Pickle and modules that wrap it can be unsafe when used to deserialize untrusted data, possible security issue.
Security blacklist

Line: 4320
Suggestion: https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b301-pickle

                          debug_files = list(filter(lambda f: f.endswith('.debug_pkl'), files))
            self.assertEqual(len(debug_files), 1)
            debug_file = archive.open(debug_files[0])
            return pickle.load(debug_file), buffer

        records1, buffer = debug_records_from_mod(self, ft3)

        buffer.seek(0)
        loaded = torch.jit.load(buffer)

            

Reported by Bandit.

Pickle and modules that wrap it can be unsafe when used to deserialize untrusted data, possible security issue.
Security blacklist

Line: 4370
Suggestion: https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b301-pickle

                          files = list(filter(lambda x: x.startswith('archive/code/'), archive.namelist()))
            debug_files = filter(lambda f: f.endswith('.debug_pkl'), files)
            debug_files = (archive.open(f) for f in debug_files)
            debug_files = (pickle.load(f) for f in debug_files)
            return list(debug_files)

        debug_files = debug_records_from_mod(ft3)
        for debug_file in debug_files:
            for i in range(len(debug_file) - 1):

            

Reported by Bandit.
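
Both pickle findings come from tests unpickling debug records out of an archive the test itself just serialized, so the data is trusted; the usual resolution is to acknowledge the finding explicitly rather than avoid pickle. A sketch:

    # Sketch: pickle restricted to data produced in the same process, with an
    # explicit Bandit annotation documenting why the call is acceptable.
    import io
    import pickle

    buffer = io.BytesIO()
    pickle.dump({"records": [1, 2, 3]}, buffer)
    buffer.seek(0)
    records = pickle.load(buffer)  # nosec B301 - trusted, test-generated data
    print(records)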

Use of exec detected.
Security

Line: 4788
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b102_exec_used.html

              
    def _check_code(self, code_str, fn_name, inputs):
        scope = {}
        exec(code_str, globals(), scope)
        cu = torch.jit.CompilationUnit(code_str)
        self.assertEqual(cu.func(*inputs), scope[fn_name](*inputs))

    @unittest.skipIf(not RUN_CUDA, 'no CUDA')
    def test_scriptmodule_releases_tensors_cuda(self):

            

Reported by Bandit.

Use of exec detected.
Security

Line: 7116
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b102_exec_used.html

                          for inp in inputs:
                code = tensor_template.format(tensor_op=op, input=inp)
                scope = {}
                exec(code, globals(), scope)
                cu = torch.jit.CompilationUnit(code)
                t1 = cu.func()
                t2 = scope['func']()
                if inp == 'empty_list':
                    # torchscript returns int tensor, python returns float tensor

            

Reported by Bandit.

Use of exec detected.
Security

Line: 7144
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b102_exec_used.html

                          for inp, expect in zip(inputs, expected_shape):
                code = tensor_template.format(tensor_op=op, input=inp)
                scope = {}
                exec(code, globals(), scope)
                cu = torch.jit.CompilationUnit(code)
                torch._C._jit_pass_complete_shape_analysis(cu.func.graph, (), False)
                FileCheck().check(expect).check("aten::{tensor_op}".format(tensor_op=op)).run(cu.func.graph)

        @torch.jit.script

            

Reported by Bandit.

Use of exec detected.
Security

Line: 7240
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b102_exec_used.html

                                      continue
                    code = tensor_template.format(list_create=li, tensor_op=op, options=option)
                    scope = {}
                    exec(code, globals(), scope)
                    cu = torch.jit.CompilationUnit(code)
                    t1 = cu.func()
                    t2 = scope['func']()
                    if t1.dtype == torch.float16:  # equality NYI for half tensor
                        self.assertTrue(str(t1) == str(t2))

            

Reported by Bandit.

Use of exec detected.
Security

Line: 8373
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b102_exec_used.html

                                  # print("testing {}".format(return_line))
                    code = template.format(return_line=return_line)
                    scope = {}
                    exec(code, globals(), scope)
                    cu = torch.jit.CompilationUnit(code)
                    graph = cu.func.graph
                    torch._C._jit_pass_complete_shape_analysis(graph, (), False)
                    input_array = [1, 2, 3]
                    for _ in range(1, input_dims):

            

Reported by Bandit.

Use of exec detected.
Security

Line: 8443
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b102_exec_used.html

                                  # print("testing {}".format(return_line))
                    code = template.format(first_arg, second_arg, op)
                    scope = {}
                    exec(code, globals(), scope)
                    non_jit_result = scope['func']()

                    cu = torch.jit.CompilationUnit(code)
                    graph = cu.func.graph
                    torch._C._jit_pass_complete_shape_analysis(graph, (), False)

            

Reported by Bandit.
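
All of the exec findings in test_jit.py follow the same pattern: the test formats a small source template and executes it to obtain a reference Python implementation to compare against TorchScript. Since the executed string is generated by the test itself, a scoped namespace plus an explicit annotation is the typical resolution; a sketch:

    # Sketch: exec confined to an explicit local namespace, on a source string
    # the test itself generated.
    code_str = "def func(x):\n    return x + 1\n"
    scope = {}
    exec(code_str, {}, scope)  # nosec B102 - test-generated source, not user input
    print(scope["func"](41))  # 42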

scripts/model_zoo/update-caffe2-models.py
49 issues
Unable to import 'caffe2.python.models.download'
Error

Line: 11 Column: 1

              
from six.moves.urllib.request import urlretrieve

from caffe2.python.models.download import downloadFromURLToFile, getURLFromName, deleteDirectory

class SomeClass:
    # largely copied from
    # https://github.com/onnx/onnx-caffe2/blob/master/tests/caffe2_ref_test.py
    def _download(self, model):

            

Reported by Pylint.

Catching too general exception Exception
Error

Line: 32 Column: 20

                                  # Caffe2 78c014e752a374d905ecfb465d44fa16e02a28f1
                    # (Sep 17, 2017)
                    downloadFromURLToFile(url, dest)
            except Exception as e:
                print("Abort: {reason}".format(reason=e))
                print("Cleaning up...")
                deleteDirectory(model_dir)
                exit(1)


            

Reported by Pylint.
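
The broad `except Exception` exists only to clean up the partially downloaded model directory; narrowing it to the failures a download can actually raise keeps the cleanup while satisfying the check. A sketch, assuming network and filesystem errors are the intended cases:

    # Sketch: catch the specific failure modes instead of Exception, keeping
    # the cleanup-and-abort behaviour.
    import sys
    import urllib.error

    def download_with_cleanup(download, cleanup):
        try:
            download()
        except (urllib.error.URLError, OSError) as e:
            print("Abort: {reason}".format(reason=e))
            print("Cleaning up...")
            cleanup()
            sys.exit(1)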

Audit url open for permitted schemes. Allowing use of file:/ or custom schemes is often unexpected.
Security blacklist

Line: 64
Suggestion: https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b310-urllib-urlopen

                      try:
            download_file.close()
            print('Start downloading model {} from {}'.format(model, url))
            urlretrieve(url, download_file.name)
            print('Done')
            with tarfile.open(download_file.name) as t:
                t.extractall(models_dir)
        except Exception as e:
            print('Failed to prepare data for model {}: {}'.format(model, e))

            

Reported by Bandit.
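
Bandit's audit here is about urlretrieve accepting arbitrary schemes (file:, custom handlers); validating the scheme before the call addresses it. A sketch:

    # Sketch: reject anything other than http(s) before fetching.
    from urllib.parse import urlparse
    from urllib.request import urlretrieve

    def fetch(url, dest):
        if urlparse(url).scheme not in ("http", "https"):
            raise ValueError("refusing to fetch non-http(s) URL: " + url)
        urlretrieve(url, dest)  # nosec B310 - scheme validated above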

TODO currently onnx can't translate squeezenet :(
Error

Line: 81 Column: 3

                  'inception_v2',
    'resnet50',

    # TODO currently onnx can't translate squeezenet :(
    # 'squeezenet',

    'vgg16',

    # TODO currently vgg19 doesn't work in the CI environment,

            

Reported by Pylint.

TODO currently vgg19 doesn't work in the CI environment,
Error

Line: 86 Column: 3

              
    'vgg16',

    # TODO currently vgg19 doesn't work in the CI environment,
    # possibly due to OOM
    # 'vgg19'
]

def download_models():

            

Reported by Pylint.

Access to a protected member _caffe2_model_dir of a client class
Error

Line: 95 Column: 28

                  sc = SomeClass()
    for model in models:
        print('update-caffe2-models.py:  downloading', model)
        caffe2_model_dir = sc._caffe2_model_dir(model)
        onnx_model_dir, onnx_models_dir = sc._onnx_model_dir(model)
        if not os.path.exists(caffe2_model_dir):
            sc._download(model)
        if not os.path.exists(onnx_model_dir):
            sc._prepare_model_data(model)

            

Reported by Pylint.

Access to a protected member _onnx_model_dir of a client class
Error

Line: 96 Column: 43

                  for model in models:
        print('update-caffe2-models.py:  downloading', model)
        caffe2_model_dir = sc._caffe2_model_dir(model)
        onnx_model_dir, onnx_models_dir = sc._onnx_model_dir(model)
        if not os.path.exists(caffe2_model_dir):
            sc._download(model)
        if not os.path.exists(onnx_model_dir):
            sc._prepare_model_data(model)


            

Reported by Pylint.

Unused variable 'onnx_models_dir'
Error

Line: 96 Column: 25

                  for model in models:
        print('update-caffe2-models.py:  downloading', model)
        caffe2_model_dir = sc._caffe2_model_dir(model)
        onnx_model_dir, onnx_models_dir = sc._onnx_model_dir(model)
        if not os.path.exists(caffe2_model_dir):
            sc._download(model)
        if not os.path.exists(onnx_model_dir):
            sc._prepare_model_data(model)


            

Reported by Pylint.
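
`onnx_models_dir` is unpacked but never read; binding the unneeded element to an underscore-prefixed name makes the intent explicit and clears the finding. A small sketch with hypothetical stand-in paths:

    # Sketch: name the unused element of the tuple with a leading underscore.
    def model_dirs(model):
        return model + "/onnx", model + "/onnx_models"

    onnx_model_dir, _onnx_models_dir = model_dirs("resnet50")
    print(onnx_model_dir)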

Access to a protected member _download of a client class
Error

Line: 98 Column: 13

                      caffe2_model_dir = sc._caffe2_model_dir(model)
        onnx_model_dir, onnx_models_dir = sc._onnx_model_dir(model)
        if not os.path.exists(caffe2_model_dir):
            sc._download(model)
        if not os.path.exists(onnx_model_dir):
            sc._prepare_model_data(model)

def generate_models():
    sc = SomeClass()

            

Reported by Pylint.

Access to a protected member _prepare_model_data of a client class
Error

Line: 100 Column: 13

                      if not os.path.exists(caffe2_model_dir):
            sc._download(model)
        if not os.path.exists(onnx_model_dir):
            sc._prepare_model_data(model)

def generate_models():
    sc = SomeClass()
    for model in models:
        print('update-caffe2-models.py:  generating', model)

            

Reported by Pylint.

torch/distributions/constraints.py
49 issues
Module 'torch' has no 'all' member; maybe 'dll'?
Error

Line: 399 Column: 16

                  event_dim = 1

    def check(self, value):
        return torch.all(value >= 0, dim=-1) & ((value.sum(-1) - 1).abs() < 1e-6)


class _Multinomial(Constraint):
    """
    Constrain to nonnegative integer values summing to at most an upper bound.

            

Reported by Pylint.

Module 'torch' has no 'finfo' member
Error

Line: 453 Column: 15

                  event_dim = 2

    def check(self, value):
        tol = torch.finfo(value.dtype).eps * value.size(-1) * 10  # 10 is an adjustable fudge factor
        row_norm = torch.linalg.norm(value.detach(), dim=-1)
        unit_row_norm = (row_norm - 1.).abs().le(tol).all(dim=-1)
        return _LowerCholesky().check(value) & unit_row_norm



            

Reported by Pylint.

Module 'torch' has no 'cat' member
Error

Line: 503 Column: 16

                          v = value.narrow(self.dim, start, length)
            checks.append(constr.check(v))
            start = start + length  # avoid += for jit compat
        return torch.cat(checks, self.dim)


class _Stack(Constraint):
    """
    Constraint functor that applies a sequence of constraints

            

Reported by Pylint.

Module 'torch' has no 'stack' member
Error

Line: 532 Column: 16

                  def check(self, value):
        assert -value.dim() <= self.dim < value.dim()
        vs = [value.select(self.dim, i) for i in range(value.size(self.dim))]
        return torch.stack([constr.check(v)
                            for v, constr in zip(vs, self.cseq)], self.dim)


# Public interface.
dependent = _Dependent()

            

Reported by Pylint.

Parameters differ from overridden 'check' method
Error

Line: 131 Column: 5

                          event_dim = self._event_dim
        return _Dependent(is_discrete=is_discrete, event_dim=event_dim)

    def check(self, x):
        raise ValueError('Cannot determine validity of dependent constraint')


def is_dependent(constraint):
    return isinstance(constraint, _Dependent)

            

Reported by Pylint.

Parameters differ from overridden 'check' method
Error

Line: 416 Column: 5

                  def __init__(self, upper_bound):
        self.upper_bound = upper_bound

    def check(self, x):
        return (x >= 0).all(dim=-1) & (x.sum(dim=-1) <= self.upper_bound)


class _LowerTriangular(Constraint):
    """

            

Reported by Pylint.
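
The base `Constraint.check` takes `value`, while these two overrides name the parameter `x`; Pylint flags the rename because keyword calls such as `constraint.check(value=...)` would break. Keeping the base name is the usual fix, sketched on a reduced version of the classes:

    # Sketch: keep the overridden method's parameter name (`value`) in the
    # subclasses so keyword calls stay compatible.
    class Constraint:
        def check(self, value):
            raise NotImplementedError

    class _Dependent(Constraint):
        def check(self, value):
            raise ValueError("Cannot determine validity of dependent constraint")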

Redefining name 'lower_triangular' from outer scope (line 556)
Error

Line: 439 Column: 9

              
    def check(self, value):
        value_tril = value.tril()
        lower_triangular = (value_tril == value).view(value.shape[:-2] + (-1,)).min(-1)[0]

        positive_diagonal = (value.diagonal(dim1=-2, dim2=-1) > 0).min(-1)[0]
        return lower_triangular & positive_diagonal



            

Reported by Pylint.
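
The local `lower_triangular` shadows the module-level constraint instance created at line 556; renaming the local removes the ambiguity. A sketch of the reworked method on a reduced class:

    # Sketch: rename the local so it no longer shadows the module-level
    # `lower_triangular` constraint instance.
    import torch

    class _LowerCholesky:
        def check(self, value):
            value_tril = value.tril()
            is_lower_tri = (value_tril == value).view(value.shape[:-2] + (-1,)).min(-1)[0]
            positive_diag = (value.diagonal(dim1=-2, dim2=-1) > 0).min(-1)[0]
            return is_lower_tri & positive_diag

    print(_LowerCholesky().check(torch.eye(3)))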

Class 'Constraint' inherits from object, can be safely removed from bases in python3
Error

Line: 61 Column: 1

              ]


class Constraint(object):
    """
    Abstract base class for constraints.

    A constraint object represents a region over which a variable is valid,
    e.g. within which a variable can be optimized.

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 108 Column: 5

                      super().__init__()

    @property
    def is_discrete(self):
        if self._is_discrete is NotImplemented:
            raise NotImplementedError(".is_discrete cannot be determined statically")
        return self._is_discrete

    @property

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 114 Column: 5

                      return self._is_discrete

    @property
    def event_dim(self):
        if self._event_dim is NotImplemented:
            raise NotImplementedError(".event_dim cannot be determined statically")
        return self._event_dim

    def __call__(self, *, is_discrete=NotImplemented, event_dim=NotImplemented):

            

Reported by Pylint.
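
The three preceding findings all concern the same class: the explicit `object` base is redundant in Python 3, and the two flagged properties lack docstrings. A combined sketch, with docstring wording derived from the errors the properties already raise (the wording itself is an assumption, not the upstream text):

    # Sketch: drop the redundant `object` base and document the properties.
    class Constraint:
        """Abstract base class for constraints."""

        _is_discrete = NotImplemented
        _event_dim = NotImplemented

        @property
        def is_discrete(self):
            """Whether the constrained space is discrete; raises if not known statically."""
            if self._is_discrete is NotImplemented:
                raise NotImplementedError(".is_discrete cannot be determined statically")
            return self._is_discrete

        @property
        def event_dim(self):
            """Number of rightmost event dimensions; raises if not known statically."""
            if self._event_dim is NotImplemented:
                raise NotImplementedError(".event_dim cannot be determined statically")
            return self._event_dim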

tools/actions_local_runner.py
49 issues
Function call with shell=True parameter identified, possible security issue.
Security injection

Line: 117
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b604_any_other_function_with_shell_equals_true.html

              
    proc = await asyncio.create_subprocess_shell(
        cmd_str,
        shell=True,
        cwd=REPO_ROOT,
        env=env,
        stdout=subprocess.PIPE if redirect else None,
        stderr=subprocess.PIPE if redirect else None,
        executable=shutil.which("bash"),

            

Reported by Bandit.
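
When the command does not actually need a shell, the exec-style API avoids the injection surface Bandit is warning about; a sketch of that alternative follows. (The runner may still need bash for GitHub-Actions scripts, in which case keeping the shell call plus an explicit nosec justification is the usual compromise.)

    # Sketch: exec-style subprocess creation, passing the argument vector
    # directly so no shell ever parses a command string.
    import asyncio

    async def run(cmd, cwd=None):
        proc = await asyncio.create_subprocess_exec(
            *cmd,
            cwd=cwd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
        stdout, _stderr = await proc.communicate()
        return proc.returncode, stdout

    print(asyncio.run(run(["echo", "quick lint"])))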

Method 'quick' is abstract in class 'Check' but is not overridden
Error

Line: 317 Column: 1

                      )


class YamlStep(Check):
    def __init__(self, step: Dict[str, Any], job_name: str, quiet: bool):
        super().__init__(files=None, quiet=quiet)
        self.step = step
        self.name = f'{job_name}: {self.step["name"]}'


            

Reported by Pylint.

Probable insecure usage of temp file/directory.
Security

Line: 325
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b108_hardcoded_tmp_directory.html

              
    async def full(self) -> CommandResult:
        env = os.environ.copy()
        env["GITHUB_WORKSPACE"] = "/tmp"
        script = self.step["run"]

        if self.quiet:
            # TODO: Either lint that GHA scripts only use 'set -eux' or make this more
            # resilient

            

Reported by Bandit.
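
The hard-coded /tmp workspace is what Bandit objects to; the tempfile module provides a per-run directory with safe permissions. A sketch:

    # Sketch: a per-run temporary workspace instead of a hard-coded /tmp path.
    import os
    import tempfile

    env = os.environ.copy()
    with tempfile.TemporaryDirectory(prefix="gha-workspace-") as workspace:
        env["GITHUB_WORKSPACE"] = workspace
        print(env["GITHUB_WORKSPACE"])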

TODO: Either lint that GHA scripts only use 'set -eux' or make this more
Error

Line: 329 Column: 3

                      script = self.step["run"]

        if self.quiet:
            # TODO: Either lint that GHA scripts only use 'set -eux' or make this more
            # resilient
            script = script.replace("set -eux", "set -eu")
            script = re.sub(r"^time ", "", script, flags=re.MULTILINE)

        return await shell_cmd(script, env=env)

            

Reported by Pylint.

Redefining name 'changed_files' from outer scope (line 337)
Error

Line: 338 Column: 5

              

def changed_files() -> Optional[List[str]]:
    changed_files: Optional[List[str]] = None
    try:
        changed_files = sorted(find_changed_files())
    except Exception:
        # If the git commands failed for some reason, bail out and use the whole list
        print(

            

Reported by Pylint.

Catching too general exception Exception
Error

Line: 341 Column: 12

                  changed_files: Optional[List[str]] = None
    try:
        changed_files = sorted(find_changed_files())
    except Exception:
        # If the git commands failed for some reason, bail out and use the whole list
        print(
            "Could not query git for changed files, falling back to testing all files instead",
            file=sys.stderr,
        )

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              #!/usr/bin/env python3
# -*- coding: utf-8 -*-

import subprocess
import sys
import os
import argparse
import yaml
import asyncio

            

Reported by Pylint.

Consider possible security implications associated with subprocess module.
Security blacklist

Line: 4
Suggestion: https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b404-import-subprocess

              #!/usr/bin/env python3
# -*- coding: utf-8 -*-

import subprocess
import sys
import os
import argparse
import yaml
import asyncio

            

Reported by Bandit.

standard import "import asyncio" should be placed before "import yaml"
Error

Line: 9 Column: 1

              import os
import argparse
import yaml
import asyncio
import shutil
import re
import fnmatch
import shlex
import configparser

            

Reported by Pylint.

standard import "import shutil" should be placed before "import yaml"
Error

Line: 10 Column: 1

              import argparse
import yaml
import asyncio
import shutil
import re
import fnmatch
import shlex
import configparser


            

Reported by Pylint.
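
The two import-ordering findings (and the earlier B404 note on subprocess) all concern the same import block; grouping standard-library imports ahead of the third-party yaml import resolves them. A sketch of the reordered block:

    # Sketch: standard-library imports first, third-party (yaml) after a blank
    # line, with the deliberate subprocess use acknowledged for Bandit.
    import argparse
    import asyncio
    import configparser
    import fnmatch
    import os
    import re
    import shlex
    import shutil
    import subprocess  # nosec B404 - used for git and lint subcommands
    import sys

    import yaml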

caffe2/quantization/server/batch_matmul_dnnlowp_op_test.py
49 issues
Unable to import 'hypothesis.strategies'
Error

Line: 7 Column: 1

              from itertools import product

import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server import utils as dnnlowp_utils
from caffe2.quantization.server.dnnlowp_test_utils import (
    avoid_vpmaddubsw_overflow_fc,

            

Reported by Pylint.

Unable to import 'hypothesis'
Error

Line: 15 Column: 1

                  avoid_vpmaddubsw_overflow_fc,
    check_quantized_results_close,
)
from hypothesis import given, settings


dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])


            

Reported by Pylint.

Unused argument 'dc'
Error

Line: 32 Column: 70

                      **hu.gcs_cpu_only
    )
    @settings(deadline=10000)
    def test_dnnlowp_batch_matmul_int(self, m, n, k, batch_size, gc, dc):
        # A and B have scale 1, so exactly represented after quantization
        A_min = -77
        A_max = A_min + 255
        A = np.round(np.random.rand(batch_size, m, k) * 255 + A_min)
        A = A.astype(np.float32)

            

Reported by Pylint.

Unused argument 'dc'
Error

Line: 129 Column: 79

                  )
    @settings(deadline=2000)
    def test_dnnlowp_batch_matmul_int_constant_B(
        self, m, n, k, C_1, C_2, A_quantized, B_quantized, out_quantized, gc, dc
    ):
        batch_dims = tuple(np.random.randint(3, size=max(C_1, C_2)))
        batch_dims_A = batch_dims[-C_1:]
        batch_dims_B = batch_dims[-C_2:]
        A = np.zeros(batch_dims_A + (m, k)).astype(np.float32)

            

Reported by Pylint.

Unused variable 'B_q_param'
Error

Line: 209 Column: 45

                                  net.Proto().op.extend([quantize_A])

                if do_quantize_B:
                    int8_given_tensor_fill, B_q_param = dnnlowp_utils.create_int8_given_tensor_fill(
                        B if trans_b else B.swapaxes(-1, -2), "B_q"
                    )
                    net.Proto().op.extend([int8_given_tensor_fill])

                batch_matmul = core.CreateOperator(

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              

import collections
from itertools import product

import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace

            

Reported by Pylint.

Missing class docstring
Error

Line: 22 Column: 1

              workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])


class DNNLowPBatchMatMulOpTest(hu.HypothesisTestCase):
    # correctness test with no quantization error in inputs
    @given(
        m=st.integers(0, 32),
        n=st.integers(4, 32),
        k=st.integers(4, 32),

            

Reported by Pylint.

Argument name "dc" doesn't conform to snake_case naming style
Error

Line: 31 Column: 5

                      batch_size=st.integers(0, 4),
        **hu.gcs_cpu_only
    )
    @settings(deadline=10000)
    def test_dnnlowp_batch_matmul_int(self, m, n, k, batch_size, gc, dc):
        # A and B have scale 1, so exactly represented after quantization
        A_min = -77
        A_max = A_min + 255
        A = np.round(np.random.rand(batch_size, m, k) * 255 + A_min)

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 31 Column: 5

                      batch_size=st.integers(0, 4),
        **hu.gcs_cpu_only
    )
    @settings(deadline=10000)
    def test_dnnlowp_batch_matmul_int(self, m, n, k, batch_size, gc, dc):
        # A and B have scale 1, so exactly represented after quantization
        A_min = -77
        A_max = A_min + 255
        A = np.round(np.random.rand(batch_size, m, k) * 255 + A_min)

            

Reported by Pylint.

Too many arguments (7/5)
Error

Line: 31 Column: 5

                      batch_size=st.integers(0, 4),
        **hu.gcs_cpu_only
    )
    @settings(deadline=10000)
    def test_dnnlowp_batch_matmul_int(self, m, n, k, batch_size, gc, dc):
        # A and B have scale 1, so exactly represented after quantization
        A_min = -77
        A_max = A_min + 255
        A = np.round(np.random.rand(batch_size, m, k) * 255 + A_min)

            

Reported by Pylint.
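
The naming, docstring, and argument-count findings on this test method all stem from hypothesis's @given decorator, which injects one parameter per strategy (plus the gc/dc device fixtures), so the signature is not freely choosable. A scoped disable is a common resolution; a reduced sketch:

    # Sketch: scoped Pylint pragmas for hypothesis-driven test signatures.
    class DNNLowPBatchMatMulOpTest:
        """Quantized BatchMatMul correctness tests."""

        # pylint: disable=too-many-arguments,invalid-name
        def test_dnnlowp_batch_matmul_int(self, m, n, k, batch_size, gc, dc):
            """Correctness test with no quantization error in the inputs."""
            return (m, n, k, batch_size, gc, dc)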

torch/utils/benchmark/utils/compare.py
48 issues
No name 'tensor' in module 'torch'
Error

Line: 8 Column: 1

              from typing import DefaultDict, List, Optional, Tuple

from torch.utils.benchmark.utils import common
from torch import tensor as _tensor

__all__ = ["Compare"]

BEST = "\033[92m"
GOOD = "\033[34m"

            

Reported by Pylint.

Missing class docstring
Error

Line: 20 Column: 1

              TERMINATE = "\033[0m"


class Colorize(enum.Enum):
    NONE = "none"
    COLUMNWISE = "columnwise"
    ROWWISE = "rowwise"



            

Reported by Pylint.

Class '_Column' inherits from object, can be safely removed from bases in python3
Error

Line: 27 Column: 1

              

# Classes to separate internal bookkeeping from what is rendered.
class _Column(object):
    def __init__(
        self,
        grouped_results: List[Tuple[Optional[common.Measurement], ...]],
        time_scale: float,
        time_unit: str,

            

Reported by Pylint.

Too many arguments (6/5)
Error

Line: 28 Column: 5

              
# Classes to separate internal bookkeeping from what is rendered.
class _Column(object):
    def __init__(
        self,
        grouped_results: List[Tuple[Optional[common.Measurement], ...]],
        time_scale: float,
        time_unit: str,
        trim_significant_figures: bool,

            

Reported by Pylint.

Line too long (104/100)
Error

Line: 56 Column: 1

                          if (m is not None) and (digits is not None)
        ) if self._trim_significant_figures else 1
        length = unit_digits + decimal_digits + (1 if decimal_digits else 0)
        self._template = f"{{:>{length}.{decimal_digits}f}}{{:>{7 if self._highlight_warnings else 0}}}"

    def get_results_for(self, group):
        return self._grouped_results[group]

    def num_to_str(self, value: Optional[float], estimated_sigfigs: int, spread: Optional[float]):

            

Reported by Pylint.
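
The 104-character template line can be split with implicit string concatenation inside parentheses without changing what it renders; a sketch with stand-in values replacing the instance attributes used in the original:

    # Sketch: split the long f-string template across lines (stand-in values
    # replace the instance attributes used in the original).
    length, decimal_digits, highlight_warnings = 8, 2, True
    template = (
        f"{{:>{length}.{decimal_digits}f}}"
        f"{{:>{7 if highlight_warnings else 0}}}"
    )
    print(template.format(3.14159, "(! 5%)"))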

Missing function or method docstring
Error

Line: 58 Column: 5

                      length = unit_digits + decimal_digits + (1 if decimal_digits else 0)
        self._template = f"{{:>{length}.{decimal_digits}f}}{{:>{7 if self._highlight_warnings else 0}}}"

    def get_results_for(self, group):
        return self._grouped_results[group]

    def num_to_str(self, value: Optional[float], estimated_sigfigs: int, spread: Optional[float]):
        if value is None:
            return " " * len(self.num_to_str(1, estimated_sigfigs, None))

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 61 Column: 5

                  def get_results_for(self, group):
        return self._grouped_results[group]

    def num_to_str(self, value: Optional[float], estimated_sigfigs: int, spread: Optional[float]):
        if value is None:
            return " " * len(self.num_to_str(1, estimated_sigfigs, None))

        if self._trim_significant_figures:
            value = common.trim_sigfig(value, estimated_sigfigs)

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 73 Column: 1

                          f" (! {spread * 100:.0f}%)" if self._highlight_warnings and spread is not None else "")


def optional_min(seq):
    l = list(seq)
    return None if len(l) == 0 else min(l)


class _Row(object):

            

Reported by Pylint.

Variable name "l" doesn't conform to snake_case naming style
Error

Line: 74 Column: 5

              

def optional_min(seq):
    l = list(seq)
    return None if len(l) == 0 else min(l)


class _Row(object):
    def __init__(self, results, row_group, render_env, env_str_len,

            

Reported by Pylint.
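
The single-letter `l` is flagged because it is easily confused with the digit 1; a descriptive name keeps the helper otherwise identical. A sketch:

    # Sketch: a descriptive name in place of the ambiguous `l`.
    def optional_min(seq):
        values = list(seq)
        return None if len(values) == 0 else min(values)

    print(optional_min([3, 1, 2]), optional_min([]))  # 1 None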

Too many instance attributes (9/7)
Error

Line: 78 Column: 1

                  return None if len(l) == 0 else min(l)


class _Row(object):
    def __init__(self, results, row_group, render_env, env_str_len,
                 row_name_str_len, time_scale, colorize, num_threads=None):
        super(_Row, self).__init__()
        self._results = results
        self._row_group = row_group

            

Reported by Pylint.

torch/optim/_functional.py
48 issues
Module 'torch' has no 'empty_like' member
Error

Line: 12 Column: 16

              def _make_sparse(grad, grad_indices, values):
    size = grad.size()
    if grad_indices.numel() == 0 or values.numel() == 0:
        return torch.empty_like(grad)
    return torch.sparse_coo_tensor(grad_indices, values, size)


def adagrad(params: List[Tensor],
            grads: List[Tensor],

            

Reported by Pylint.

Module 'torch' has no 'sparse_coo_tensor' member
Error

Line: 13 Column: 12

                  size = grad.size()
    if grad_indices.numel() == 0 or values.numel() == 0:
        return torch.empty_like(grad)
    return torch.sparse_coo_tensor(grad_indices, values, size)


def adagrad(params: List[Tensor],
            grads: List[Tensor],
            state_sums: List[Tensor],

            

Reported by Pylint.

Module 'torch' has no 'maximum' member
Error

Line: 90 Column: 13

                      exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1 - beta2)
        if amsgrad:
            # Maintains the maximum of all 2nd moment running avg. till now
            torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i])
            # Use the max. for normalizing running avg. of gradient
            denom = (max_exp_avg_sqs[i].sqrt() / math.sqrt(bias_correction2)).add_(eps)
        else:
            denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)


            

Reported by Pylint.

Module 'torch' has no 'maximum' member
Error

Line: 135 Column: 13

                      exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
        if amsgrad:
            # Maintains the maximum of all 2nd moment running avg. till now
            torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i])
            # Use the max. for normalizing running avg. of gradient
            denom = (max_exp_avg_sqs[i].sqrt() / math.sqrt(bias_correction2)).add_(eps)
        else:
            denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)


            

Reported by Pylint.

Module 'torch' has no 'clone' member
Error

Line: 170 Column: 23

                          buf = momentum_buffer_list[i]

            if buf is None:
                buf = torch.clone(d_p).detach()
                momentum_buffer_list[i] = buf
            else:
                buf.mul_(momentum).add_(d_p, alpha=1 - dampening)

            if nesterov:

            

Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 278 Column: 41

              
        # for dir<0, dfdx=0
        # for dir>=0 dfdx=dfdx
        grad = grad.clone(memory_format=torch.preserve_format)
        grad[sign.eq(etaminus)] = 0

        # update parameters
        param.addcmul_(grad.sign(), step_size, value=-1)


            

Reported by Pylint.

Module 'torch' has no 'cat' member
Error

Line: 315 Column: 20

                      # Update biased first moment estimate.
        exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
        # Update the exponentially weighted infinity norm.
        norm_buf = torch.cat([
            exp_inf.mul_(beta2).unsqueeze(0),
            grad.abs().add_(eps).unsqueeze_(0)
        ], 0)
        torch.amax(norm_buf, 0, keepdim=False, out=exp_inf)


            

Reported by Pylint.

Module 'torch' has no 'amax' member
Error

Line: 319 Column: 9

                          exp_inf.mul_(beta2).unsqueeze(0),
            grad.abs().add_(eps).unsqueeze_(0)
        ], 0)
        torch.amax(norm_buf, 0, keepdim=False, out=exp_inf)

        bias_correction = 1 - beta1 ** step
        clr = lr / bias_correction

        param.addcdiv_(exp_avg, exp_inf, value=-clr)

            

Reported by Pylint.

TODO: use foreach API in optim._functional to do all the computation
Error

Line: 7 Column: 3

              from torch import Tensor
from typing import List, Optional

# TODO: use foreach API in optim._functional to do all the computation

def _make_sparse(grad, grad_indices, values):
    size = grad.size()
    if grad_indices.numel() == 0 or values.numel() == 0:
        return torch.empty_like(grad)

            

Reported by Pylint.

Access to a protected member _indices of a client class
Error

Line: 40 Column: 28

              
        if grad.is_sparse:
            grad = grad.coalesce()  # the update is non-linear so indices must be unique
            grad_indices = grad._indices()
            grad_values = grad._values()
            size = grad.size()

            state_sum.add_(_make_sparse(grad, grad_indices, grad_values.pow(2)))
            std = state_sum.sparse_mask(grad)

            

Reported by Pylint.