The following issues were found:

third_party/miniz-2.0.8/miniz.c
37 issues
char - Statically-sized arrays can be improperly restricted, leading to potential overflows or other issues
Security

Line: 29 Column: 18 CWE codes: 119 120
Suggestion: Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length

              
#include  "miniz.h"

typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];

#ifdef __cplusplus
extern "C" {

            

Reported by FlawFinder.

char - Statically-sized arrays can be improperly restricted, leading to potential overflows or other issues
Security

Line: 30 Column: 18 CWE codes: 119 120
Suggestion: Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length

              #include  "miniz.h"

typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];

#ifdef __cplusplus
extern "C" {
#endif

            

Reported by FlawFinder.

char - Statically-sized arrays can be improperly restricted, leading to potential overflows or other issues
Security

Line: 31 Column: 18 CWE codes: 119 120
Suggestion: Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length

              
typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];

#ifdef __cplusplus
extern "C" {
#endif


            

Reported by FlawFinder.

memcpy - Does not check for buffer overflows when copying to destination
Security

Line: 477 Column: 9 CWE codes: 120
Suggestion: Make sure destination can always hold the source data

                  if (pState->m_dict_avail)
    {
        n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
        memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
        pStream->next_out += n;
        pStream->avail_out -= n;
        pStream->total_out += n;
        pState->m_dict_avail -= n;
        pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);

            

Reported by FlawFinder.

memcpy - Does not check for buffer overflows when copying to destination
Security

Line: 502 Column: 9 CWE codes: 120
Suggestion: Make sure destination can always hold the source data

                      pState->m_dict_avail = (mz_uint)out_bytes;

        n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
        memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
        pStream->next_out += n;
        pStream->avail_out -= n;
        pStream->total_out += n;
        pState->m_dict_avail -= n;
        pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);

            

Reported by FlawFinder.

memcpy - Does not check for buffer overflows when copying to destination
Security

Line: 989 Column: 5 CWE codes: 120
Suggestion: Make sure destination can always hold the source data

                      if (d->m_huff_code_sizes[1][num_dist_codes - 1])
            break;

    memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
    memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0], num_dist_codes);
    total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
    num_packed_code_sizes = 0;
    rle_z_count = 0;
    rle_repeat_count = 0;

            

Reported by FlawFinder.

memcpy - Does not check for buffer overflows when copying to destination
Security

Line: 990 Column: 5 CWE codes: 120
Suggestion: Make sure destination can always hold the source data

                          break;

    memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
    memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0], num_dist_codes);
    total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
    num_packed_code_sizes = 0;
    rle_z_count = 0;
    rle_repeat_count = 0;


            

Reported by FlawFinder.

memcpy - Does not check for buffer overflows when copying to destination
Security

Line: 1363 Column: 13 CWE codes: 120
Suggestion: Make sure destination can always hold the source data

                      else if (pOutput_buf_start == d->m_output_buf)
        {
            int bytes_to_copy = (int)MZ_MIN((size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
            memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf, bytes_to_copy);
            d->m_out_buf_ofs += bytes_to_copy;
            if ((n -= bytes_to_copy) != 0)
            {
                d->m_output_flush_ofs = bytes_to_copy;
                d->m_output_flush_remaining = n;

            

Reported by FlawFinder.

memcpy - Does not check for buffer overflows when copying to destination
Security

Line: 1385 Column: 2 CWE codes: 120
Suggestion: Make sure destination can always hold the source data

              static inline mz_uint16 TDEFL_READ_UNALIGNED_WORD(const mz_uint8* p)
{
	mz_uint16 ret;
	memcpy(&ret, p, sizeof(mz_uint16));
	return ret;
}
static inline mz_uint16 TDEFL_READ_UNALIGNED_WORD2(const mz_uint16* p)
{
	mz_uint16 ret;

            

Reported by FlawFinder.

memcpy - Does not check for buffer overflows when copying to destination
Security

Line: 1391 Column: 2 CWE codes: 120
Suggestion: Make sure destination can always hold the source data

              static inline mz_uint16 TDEFL_READ_UNALIGNED_WORD2(const mz_uint16* p)
{
	mz_uint16 ret;
	memcpy(&ret, p, sizeof(mz_uint16));
	return ret;
}
#else
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
#define TDEFL_READ_UNALIGNED_WORD2(p) *(const mz_uint16 *)(p)

            

Reported by FlawFinder.

benchmarks/operator_benchmark/pt/unary_test.py
37 issues
Unable to import 'torch'
Error

Line: 3 Column: 1

              
import operator_benchmark as op_bench
import torch


"""Microbenchmarks for point-wise unary operator."""


# Configs for pointwise unary ops

            

Reported by Pylint.

Module 'operator_benchmark' has no 'config_list' member
Error

Line: 10 Column: 27

              

# Configs for pointwise unary ops
unary_ops_configs_short = op_bench.config_list(
    attr_names=['M', 'N'],
    attrs=[
        [512, 512],
    ],
    cross_product_configs={

            

Reported by Pylint.

Module 'operator_benchmark' has no 'cross_product_configs' member
Error

Line: 21 Column: 26

                  tags=['short']
)

unary_ops_configs_long = op_bench.cross_product_configs(
    M=[256, 1024],
    N=[256, 1024],
    device=['cpu', 'cuda'],
    tags=['long']
)

            

Reported by Pylint.

Module 'operator_benchmark' has no 'TorchBenchmarkBase' member
Error

Line: 28 Column: 24

                  tags=['long']
)

class UnaryOpBenchmark(op_bench.TorchBenchmarkBase):
    def init(self, M, N, device, op_func):
        self.inputs = {
            "input": torch.rand(M, N, device=device)
        }
        self.op_func = op_func

            

Reported by Pylint.

Module 'operator_benchmark' has no 'op_list' member
Error

Line: 68 Column: 18

              def long_(input):
    return input.long()

unary_ops_list = op_bench.op_list(
    attr_names=['op_name', 'op_func'],
    attrs=[
        ['abs', torch.abs],
        ['abs_', torch.abs_],
        ['acos', torch.acos],

            

Reported by Pylint.

Module 'operator_benchmark' has no 'generate_pt_tests_from_op_list' member
Error

Line: 155 Column: 1

              )


op_bench.generate_pt_tests_from_op_list(unary_ops_list,
                                        unary_ops_configs_short + unary_ops_configs_long,
                                        UnaryOpBenchmark)


if __name__ == "__main__":

            

Reported by Pylint.

String statement has no effect
Error

Line: 6 Column: 1

              import torch


"""Microbenchmarks for point-wise unary operator."""


# Configs for pointwise unary ops
unary_ops_configs_short = op_bench.config_list(
    attr_names=['M', 'N'],

            

Reported by Pylint.

Attribute 'inputs' defined outside __init__
Error

Line: 30 Column: 9

              
class UnaryOpBenchmark(op_bench.TorchBenchmarkBase):
    def init(self, M, N, device, op_func):
        self.inputs = {
            "input": torch.rand(M, N, device=device)
        }
        self.op_func = op_func

    def forward(self, input):

            

Reported by Pylint.

Attribute 'op_func' defined outside __init__
Error

Line: 33 Column: 9

                      self.inputs = {
            "input": torch.rand(M, N, device=device)
        }
        self.op_func = op_func

    def forward(self, input):
        return self.op_func(input)

def bernoulli_(input):

            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 35 Column: 23

                      }
        self.op_func = op_func

    def forward(self, input):
        return self.op_func(input)

def bernoulli_(input):
    return input.bernoulli_()


            

Reported by Pylint.

test/test_throughput_benchmark.py
37 issues
Unable to import 'torch'
Error

Line: 2 Column: 1

              
import torch
from torch.utils import ThroughputBenchmark
from torch.testing import assert_allclose

from torch.testing._internal.common_utils import run_tests, TestCase, TemporaryFileName

class TwoLayerNet(torch.jit.ScriptModule):
    def __init__(self, D_in, H, D_out):

            

Reported by Pylint.

Unable to import 'torch.utils'
Error

Line: 3 Column: 1

              
import torch
from torch.utils import ThroughputBenchmark
from torch.testing import assert_allclose

from torch.testing._internal.common_utils import run_tests, TestCase, TemporaryFileName

class TwoLayerNet(torch.jit.ScriptModule):
    def __init__(self, D_in, H, D_out):

            

Reported by Pylint.

Unable to import 'torch.testing'
Error

Line: 4 Column: 1

              
import torch
from torch.utils import ThroughputBenchmark
from torch.testing import assert_allclose

from torch.testing._internal.common_utils import run_tests, TestCase, TemporaryFileName

class TwoLayerNet(torch.jit.ScriptModule):
    def __init__(self, D_in, H, D_out):

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 6 Column: 1

              from torch.utils import ThroughputBenchmark
from torch.testing import assert_allclose

from torch.testing._internal.common_utils import run_tests, TestCase, TemporaryFileName

class TwoLayerNet(torch.jit.ScriptModule):
    def __init__(self, D_in, H, D_out):
        super(TwoLayerNet, self).__init__()
        self.linear1 = torch.nn.Linear(D_in, H)

            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 51 Column: 13

                          inputs.append([torch.randn(B, D_in), torch.randn(B, D_in)])
        bench = ThroughputBenchmark(module)

        for input in inputs:
            # can do both args and kwargs here
            bench.add_input(input[0], x2=input[1])

        for i in range(NUM_INPUTS):
            # or just unpack the list of inputs

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              
import torch
from torch.utils import ThroughputBenchmark
from torch.testing import assert_allclose

from torch.testing._internal.common_utils import run_tests, TestCase, TemporaryFileName

class TwoLayerNet(torch.jit.ScriptModule):
    def __init__(self, D_in, H, D_out):

            

Reported by Pylint.

Too few public methods (1/2)
Error

Line: 8 Column: 1

              
from torch.testing._internal.common_utils import run_tests, TestCase, TemporaryFileName

class TwoLayerNet(torch.jit.ScriptModule):
    def __init__(self, D_in, H, D_out):
        super(TwoLayerNet, self).__init__()
        self.linear1 = torch.nn.Linear(D_in, H)
        self.linear2 = torch.nn.Linear(2 * H, D_out)


            

Reported by Pylint.

Missing class docstring
Error

Line: 8 Column: 1

              
from torch.testing._internal.common_utils import run_tests, TestCase, TemporaryFileName

class TwoLayerNet(torch.jit.ScriptModule):
    def __init__(self, D_in, H, D_out):
        super(TwoLayerNet, self).__init__()
        self.linear1 = torch.nn.Linear(D_in, H)
        self.linear2 = torch.nn.Linear(2 * H, D_out)


            

Reported by Pylint.

Argument name "D_in" doesn't conform to snake_case naming style
Error

Line: 9 Column: 5

              from torch.testing._internal.common_utils import run_tests, TestCase, TemporaryFileName

class TwoLayerNet(torch.jit.ScriptModule):
    def __init__(self, D_in, H, D_out):
        super(TwoLayerNet, self).__init__()
        self.linear1 = torch.nn.Linear(D_in, H)
        self.linear2 = torch.nn.Linear(2 * H, D_out)

    @torch.jit.script_method

            

Reported by Pylint.

Argument name "H" doesn't conform to snake_case naming style
Error

Line: 9 Column: 5

              from torch.testing._internal.common_utils import run_tests, TestCase, TemporaryFileName

class TwoLayerNet(torch.jit.ScriptModule):
    def __init__(self, D_in, H, D_out):
        super(TwoLayerNet, self).__init__()
        self.linear1 = torch.nn.Linear(D_in, H)
        self.linear2 = torch.nn.Linear(2 * H, D_out)

    @torch.jit.script_method

            

Reported by Pylint.

test/test_jit_fuser_legacy.py
37 issues
Unused import MiLSTMCell from wildcard import
Error

Line: 3 Column: 1

              import sys
sys.argv.append("--jit_executor=legacy")
from test_jit_fuser import *  # noqa: F403

if __name__ == '__main__':
    run_tests()

            

Reported by Pylint.

Unused import LSTMCellF from wildcard import
Error

Line: 3 Column: 1

              import sys
sys.argv.append("--jit_executor=legacy")
from test_jit_fuser import *  # noqa: F403

if __name__ == '__main__':
    run_tests()

            

Reported by Pylint.

Unused import LSTMCellC from wildcard import
Error

Line: 3 Column: 1

              import sys
sys.argv.append("--jit_executor=legacy")
from test_jit_fuser import *  # noqa: F403

if __name__ == '__main__':
    run_tests()

            

Reported by Pylint.

Unused import shell from wildcard import
Error

Line: 3 Column: 1

              import sys
sys.argv.append("--jit_executor=legacy")
from test_jit_fuser import *  # noqa: F403

if __name__ == '__main__':
    run_tests()

            

Reported by Pylint.

Unused import JitTestCase from wildcard import
Error

Line: 3 Column: 1

              import sys
sys.argv.append("--jit_executor=legacy")
from test_jit_fuser import *  # noqa: F403

if __name__ == '__main__':
    run_tests()

            

Reported by Pylint.

Unused import get_milstm_inputs from wildcard import
Error

Line: 3 Column: 1

              import sys
sys.argv.append("--jit_executor=legacy")
from test_jit_fuser import *  # noqa: F403

if __name__ == '__main__':
    run_tests()

            

Reported by Pylint.

Unused import get_lstm_inputs from wildcard import
Error

Line: 3 Column: 1

              import sys
sys.argv.append("--jit_executor=legacy")
from test_jit_fuser import *  # noqa: F403

if __name__ == '__main__':
    run_tests()

            

Reported by Pylint.

Unused import enable_cpu_fuser from wildcard import
Error

Line: 3 Column: 1

              import sys
sys.argv.append("--jit_executor=legacy")
from test_jit_fuser import *  # noqa: F403

if __name__ == '__main__':
    run_tests()

            

Reported by Pylint.

Unused import all_backward_graphs from wildcard import
Error

Line: 3 Column: 1

              import sys
sys.argv.append("--jit_executor=legacy")
from test_jit_fuser import *  # noqa: F403

if __name__ == '__main__':
    run_tests()

            

Reported by Pylint.

Unused import backward_graph from wildcard import
Error

Line: 3 Column: 1

              import sys
sys.argv.append("--jit_executor=legacy")
from test_jit_fuser import *  # noqa: F403

if __name__ == '__main__':
    run_tests()

            

Reported by Pylint.

torch/fx/experimental/unification/core.py
37 issues
Attempted relative import beyond top-level package
Error

Line: 4 Column: 1

              from collections.abc import Iterator  # type: ignore[import]
from functools import partial

from .unification_tools import assoc  # type: ignore[import]
from .utils import transitive_get as walk
from .variable import isvar
from .dispatch import dispatch

################

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 5 Column: 1

              from functools import partial

from .unification_tools import assoc  # type: ignore[import]
from .utils import transitive_get as walk
from .variable import isvar
from .dispatch import dispatch

################
# Reificiation #

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 6 Column: 1

              
from .unification_tools import assoc  # type: ignore[import]
from .utils import transitive_get as walk
from .variable import isvar
from .dispatch import dispatch

################
# Reificiation #
################

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 7 Column: 1

              from .unification_tools import assoc  # type: ignore[import]
from .utils import transitive_get as walk
from .variable import isvar
from .dispatch import dispatch

################
# Reificiation #
################


            

Reported by Pylint.

Too many positional arguments for function call
Error

Line: 64 Column: 13

                  if len(u) != len(v):
        return False
    for uu, vv in zip(u, v):  # avoiding recursion
        s = unify(uu, vv, s)
        if s is False:
            return False
    return s
#
# @dispatch((set, frozenset), (set, frozenset), dict)

            

Reported by Pylint.

function already defined line 96
Error

Line: 114 Column: 1

              unify

@dispatch(object, object)  # type: ignore[no-redef]
def unify(u, v):
    return unify(u, v, {})

            

Reported by Pylint.

Too many positional arguments for function call
Error

Line: 115 Column: 12

              
@dispatch(object, object)  # type: ignore[no-redef]
def unify(u, v):
    return unify(u, v, {})

            

Reported by Pylint.

Statement seems to have no effect
Error

Line: 17 Column: 1

              def _reify(t, s):
    return map(partial(reify, s=s), t)
    # return (reify(arg, s) for arg in t)
_reify

@dispatch(tuple, dict)  # type: ignore[no-redef]
def _reify(t, s):
    return tuple(reify(iter(t), s))
_reify

            

Reported by Pylint.

Statement seems to have no effect
Error

Line: 22 Column: 1

              @dispatch(tuple, dict)  # type: ignore[no-redef]
def _reify(t, s):
    return tuple(reify(iter(t), s))
_reify

@dispatch(list, dict)  # type: ignore[no-redef]
def _reify(t, s):
    return list(reify(iter(t), s))
_reify

            

Reported by Pylint.

Statement seems to have no effect
Error

Line: 27 Column: 1

              @dispatch(list, dict)  # type: ignore[no-redef]
def _reify(t, s):
    return list(reify(iter(t), s))
_reify

@dispatch(dict, dict)  # type: ignore[no-redef]
def _reify(d, s):
    return dict((k, reify(v, s)) for k, v in d.items())
_reify

            

Reported by Pylint.

benchmarks/fastrnns/test.py
37 issues
Unable to import 'torch'
Error

Line: 2 Column: 1

              import argparse
import torch
import torch.nn as nn

from .factory import pytorch_lstm_creator, varlen_pytorch_lstm_creator
from .runner import get_nn_runners


def barf():

            

Reported by Pylint.

Unable to import 'torch.nn'
Error

Line: 3 Column: 1

              import argparse
import torch
import torch.nn as nn

from .factory import pytorch_lstm_creator, varlen_pytorch_lstm_creator
from .runner import get_nn_runners


def barf():

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 5 Column: 1

              import torch
import torch.nn as nn

from .factory import pytorch_lstm_creator, varlen_pytorch_lstm_creator
from .runner import get_nn_runners


def barf():
    import pdb

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 6 Column: 1

              import torch.nn as nn

from .factory import pytorch_lstm_creator, varlen_pytorch_lstm_creator
from .runner import get_nn_runners


def barf():
    import pdb
    pdb.set_trace()

            

Reported by Pylint.

Unused argument 'check_grad'
Error

Line: 27 Column: 49

                  return [t for t in tensors if t.requires_grad]


def test_rnns(experim_creator, control_creator, check_grad=True, verbose=False,
              seqLength=100, numLayers=1, inputSize=512, hiddenSize=512,
              miniBatch=64, device='cuda', seed=17):
    creator_args = dict(seqLength=seqLength, numLayers=numLayers,
                        inputSize=inputSize, hiddenSize=hiddenSize,
                        miniBatch=miniBatch, device=device, seed=seed)

            

Reported by Pylint.

Redefining name 'test_args' from outer scope (line 148)
Error

Line: 67 Column: 1

                  print('')


def test_vl_py(**test_args):
    # XXX: This compares vl_py with vl_lstm.
    # It's done this way because those two don't give the same outputs so
    # the result isn't an apples-to-apples comparison right now.
    control_creator = varlen_pytorch_lstm_creator
    name, experim_creator, context = get_nn_runners('vl_py')[0]

            

Reported by Pylint.

XXX: This compares vl_py with vl_lstm.
Error

Line: 68 Column: 3

              

def test_vl_py(**test_args):
    # XXX: This compares vl_py with vl_lstm.
    # It's done this way because those two don't give the same outputs so
    # the result isn't an apples-to-apples comparison right now.
    control_creator = varlen_pytorch_lstm_creator
    name, experim_creator, context = get_nn_runners('vl_py')[0]
    with context():

            

Reported by Pylint.

Redefining name 'name' from outer scope (line 155)
Error

Line: 72 Column: 5

                  # It's done this way because those two don't give the same outputs so
    # the result isn't an apples-to-apples comparison right now.
    control_creator = varlen_pytorch_lstm_creator
    name, experim_creator, context = get_nn_runners('vl_py')[0]
    with context():
        print('testing {}...'.format(name))
        creator_keys = [
            'seqLength', 'numLayers', 'inputSize',
            'hiddenSize', 'miniBatch', 'device', 'seed'

            

Reported by Pylint.

Redefining name 'context' from outer scope (line 155)
Error

Line: 72 Column: 28

                  # It's done this way because those two don't give the same outputs so
    # the result isn't an apples-to-apples comparison right now.
    control_creator = varlen_pytorch_lstm_creator
    name, experim_creator, context = get_nn_runners('vl_py')[0]
    with context():
        print('testing {}...'.format(name))
        creator_keys = [
            'seqLength', 'numLayers', 'inputSize',
            'hiddenSize', 'miniBatch', 'device', 'seed'

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              import argparse
import torch
import torch.nn as nn

from .factory import pytorch_lstm_creator, varlen_pytorch_lstm_creator
from .runner import get_nn_runners


def barf():

            

Reported by Pylint.

caffe2/python/operator_test/rmac_regions_op_test.py
37 issues
Unable to import 'hypothesis'
Error

Line: 7 Column: 1

              

from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np



            

Reported by Pylint.

Unable to import 'hypothesis.strategies'
Error

Line: 9 Column: 1

              from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np


class RMACRegionsOpTest(hu.HypothesisTestCase):
    @given(

            

Reported by Pylint.

Unused argument 'dc'
Error

Line: 22 Column: 41

                      **hu.gcs
    )
    @settings(deadline=10000)
    def test(self, n, h, w, scales, gc, dc):
        X = np.random.rand(n, 64, h, w).astype(np.float32)
        overlap = 0.4

        def ref_op(X):
            N, H, W = X.shape[0], X.shape[2], X.shape[3]

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              




from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st

            

Reported by Pylint.

Missing class docstring
Error

Line: 13 Column: 1

              import numpy as np


class RMACRegionsOpTest(hu.HypothesisTestCase):
    @given(
        n=st.integers(500, 500),
        h=st.integers(1, 10),
        w=st.integers(1, 10),
        scales=st.integers(1, 3),

            

Reported by Pylint.

Argument name "gc" doesn't conform to snake_case naming style
Error

Line: 21 Column: 5

                      scales=st.integers(1, 3),
        **hu.gcs
    )
    @settings(deadline=10000)
    def test(self, n, h, w, scales, gc, dc):
        X = np.random.rand(n, 64, h, w).astype(np.float32)
        overlap = 0.4

        def ref_op(X):

            

Reported by Pylint.

Argument name "dc" doesn't conform to snake_case naming style
Error

Line: 21 Column: 5

                      scales=st.integers(1, 3),
        **hu.gcs
    )
    @settings(deadline=10000)
    def test(self, n, h, w, scales, gc, dc):
        X = np.random.rand(n, 64, h, w).astype(np.float32)
        overlap = 0.4

        def ref_op(X):

            

Reported by Pylint.

Argument name "h" doesn't conform to snake_case naming style
Error

Line: 21 Column: 5

                      scales=st.integers(1, 3),
        **hu.gcs
    )
    @settings(deadline=10000)
    def test(self, n, h, w, scales, gc, dc):
        X = np.random.rand(n, 64, h, w).astype(np.float32)
        overlap = 0.4

        def ref_op(X):

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 21 Column: 5

                      scales=st.integers(1, 3),
        **hu.gcs
    )
    @settings(deadline=10000)
    def test(self, n, h, w, scales, gc, dc):
        X = np.random.rand(n, 64, h, w).astype(np.float32)
        overlap = 0.4

        def ref_op(X):

            

Reported by Pylint.

Too many arguments (7/5)
Error

Line: 21 Column: 5

                      scales=st.integers(1, 3),
        **hu.gcs
    )
    @settings(deadline=10000)
    def test(self, n, h, w, scales, gc, dc):
        X = np.random.rand(n, 64, h, w).astype(np.float32)
        overlap = 0.4

        def ref_op(X):

            

Reported by Pylint.

torch/utils/benchmark/examples/blas_compare.py
37 issues
subprocess call with shell=True identified, security issue.
Security injection

Line: 134
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b602_subprocess_popen_with_shell_equals_true.html

                          f"--DETAIL_env {env}",
            env=env_vars,
            stdout=subprocess.PIPE,
            shell=True
        )

        with open(result_file, "rb") as f:
            result_bytes = f.read()


            

Reported by Bandit.

subprocess call with shell=True identified, security issue.
Security injection

Line: 201
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b602_subprocess_popen_with_shell_equals_true.html

                      f"source activate {env_path} && "
        f"python {os.path.abspath(__file__)} "
        "--DETAIL_in_compare",
        shell=True
    )


if __name__ == "__main__":
    # These flags are for subprocess control, not controlling the main loop.

            

Reported by Bandit.

Attempted relative import beyond top-level package
Error

Line: 17 Column: 1

              import time
from typing import Tuple, Dict

from . import blas_compare_setup


MIN_RUN_TIME = 1
NUM_REPLICATES = 20
NUM_THREAD_SETTINGS = (1, 2, 4)

            

Reported by Pylint.

Value 'queue.Queue' is unsubscriptable
Error

Line: 35 Column: 15

              

_RESULT_FILE_LOCK = threading.Lock()
_WORKER_POOL: queue.Queue[Tuple[str, str, int]] = queue.Queue()
def clear_worker_pool():
    while not _WORKER_POOL.empty():
        _, result_file, _ = _WORKER_POOL.get_nowait()
        os.remove(result_file)


            

Reported by Pylint.

Module 'torch' has no 'float64' member
Error

Line: 75 Column: 57

                  torch.manual_seed(seed)
    results = []
    for n in [4, 8, 16, 32, 64, 128, 256, 512, 1024, 7, 96, 150, 225]:
        dtypes = (("Single", torch.float32), ("Double", torch.float64))
        shapes = (
            # Square MatMul
            ((n, n), (n, n), "(n x n) x (n x n)", "Matrix-Matrix Product"),

            # Matrix-Vector product

            

Reported by Pylint.

Module 'torch' has no 'float32' member
Error

Line: 75 Column: 30

                  torch.manual_seed(seed)
    results = []
    for n in [4, 8, 16, 32, 64, 128, 256, 512, 1024, 7, 96, 150, 225]:
        dtypes = (("Single", torch.float32), ("Double", torch.float64))
        shapes = (
            # Square MatMul
            ((n, n), (n, n), "(n x n) x (n x n)", "Matrix-Matrix Product"),

            # Matrix-Vector product

            

Reported by Pylint.

Module 'torch' has no 'rand' member
Error

Line: 91 Column: 26

                              description=f"n = {n}",
                env=os.path.split(env or "")[1] or None,
                globals={
                    "x": torch.rand(x_shape, dtype=dtype),
                    "y": torch.rand(y_shape, dtype=dtype),
                },
                num_threads=num_threads,
            ).blocked_autorange(min_run_time=MIN_RUN_TIME)
            results.append(t)

            

Reported by Pylint.

Module 'torch' has no 'rand' member
Error

Line: 92 Column: 26

                              env=os.path.split(env or "")[1] or None,
                globals={
                    "x": torch.rand(x_shape, dtype=dtype),
                    "y": torch.rand(y_shape, dtype=dtype),
                },
                num_threads=num_threads,
            ).blocked_autorange(min_run_time=MIN_RUN_TIME)
            results.append(t)


            

Reported by Pylint.

Redefining name 'args' from outer scope (line 215)
Error

Line: 103 Column: 20

                          pickle.dump(results, f)


def run_subprocess(args):
    seed, env, sub_label, extra_env_vars = args
    core_str = None
    try:
        core_str, result_file, num_threads = _WORKER_POOL.get()
        with open(result_file, "wb"):

            

Reported by Pylint.

Using subprocess.run without explicitly set `check` is not recommended.
Error

Line: 122 Column: 9

                      }
        env_vars.update(extra_env_vars or {})

        subprocess.run(
            f"source activate {env} && "
            f"taskset --cpu-list {core_str} "
            f"python {os.path.abspath(__file__)} "
            "--DETAIL_in_subprocess "
            f"--DETAIL_seed {seed} "

            

Reported by Pylint.

caffe2/contrib/playground/resnetdemo/explicit_resnet_forward.py
37 issues
Unused argument 'model'
Error

Line: 24 Column: 40

              }


def gen_forward_pass_builder_fun(self, model, dataset, is_train):
    split = 'train' if is_train else 'test'
    opts = self.opts

    def model_creator(model, loss_scale):
        model, softmax, loss = resnet_imagenet_create_model(

            

Reported by Pylint.

Unused argument 'loss_scale'
Error

Line: 28 Column: 30

                  split = 'train' if is_train else 'test'
    opts = self.opts

    def model_creator(model, loss_scale):
        model, softmax, loss = resnet_imagenet_create_model(
            model=model,
            data='data',
            labels='label',
            split=split,

            

Reported by Pylint.

Unused variable 'softmax'
Error

Line: 29 Column: 16

                  opts = self.opts

    def model_creator(model, loss_scale):
        model, softmax, loss = resnet_imagenet_create_model(
            model=model,
            data='data',
            labels='label',
            split=split,
            opts=opts,

            

Reported by Pylint.

Unused argument 'dataset'
Error

Line: 41 Column: 68

                  return model_creator


def resnet_imagenet_create_model(model, data, labels, split, opts, dataset):
    model_helper = ResNetModelHelper(model, split, opts)
    opts_depth = opts['model_param']['num_layer']
    engine = opts['model_param']['engine']
    log.info(' | ResNet-{} Imagenet'.format(opts_depth))
    assert opts_depth in BLOCK_CONFIG.keys(), \

            

Reported by Pylint.

Use lazy % formatting in logging functions
Error

Line: 45 Column: 14

                  model_helper = ResNetModelHelper(model, split, opts)
    opts_depth = opts['model_param']['num_layer']
    engine = opts['model_param']['engine']
    log.info(' | ResNet-{} Imagenet'.format(opts_depth))
    assert opts_depth in BLOCK_CONFIG.keys(), \
        'Block config is not defined for specified model depth. Please check.'
    (n1, n2, n3, n4) = BLOCK_CONFIG[opts_depth]

    num_features = 2048

            

Reported by Pylint.

TODO: This can be further optimized by passing dim_in, dim_out = features,
Error

Line: 76 Column: 3

                  relu_blob = model.Relu(bn_blob, bn_blob)
    max_pool = model.MaxPool(relu_blob, 'pool1', kernel=3, stride=2, pad=1)

    # TODO: This can be further optimized by passing dim_in, dim_out = features,
    # dim_out = features * 4
    if opts_depth in [50, 101, 152, 200, 264, 284]:
        blob_in, dim_in = model_helper.residual_layer(
            residual_block, max_pool, 64, 256, stride=1, num_blocks=n1,
            prefix='res2', dim_inner=64

            

Reported by Pylint.

Unused argument 'group'
Error

Line: 260 Column: 68

              
    # bottleneck residual layer for 50, 101, 152 layer networks
    def bottleneck_block(
        self, blob_in, dim_in, dim_out, stride, prefix, dim_inner, group=None
    ):
        blob_out = self.conv_bn_relu(
            blob_in, dim_in, dim_inner, 1, 1, prefix + "_branch2a", pad=0,
        )
        blob_out = self.conv_bn_relu(

            

Reported by Pylint.

Unused argument 'dim_inner'
Error

Line: 284 Column: 57

              
    # basic layer for the 18 and 34 layer networks and the CIFAR data networks
    def basic_block(
        self, blob_in, dim_in, dim_out, stride, prefix, dim_inner=None,
        group=None,
    ):
        blob_out = self.conv_bn_relu(
            blob_in, dim_in, dim_out, 3, stride, prefix + "_branch2a"
        )

            

Reported by Pylint.

Unused argument 'group'
Error

Line: 285 Column: 9

                   # basic layer for the 18 and 34 layer networks and the CIFAR data networks
    def basic_block(
        self, blob_in, dim_in, dim_out, stride, prefix, dim_inner=None,
        group=None,
    ):
        blob_out = self.conv_bn_relu(
            blob_in, dim_in, dim_out, 3, stride, prefix + "_branch2a"
        )
        bn_blob = self.conv_bn(

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              




import logging
logging.basicConfig()
log = logging.getLogger("AnyExp")
log.setLevel(logging.DEBUG)

            

Reported by Pylint.

torch/distributed/benchmarks/benchmark_ddp_rpc.py
37 issues
Unable to import 'torch'
Error

Line: 10 Column: 1

              import time

import numpy as np
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp

            

Reported by Pylint.

Unable to import 'torch.nn'
Error

Line: 11 Column: 1

              
import numpy as np
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
import torch.optim as optim

            

Reported by Pylint.

Unable to import 'torch.distributed'
Error

Line: 12 Column: 1

              import numpy as np
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
import torch.optim as optim
from torch.distributed.optim import DistributedOptimizer

            

Reported by Pylint.

Unable to import 'torch.distributed.autograd'
Error

Line: 13 Column: 1

              import torch
import torch.nn as nn
import torch.distributed as dist
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
import torch.optim as optim
from torch.distributed.optim import DistributedOptimizer
from torch.distributed.rpc import RRef, TensorPipeRpcBackendOptions

            

Reported by Pylint.

Unable to import 'torch.distributed.rpc'
Error

Line: 14 Column: 1

              import torch.nn as nn
import torch.distributed as dist
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
import torch.optim as optim
from torch.distributed.optim import DistributedOptimizer
from torch.distributed.rpc import RRef, TensorPipeRpcBackendOptions
from torch.distributed.rpc.backend_registry import BackendType

            

Reported by Pylint.

Unable to import 'torch.multiprocessing'
Error

Line: 15 Column: 1

              import torch.distributed as dist
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
import torch.optim as optim
from torch.distributed.optim import DistributedOptimizer
from torch.distributed.rpc import RRef, TensorPipeRpcBackendOptions
from torch.distributed.rpc.backend_registry import BackendType
from torch.nn.parallel import DistributedDataParallel as DDP

            

Reported by Pylint.

Unable to import 'torch.optim'
Error

Line: 16 Column: 1

              import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
import torch.optim as optim
from torch.distributed.optim import DistributedOptimizer
from torch.distributed.rpc import RRef, TensorPipeRpcBackendOptions
from torch.distributed.rpc.backend_registry import BackendType
from torch.nn.parallel import DistributedDataParallel as DDP


            

Reported by Pylint.

Unable to import 'torch.distributed.optim'
Error

Line: 17 Column: 1

              import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
import torch.optim as optim
from torch.distributed.optim import DistributedOptimizer
from torch.distributed.rpc import RRef, TensorPipeRpcBackendOptions
from torch.distributed.rpc.backend_registry import BackendType
from torch.nn.parallel import DistributedDataParallel as DDP



            

Reported by Pylint.

Unable to import 'torch.distributed.rpc'
Error

Line: 18 Column: 1

              import torch.multiprocessing as mp
import torch.optim as optim
from torch.distributed.optim import DistributedOptimizer
from torch.distributed.rpc import RRef, TensorPipeRpcBackendOptions
from torch.distributed.rpc.backend_registry import BackendType
from torch.nn.parallel import DistributedDataParallel as DDP


# Config

            

Reported by Pylint.

Unable to import 'torch.distributed.rpc.backend_registry'
Error

Line: 19 Column: 1

              import torch.optim as optim
from torch.distributed.optim import DistributedOptimizer
from torch.distributed.rpc import RRef, TensorPipeRpcBackendOptions
from torch.distributed.rpc.backend_registry import BackendType
from torch.nn.parallel import DistributedDataParallel as DDP


# Config
NUM_TRAINERS = 8

            

Reported by Pylint.