The following issues were found:
caffe2/quantization/server/gather_dnnlowp_op_test.py
11 issues
Line: 6
Column: 1
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given
Reported by Pylint.
Line: 10
Column: 1
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
Reported by Pylint.
Line: 26
Column: 90
out_quantized=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_gather(self, dim1, dim2, is_empty, in_quantized, out_quantized, gc, dc):
if is_empty:
dim2 = 0
# FIXME : DNNLOWP Gather doesn't support quantized input and
# dequantized output
if in_quantized:
Reported by Pylint.
Line: 29
Column: 3
def test_dnnlowp_gather(self, dim1, dim2, is_empty, in_quantized, out_quantized, gc, dc):
if is_empty:
dim2 = 0
# FIXME : DNNLOWP Gather doesn't support quantized input and
# dequantized output
if in_quantized:
out_quantized = True
data = (np.random.rand(dim1) * 2 - 1).astype(np.float32)
Reported by Pylint.
Line: 1
Column: 1
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
Reported by Pylint.
Line: 17
Column: 1
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPGatherOpTest(hu.HypothesisTestCase):
@given(
dim1=st.integers(256, 512),
dim2=st.integers(32, 256),
is_empty=st.booleans(),
in_quantized=st.booleans(),
Reported by Pylint.
Line: 25
Column: 5
in_quantized=st.booleans(),
out_quantized=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_gather(self, dim1, dim2, is_empty, in_quantized, out_quantized, gc, dc):
if is_empty:
dim2 = 0
# FIXME : DNNLOWP Gather doesn't support quantized input and
# dequantized output
Reported by Pylint.
Line: 25
Column: 5
in_quantized=st.booleans(),
out_quantized=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_gather(self, dim1, dim2, is_empty, in_quantized, out_quantized, gc, dc):
if is_empty:
dim2 = 0
# FIXME : DNNLOWP Gather doesn't support quantized input and
# dequantized output
Reported by Pylint.
Line: 25
Column: 5
in_quantized=st.booleans(),
out_quantized=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_gather(self, dim1, dim2, is_empty, in_quantized, out_quantized, gc, dc):
if is_empty:
dim2 = 0
# FIXME : DNNLOWP Gather doesn't support quantized input and
# dequantized output
Reported by Pylint.
Line: 25
Column: 5
in_quantized=st.booleans(),
out_quantized=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_gather(self, dim1, dim2, is_empty, in_quantized, out_quantized, gc, dc):
if is_empty:
dim2 = 0
# FIXME : DNNLOWP Gather doesn't support quantized input and
# dequantized output
Reported by Pylint.
test/jit_hooks/model.py
11 issues
Line: 4
Column: 1
import argparse
import os
import sys
import torch
# grab modules from test_jit_hooks.cpp
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from jit.test_hooks_modules import (
Reported by Pylint.
Line: 9
Column: 1
# grab modules from test_jit_hooks.cpp
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from jit.test_hooks_modules import (
create_forward_tuple_input, create_module_forward_multiple_inputs,
create_module_forward_single_input, create_module_hook_return_nothing,
create_module_multiple_hooks_multiple_inputs,
create_module_multiple_hooks_single_input, create_module_no_forward_input,
create_module_same_hook_repeated, create_submodule_forward_multiple_inputs,
Reported by Pylint.
Line: 29
Column: 5
)
parser.add_argument("--export-script-module-to", required=True)
options = parser.parse_args()
global save_name
save_name = options.export_script_module_to + "_"
tests = [
("test_submodule_forward_single_input", create_submodule_forward_single_input()),
("test_submodule_forward_multiple_inputs", create_submodule_forward_multiple_inputs()),
Reported by Pylint.
Line: 1
Column: 1
import argparse
import os
import sys
import torch
# grab modules from test_jit_hooks.cpp
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from jit.test_hooks_modules import (
Reported by Pylint.
Line: 9
Column: 1
# grab modules from test_jit_hooks.cpp
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from jit.test_hooks_modules import (
create_forward_tuple_input, create_module_forward_multiple_inputs,
create_module_forward_single_input, create_module_hook_return_nothing,
create_module_multiple_hooks_multiple_inputs,
create_module_multiple_hooks_single_input, create_module_no_forward_input,
create_module_same_hook_repeated, create_submodule_forward_multiple_inputs,
Reported by Pylint.
Line: 23
Column: 1
create_submodule_to_call_directly_with_hooks)
# Create saved modules for JIT forward hooks and pre-hooks
def main():
parser = argparse.ArgumentParser(
description="Serialize a script modules with hooks attached"
)
parser.add_argument("--export-script-module-to", required=True)
options = parser.parse_args()
Reported by Pylint.
Line: 29
Column: 5
)
parser.add_argument("--export-script-module-to", required=True)
options = parser.parse_args()
global save_name
save_name = options.export_script_module_to + "_"
tests = [
("test_submodule_forward_single_input", create_submodule_forward_single_input()),
("test_submodule_forward_multiple_inputs", create_submodule_forward_multiple_inputs()),
Reported by Pylint.
Line: 35
Column: 1
tests = [
("test_submodule_forward_single_input", create_submodule_forward_single_input()),
("test_submodule_forward_multiple_inputs", create_submodule_forward_multiple_inputs()),
("test_submodule_multiple_hooks_single_input", create_submodule_multiple_hooks_single_input()),
("test_submodule_multiple_hooks_multiple_inputs", create_submodule_multiple_hooks_multiple_inputs()),
("test_submodule_hook_return_nothing", create_submodule_hook_return_nothing()),
("test_submodule_same_hook_repeated", create_submodule_same_hook_repeated()),
("test_module_forward_single_input", create_module_forward_single_input()),
Reported by Pylint.
Line: 36
Column: 1
("test_submodule_forward_single_input", create_submodule_forward_single_input()),
("test_submodule_forward_multiple_inputs", create_submodule_forward_multiple_inputs()),
("test_submodule_multiple_hooks_single_input", create_submodule_multiple_hooks_single_input()),
("test_submodule_multiple_hooks_multiple_inputs", create_submodule_multiple_hooks_multiple_inputs()),
("test_submodule_hook_return_nothing", create_submodule_hook_return_nothing()),
("test_submodule_same_hook_repeated", create_submodule_same_hook_repeated()),
("test_module_forward_single_input", create_module_forward_single_input()),
("test_module_forward_multiple_inputs", create_module_forward_multiple_inputs()),
Reported by Pylint.
Line: 43
Column: 1
("test_module_forward_single_input", create_module_forward_single_input()),
("test_module_forward_multiple_inputs", create_module_forward_multiple_inputs()),
("test_module_multiple_hooks_single_input", create_module_multiple_hooks_single_input()),
("test_module_multiple_hooks_multiple_inputs", create_module_multiple_hooks_multiple_inputs()),
("test_module_hook_return_nothing", create_module_hook_return_nothing()),
("test_module_same_hook_repeated", create_module_same_hook_repeated()),
("test_module_no_forward_input", create_module_no_forward_input()),
("test_forward_tuple_input", create_forward_tuple_input()),
Reported by Pylint.
caffe2/python/operator_test/floor_op_test.py
11 issues
Line: 10
Column: 1
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
Reported by Pylint.
Line: 11
Column: 1
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
class TestFloor(serial.SerializedTestCase):
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
Reported by Pylint.
Line: 13
Column: 1
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
class TestFloor(serial.SerializedTestCase):
@given(X=hu.tensor(),
Reported by Pylint.
Line: 16
Column: 1
import unittest
class TestFloor(serial.SerializedTestCase):
@given(X=hu.tensor(),
engine=st.sampled_from(["", "CUDNN"]),
**hu.gcs)
@settings(deadline=10000)
Reported by Pylint.
Line: 22
Column: 5
engine=st.sampled_from(["", "CUDNN"]),
**hu.gcs)
@settings(deadline=10000)
def test_floor(self, X, gc, dc, engine):
op = core.CreateOperator("Floor", ["X"], ["Y"], engine=engine)
def floor_ref(X):
return (np.floor(X),)
Reported by Pylint.
Line: 22
Column: 5
engine=st.sampled_from(["", "CUDNN"]),
**hu.gcs)
@settings(deadline=10000)
def test_floor(self, X, gc, dc, engine):
op = core.CreateOperator("Floor", ["X"], ["Y"], engine=engine)
def floor_ref(X):
return (np.floor(X),)
Reported by Pylint.
Line: 22
Column: 5
engine=st.sampled_from(["", "CUDNN"]),
**hu.gcs)
@settings(deadline=10000)
def test_floor(self, X, gc, dc, engine):
op = core.CreateOperator("Floor", ["X"], ["Y"], engine=engine)
def floor_ref(X):
return (np.floor(X),)
Reported by Pylint.
Line: 22
Column: 5
engine=st.sampled_from(["", "CUDNN"]),
**hu.gcs)
@settings(deadline=10000)
def test_floor(self, X, gc, dc, engine):
op = core.CreateOperator("Floor", ["X"], ["Y"], engine=engine)
def floor_ref(X):
return (np.floor(X),)
Reported by Pylint.
Line: 23
Column: 9
**hu.gcs)
@settings(deadline=10000)
def test_floor(self, X, gc, dc, engine):
op = core.CreateOperator("Floor", ["X"], ["Y"], engine=engine)
def floor_ref(X):
return (np.floor(X),)
self.assertReferenceChecks(
Reported by Pylint.
caffe2/quantization/server/tanh_dnnlowp_op_test.py
11 issues
Line: 6
Column: 1
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from hypothesis import given, settings
Reported by Pylint.
Line: 9
Column: 1
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from hypothesis import given, settings
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
Reported by Pylint.
Line: 19
Column: 53
class DNNLowPTanhOpTest(hu.HypothesisTestCase):
@given(size=st.integers(1024, 2048), is_empty=st.booleans(), **hu.gcs_cpu_only)
@settings(max_examples=10, deadline=None)
def test_dnnlowp_tanh(self, size, is_empty, gc, dc):
if is_empty:
size = 0
X = (np.random.rand(size) * 10 - 5).astype(np.float32)
Reported by Pylint.
Line: 1
Column: 1
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from hypothesis import given, settings
Reported by Pylint.
Line: 16
Column: 1
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPTanhOpTest(hu.HypothesisTestCase):
@given(size=st.integers(1024, 2048), is_empty=st.booleans(), **hu.gcs_cpu_only)
@settings(max_examples=10, deadline=None)
def test_dnnlowp_tanh(self, size, is_empty, gc, dc):
if is_empty:
size = 0
Reported by Pylint.
Line: 19
Column: 5
class DNNLowPTanhOpTest(hu.HypothesisTestCase):
@given(size=st.integers(1024, 2048), is_empty=st.booleans(), **hu.gcs_cpu_only)
@settings(max_examples=10, deadline=None)
def test_dnnlowp_tanh(self, size, is_empty, gc, dc):
if is_empty:
size = 0
X = (np.random.rand(size) * 10 - 5).astype(np.float32)
Reported by Pylint.
Line: 19
Column: 5
class DNNLowPTanhOpTest(hu.HypothesisTestCase):
@given(size=st.integers(1024, 2048), is_empty=st.booleans(), **hu.gcs_cpu_only)
@settings(max_examples=10, deadline=None)
def test_dnnlowp_tanh(self, size, is_empty, gc, dc):
if is_empty:
size = 0
X = (np.random.rand(size) * 10 - 5).astype(np.float32)
Reported by Pylint.
Line: 19
Column: 5
class DNNLowPTanhOpTest(hu.HypothesisTestCase):
@given(size=st.integers(1024, 2048), is_empty=st.booleans(), **hu.gcs_cpu_only)
@settings(max_examples=10, deadline=None)
def test_dnnlowp_tanh(self, size, is_empty, gc, dc):
if is_empty:
size = 0
X = (np.random.rand(size) * 10 - 5).astype(np.float32)
Reported by Pylint.
Line: 19
Column: 5
class DNNLowPTanhOpTest(hu.HypothesisTestCase):
@given(size=st.integers(1024, 2048), is_empty=st.booleans(), **hu.gcs_cpu_only)
@settings(max_examples=10, deadline=None)
def test_dnnlowp_tanh(self, size, is_empty, gc, dc):
if is_empty:
size = 0
X = (np.random.rand(size) * 10 - 5).astype(np.float32)
Reported by Pylint.
Line: 23
Column: 9
if is_empty:
size = 0
X = (np.random.rand(size) * 10 - 5).astype(np.float32)
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
outputs = []
op_engine_list = [("Tanh", ""), ("Tanh", "DNNLOWP"), ("Int8Tanh", "DNNLOWP")]
Reported by Pylint.
caffe2/python/operator_test/unique_ops_test.py
11 issues
Line: 21
Column: 1
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
from functools import partial
from caffe2.python import core
Reported by Pylint.
Line: 22
Column: 1
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
from functools import partial
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 1
Column: 1
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
Reported by Pylint.
Line: 24
Column: 1
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
from functools import partial
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
Reported by Pylint.
Line: 31
Column: 1
import caffe2.python.serialized_test.serialized_test_util as serial
def _unique_ref(x, return_inverse):
ret = np.unique(x, return_inverse=return_inverse)
if not return_inverse:
ret = [ret]
return ret
Reported by Pylint.
Line: 38
Column: 1
return ret
class TestUniqueOps(serial.SerializedTestCase):
@given(
X=hu.tensor1d(
# allow empty
min_len=0,
dtype=np.int32,
Reported by Pylint.
Line: 49
Column: 5
return_remapping=st.booleans(),
**hu.gcs_no_hip
)
@settings(deadline=10000)
def test_unique_op(self, X, return_remapping, gc, dc):
# impl of unique op does not guarantees return order, sort the input
# so different impl return same outputs
X = np.sort(X)
Reported by Pylint.
Line: 49
Column: 5
return_remapping=st.booleans(),
**hu.gcs_no_hip
)
@settings(deadline=10000)
def test_unique_op(self, X, return_remapping, gc, dc):
# impl of unique op does not guarantees return order, sort the input
# so different impl return same outputs
X = np.sort(X)
Reported by Pylint.
Line: 49
Column: 5
return_remapping=st.booleans(),
**hu.gcs_no_hip
)
@settings(deadline=10000)
def test_unique_op(self, X, return_remapping, gc, dc):
# impl of unique op does not guarantees return order, sort the input
# so different impl return same outputs
X = np.sort(X)
Reported by Pylint.
Line: 49
Column: 5
return_remapping=st.booleans(),
**hu.gcs_no_hip
)
@settings(deadline=10000)
def test_unique_op(self, X, return_remapping, gc, dc):
# impl of unique op does not guarantees return order, sort the input
# so different impl return same outputs
X = np.sort(X)
Reported by Pylint.
caffe2/quantization/server/sigmoid_dnnlowp_op_test.py
11 issues
Line: 6
Column: 1
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from hypothesis import given
Reported by Pylint.
Line: 9
Column: 1
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from hypothesis import given
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
Reported by Pylint.
Line: 18
Column: 56
class DNNLowPSigmoidOpTest(hu.HypothesisTestCase):
@given(size=st.integers(1024, 2048), is_empty=st.booleans(), **hu.gcs_cpu_only)
def test_dnnlowp_sigmoid(self, size, is_empty, gc, dc):
if is_empty:
size = 0
X = (np.random.rand(size) * 20 - 10).astype(np.float32)
Reported by Pylint.
Line: 1
Column: 1
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from hypothesis import given
Reported by Pylint.
Line: 16
Column: 1
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPSigmoidOpTest(hu.HypothesisTestCase):
@given(size=st.integers(1024, 2048), is_empty=st.booleans(), **hu.gcs_cpu_only)
def test_dnnlowp_sigmoid(self, size, is_empty, gc, dc):
if is_empty:
size = 0
Reported by Pylint.
Line: 18
Column: 5
class DNNLowPSigmoidOpTest(hu.HypothesisTestCase):
@given(size=st.integers(1024, 2048), is_empty=st.booleans(), **hu.gcs_cpu_only)
def test_dnnlowp_sigmoid(self, size, is_empty, gc, dc):
if is_empty:
size = 0
X = (np.random.rand(size) * 20 - 10).astype(np.float32)
Reported by Pylint.
Line: 18
Column: 5
class DNNLowPSigmoidOpTest(hu.HypothesisTestCase):
@given(size=st.integers(1024, 2048), is_empty=st.booleans(), **hu.gcs_cpu_only)
def test_dnnlowp_sigmoid(self, size, is_empty, gc, dc):
if is_empty:
size = 0
X = (np.random.rand(size) * 20 - 10).astype(np.float32)
Reported by Pylint.
Line: 18
Column: 5
class DNNLowPSigmoidOpTest(hu.HypothesisTestCase):
@given(size=st.integers(1024, 2048), is_empty=st.booleans(), **hu.gcs_cpu_only)
def test_dnnlowp_sigmoid(self, size, is_empty, gc, dc):
if is_empty:
size = 0
X = (np.random.rand(size) * 20 - 10).astype(np.float32)
Reported by Pylint.
Line: 18
Column: 5
class DNNLowPSigmoidOpTest(hu.HypothesisTestCase):
@given(size=st.integers(1024, 2048), is_empty=st.booleans(), **hu.gcs_cpu_only)
def test_dnnlowp_sigmoid(self, size, is_empty, gc, dc):
if is_empty:
size = 0
X = (np.random.rand(size) * 20 - 10).astype(np.float32)
Reported by Pylint.
Line: 22
Column: 9
if is_empty:
size = 0
X = (np.random.rand(size) * 20 - 10).astype(np.float32)
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
outputs = []
op_engine_list = [
Reported by Pylint.
caffe2/python/onnx/backend_cpp_rep.py
11 issues
Line: 9
Column: 1
from onnx.backend.base import BackendRep, namedtupledict
# This is a wrapper around C++ Caffe2BackendRep,
# mainly to handle the different input and output types for convenience of Python
class Caffe2CppRep(BackendRep):
def __init__(self, cpp_rep):
Reported by Pylint.
Line: 1
Column: 1
## @package onnx
# Module caffe2.python.onnx.backend_rep_cpp
from onnx.backend.base import BackendRep, namedtupledict
Reported by Pylint.
Line: 13
Column: 1
# This is a wrapper around C++ Caffe2BackendRep,
# mainly to handle the different input and output types for convenience of Python
class Caffe2CppRep(BackendRep):
def __init__(self, cpp_rep):
super(Caffe2CppRep, self).__init__()
self.__core = cpp_rep
self.__external_outputs = cpp_rep.external_outputs()
self.__external_inputs = cpp_rep.external_inputs()
Reported by Pylint.
Line: 15
Column: 9
# mainly to handle the different input and output types for convenience of Python
class Caffe2CppRep(BackendRep):
def __init__(self, cpp_rep):
super(Caffe2CppRep, self).__init__()
self.__core = cpp_rep
self.__external_outputs = cpp_rep.external_outputs()
self.__external_inputs = cpp_rep.external_inputs()
self.__uninitialized_inputs = cpp_rep.uninitialized_inputs()
Reported by Pylint.
Line: 21
Column: 5
self.__external_inputs = cpp_rep.external_inputs()
self.__uninitialized_inputs = cpp_rep.uninitialized_inputs()
def init_net(self):
return self.__core.init_net()
def pred_net(self):
return self.__core.pred_net()
Reported by Pylint.
Line: 24
Column: 5
def init_net(self):
return self.__core.init_net()
def pred_net(self):
return self.__core.pred_net()
def external_outputs(self):
return self.__core.external_outputs()
Reported by Pylint.
Line: 27
Column: 5
def pred_net(self):
return self.__core.pred_net()
def external_outputs(self):
return self.__core.external_outputs()
def external_inputs(self):
return self.__core.external_inputs()
Reported by Pylint.
Line: 30
Column: 5
def external_outputs(self):
return self.__core.external_outputs()
def external_inputs(self):
return self.__core.external_inputs()
def run(self, inputs):
output_values = None
if isinstance(inputs, dict):
Reported by Pylint.
Line: 33
Column: 5
def external_inputs(self):
return self.__core.external_inputs()
def run(self, inputs):
output_values = None
if isinstance(inputs, dict):
output_values = self.__core.run(inputs)
elif isinstance(inputs, list) or isinstance(inputs, tuple):
if len(inputs) != len(self.__uninitialized_inputs):
Reported by Pylint.
Line: 37
Column: 14
output_values = None
if isinstance(inputs, dict):
output_values = self.__core.run(inputs)
elif isinstance(inputs, list) or isinstance(inputs, tuple):
if len(inputs) != len(self.__uninitialized_inputs):
raise RuntimeError('Expected {} values for uninitialized '
'graph inputs ({}), but got {}.'.format(
len(self.__uninitialized_inputs),
', '.join(self.__uninitialized_inputs),
Reported by Pylint.
test/cpp/api/modules.cpp
11 issues
Line: 3418
CWE codes:
562
// NOLINTNEXTLINE(bugprone-argument-comment)
ref_attn_weight = torch::sum(ref_attn_weight, /*axis=*/1) / b2;
reference = _batchmatmul(reference, V);
return std::tie(reference, ref_attn_weight);
}
torch::Tensor _split_heads_ref(const torch::Tensor& X, at::IntArrayRef dims, int nheads, int d_head) {
auto X_split = X.reshape({dims[0], dims[1], nheads, d_head});
auto X_split_transposed = X_split.permute({0, 2, 1, 3});
Reported by Cppcheck.
Line: 549
Column: 22
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
auto s = output.sum();
s.backward();
ASSERT_TRUE(torch::equal(output, expected));
ASSERT_TRUE(torch::equal(input.grad(), torch::ones_like(input)));
}
TEST_F(ModulesTest, Flatten) {
Flatten flatten;
Reported by FlawFinder.
Line: 550
Column: 22
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
s.backward();
ASSERT_TRUE(torch::equal(output, expected));
ASSERT_TRUE(torch::equal(input.grad(), torch::ones_like(input)));
}
TEST_F(ModulesTest, Flatten) {
Flatten flatten;
auto input = torch::tensor({{1, 3, 4}, {2, 5, 6}}, torch::dtype(torch::kFloat).requires_grad(true));
Reported by FlawFinder.
Line: 561
Column: 22
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
auto s = output.sum();
s.backward();
ASSERT_TRUE(torch::equal(output, expected));
ASSERT_TRUE(torch::equal(input.grad(), torch::ones_like(input)));
// Testing with optional arguments start_dim and end_dim
Flatten flatten_optional_dims(FlattenOptions().start_dim(2).end_dim(3));
input = torch::tensor({
Reported by FlawFinder.
Line: 562
Column: 22
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
s.backward();
ASSERT_TRUE(torch::equal(output, expected));
ASSERT_TRUE(torch::equal(input.grad(), torch::ones_like(input)));
// Testing with optional arguments start_dim and end_dim
Flatten flatten_optional_dims(FlattenOptions().start_dim(2).end_dim(3));
input = torch::tensor({
{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}},
Reported by FlawFinder.
Line: 579
Column: 22
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
s = output.sum();
s.backward();
ASSERT_TRUE(torch::equal(output, expected));
ASSERT_TRUE(torch::equal(input.grad(), torch::ones_like(input)));
}
TEST_F(ModulesTest, Unflatten) {
// Non-named tensor
Reported by FlawFinder.
Line: 580
Column: 22
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
s = output.sum();
s.backward();
ASSERT_TRUE(torch::equal(output, expected));
ASSERT_TRUE(torch::equal(input.grad(), torch::ones_like(input)));
}
TEST_F(ModulesTest, Unflatten) {
// Non-named tensor
Unflatten unflatten(UnflattenOptions(0, {2, 2}));
Reported by FlawFinder.
Line: 588
Column: 22
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
Unflatten unflatten(UnflattenOptions(0, {2, 2}));
auto output = unflatten->forward(torch::tensor({1, 2, 3, 4}));
auto expected = torch::tensor({{1, 2}, {3, 4}});
ASSERT_TRUE(torch::equal(output, expected));
// Named tensor
auto make_dimnames = [](std::vector<std::string> names) {
std::vector<torch::Dimname> dimnames;
// NOLINTNEXTLINE(performance-for-range-copy)
Reported by FlawFinder.
Line: 610
Column: 22
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
torch::tensor({{1, 2, 3, 4}}).refine_names(make_dimnames({"A", "B"})));
expected = torch::tensor({{{1, 2}, {3, 4}}})
.refine_names(make_dimnames({"A", "B1", "B2"}));
ASSERT_TRUE(torch::equal(output, expected));
}
TEST_F(ModulesTest, AdaptiveMaxPool1d) {
AdaptiveMaxPool1d model(3);
auto x = torch::tensor({{{1, 2, 3, 4, 5}}}, torch::dtype(torch::kFloat).requires_grad(true));
Reported by FlawFinder.
Line: 1471
Column: 22
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
});
auto output = functional(torch::ones(5, torch::requires_grad()));
ASSERT_TRUE(was_called);
ASSERT_TRUE(output.equal(torch::ones(5, torch::requires_grad())));
was_called = false;
// Use the call operator overload here.
output = functional(torch::ones(5, torch::requires_grad()));
ASSERT_TRUE(was_called);
Reported by FlawFinder.
scripts/diagnose_protobuf.py
11 issues
Line: 37
Column: 1
try:
p = Popen([protoc_name, '--version'], stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
except:
print('DEBUG: did not find protoc binary.')
print('DEBUG: out: ' + out)
print('DEBUG: err: ' + err)
native_protobuf_installed = False
else:
Reported by Pylint.
Line: 18
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b404-import-subprocess
import os
import re
from subprocess import Popen, PIPE
# Get python protobuf version.
try:
import google.protobuf
python_version = google.protobuf.__version__
Reported by Bandit.
Line: 24
Column: 5
try:
import google.protobuf
python_version = google.protobuf.__version__
python_protobuf_installed = True
except ImportError:
print("DEBUG: cannot find python protobuf install.")
python_protobuf_installed = False
if os.name == 'nt':
Reported by Pylint.
Line: 27
Column: 5
python_protobuf_installed = True
except ImportError:
print("DEBUG: cannot find python protobuf install.")
python_protobuf_installed = False
if os.name == 'nt':
protoc_name = 'protoc.exe'
else:
protoc_name = 'protoc'
Reported by Pylint.
Line: 30
Column: 5
python_protobuf_installed = False
if os.name == 'nt':
protoc_name = 'protoc.exe'
else:
protoc_name = 'protoc'
try:
p = Popen([protoc_name, '--version'], stdout=PIPE, stderr=PIPE)
Reported by Pylint.
Line: 32
Column: 5
if os.name == 'nt':
protoc_name = 'protoc.exe'
else:
protoc_name = 'protoc'
try:
p = Popen([protoc_name, '--version'], stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
except:
Reported by Pylint.
Line: 35
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b603_subprocess_without_shell_equals_true.html
protoc_name = 'protoc'
try:
p = Popen([protoc_name, '--version'], stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
except:
print('DEBUG: did not find protoc binary.')
print('DEBUG: out: ' + out)
print('DEBUG: err: ' + err)
Reported by Bandit.
Line: 41
Column: 5
print('DEBUG: did not find protoc binary.')
print('DEBUG: out: ' + out)
print('DEBUG: err: ' + err)
native_protobuf_installed = False
else:
if p.returncode:
print('DEBUG: protoc returned a non-zero return code.')
print('DEBUG: out: ' + out)
print('DEBUG: err: ' + err)
Reported by Pylint.
Line: 47
Column: 9
print('DEBUG: protoc returned a non-zero return code.')
print('DEBUG: out: ' + out)
print('DEBUG: err: ' + err)
native_protobuf_installed = False
else:
tmp = re.search(r'\d\.\d\.\d', out)
if tmp:
native_version = tmp.group(0)
native_protobuf_installed = True
Reported by Pylint.
Line: 52
Column: 13
tmp = re.search(r'\d\.\d\.\d', out)
if tmp:
native_version = tmp.group(0)
native_protobuf_installed = True
else:
print('DEBUG: cannot parse protoc version string.')
print('DEBUG: out: ' + out)
native_protobuf_installed = False
Reported by Pylint.
caffe2/python/onnx/tests/ssa_test.py
11 issues
Line: 13
Column: 1
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from onnx import TensorProto
import caffe2.python.onnx.frontend as c2_onnx
from caffe2.python.onnx.helper import c2_native_run_net
from caffe2.python.onnx.tests.test_utils import TestCase
Reported by Pylint.
Line: 79
Column: 9
inputs=[X])
value_info = {'X': (TensorProto.FLOAT, X.shape)}
c2_onnx.Caffe2Frontend._ssa_rewrite(
net,
init_net,
value_info)
self.assertEqual(net.external_input, ['W', 'X', 'b', 's'])
Reported by Pylint.
Line: 130
Column: 9
value_info = {'X': (TensorProto.FLOAT, [4, 2])}
net_copy = copy.deepcopy(net)
c2_onnx.Caffe2Frontend._ssa_rewrite(
net_copy,
None,
value_info)
self.assertEqual(net, net_copy)
Reported by Pylint.
Line: 1
Column: 1
## @package onnx
# Module caffe2.python.onnx.tests.ssa_test
import copy
Reported by Pylint.
Line: 20
Column: 1
from caffe2.python.onnx.tests.test_utils import TestCase
class TestFrontendSSAConversion(TestCase):
def test_ssa(self):
X = np.random.randn(4, 2).astype(np.float32)
W = np.random.randn(3, 2).astype(np.float32)
b = np.random.randn(3).astype(np.float32)
s = np.random.randn(1).astype(np.float32)
Reported by Pylint.
Line: 21
Column: 5
class TestFrontendSSAConversion(TestCase):
def test_ssa(self):
X = np.random.randn(4, 2).astype(np.float32)
W = np.random.randn(3, 2).astype(np.float32)
b = np.random.randn(3).astype(np.float32)
s = np.random.randn(1).astype(np.float32)
np_result = X.dot(W.transpose()) + b + s
Reported by Pylint.
Line: 22
Column: 9
class TestFrontendSSAConversion(TestCase):
def test_ssa(self):
X = np.random.randn(4, 2).astype(np.float32)
W = np.random.randn(3, 2).astype(np.float32)
b = np.random.randn(3).astype(np.float32)
s = np.random.randn(1).astype(np.float32)
np_result = X.dot(W.transpose()) + b + s
Reported by Pylint.
Line: 23
Column: 9
class TestFrontendSSAConversion(TestCase):
def test_ssa(self):
X = np.random.randn(4, 2).astype(np.float32)
W = np.random.randn(3, 2).astype(np.float32)
b = np.random.randn(3).astype(np.float32)
s = np.random.randn(1).astype(np.float32)
np_result = X.dot(W.transpose()) + b + s
net = caffe2_pb2.NetDef()
Reported by Pylint.
Line: 24
Column: 9
def test_ssa(self):
X = np.random.randn(4, 2).astype(np.float32)
W = np.random.randn(3, 2).astype(np.float32)
b = np.random.randn(3).astype(np.float32)
s = np.random.randn(1).astype(np.float32)
np_result = X.dot(W.transpose()) + b + s
net = caffe2_pb2.NetDef()
net.name = 'test-ssa'
Reported by Pylint.
Line: 25
Column: 9
X = np.random.randn(4, 2).astype(np.float32)
W = np.random.randn(3, 2).astype(np.float32)
b = np.random.randn(3).astype(np.float32)
s = np.random.randn(1).astype(np.float32)
np_result = X.dot(W.transpose()) + b + s
net = caffe2_pb2.NetDef()
net.name = 'test-ssa'
net.external_input[:] = ['W', 'X', 'b', 's']
Reported by Pylint.