The following issues were found:
test/distributed/elastic/events/lib_test.py
20 issues
Line: 15
Column: 1
from dataclasses import asdict
from unittest.mock import patch
from torch.distributed.elastic.events import (
Event,
EventSource,
NodeState,
RdzvEvent,
_get_or_create_logger,
Reported by Pylint.
Line: 23
Column: 1
_get_or_create_logger,
construct_and_record_rdzv_event,
)
from torch.testing._internal.common_utils import run_tests
class EventLibTest(unittest.TestCase):
def assert_event(self, actual_event, expected_event):
self.assertEqual(actual_event.name, expected_event.name)
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
Reported by Pylint.
Line: 26
Column: 1
from torch.testing._internal.common_utils import run_tests
class EventLibTest(unittest.TestCase):
def assert_event(self, actual_event, expected_event):
self.assertEqual(actual_event.name, expected_event.name)
self.assertEqual(actual_event.source, expected_event.source)
self.assertEqual(actual_event.timestamp, expected_event.timestamp)
self.assertDictEqual(actual_event.metadata, expected_event.metadata)
Reported by Pylint.
Line: 27
Column: 5
class EventLibTest(unittest.TestCase):
def assert_event(self, actual_event, expected_event):
self.assertEqual(actual_event.name, expected_event.name)
self.assertEqual(actual_event.source, expected_event.source)
self.assertEqual(actual_event.timestamp, expected_event.timestamp)
self.assertDictEqual(actual_event.metadata, expected_event.metadata)
Reported by Pylint.
Line: 34
Column: 5
self.assertDictEqual(actual_event.metadata, expected_event.metadata)
@patch("torch.distributed.elastic.events.get_logging_handler")
def test_get_or_create_logger(self, logging_handler_mock):
logging_handler_mock.return_value = logging.NullHandler()
logger = _get_or_create_logger("test_destination")
self.assertIsNotNone(logger)
self.assertEqual(1, len(logger.handlers))
self.assertIsInstance(logger.handlers[0], logging.NullHandler)
Reported by Pylint.
Line: 41
Column: 5
self.assertEqual(1, len(logger.handlers))
self.assertIsInstance(logger.handlers[0], logging.NullHandler)
def test_event_created(self):
event = Event(
name="test_event",
source=EventSource.AGENT,
metadata={"key1": "value1", "key2": 2},
)
Reported by Pylint.
Line: 51
Column: 5
self.assertEqual(EventSource.AGENT, event.source)
self.assertDictEqual({"key1": "value1", "key2": 2}, event.metadata)
def test_event_deser(self):
event = Event(
name="test_event",
source=EventSource.AGENT,
metadata={"key1": "value1", "key2": 2, "key3": 1.0},
)
Reported by Pylint.
Line: 61
Column: 1
deser_event = Event.deserialize(json_event)
self.assert_event(event, deser_event)
class RdzvEventLibTest(unittest.TestCase):
@patch("torch.distributed.elastic.events.record_rdzv_event")
@patch("torch.distributed.elastic.events.get_logging_handler")
def test_construct_and_record_rdzv_event(self, get_mock, record_mock):
get_mock.return_value = logging.StreamHandler()
construct_and_record_rdzv_event(
Reported by Pylint.
Line: 64
Column: 5
class RdzvEventLibTest(unittest.TestCase):
@patch("torch.distributed.elastic.events.record_rdzv_event")
@patch("torch.distributed.elastic.events.get_logging_handler")
def test_construct_and_record_rdzv_event(self, get_mock, record_mock):
get_mock.return_value = logging.StreamHandler()
construct_and_record_rdzv_event(
run_id="test_run_id",
message="test_message",
node_state=NodeState.RUNNING,
Reported by Pylint.
caffe2/python/operator_test/adagrad_test_helper.py
20 issues
Line: 81
Column: 5
lr,
epsilon,
engine,
ref_adagrad,
gc,
dc,
row_wise=False,
weight_decay=0.0,
counter_halflife=-1,
Reported by Pylint.
Line: 83
Column: 5
engine,
ref_adagrad,
gc,
dc,
row_wise=False,
weight_decay=0.0,
counter_halflife=-1,
):
param, momentum, grad = inputs
Reported by Pylint.
Line: 1
Column: 1
from functools import partial
import caffe2.python.hypothesis_test_util as hu
import numpy as np
from caffe2.python import core
def ref_adagrad(
param_in,
Reported by Pylint.
Line: 8
Column: 1
from caffe2.python import core
def ref_adagrad(
param_in,
mom_in,
grad,
lr,
epsilon,
Reported by Pylint.
Line: 8
Column: 1
from caffe2.python import core
def ref_adagrad(
param_in,
mom_in,
grad,
lr,
epsilon,
Reported by Pylint.
Line: 8
Column: 1
from caffe2.python import core
def ref_adagrad(
param_in,
mom_in,
grad,
lr,
epsilon,
Reported by Pylint.
Line: 8
Column: 1
from caffe2.python import core
def ref_adagrad(
param_in,
mom_in,
grad,
lr,
epsilon,
Reported by Pylint.
Line: 41
Column: 9
param_out = param_in_f32 + grad_adj
if output_effective_lr_and_update:
if using_fp16:
return (
param_out.astype(np.float16),
mom_out.astype(np.float16),
effective_lr.astype(np.float16),
grad_adj.astype(np.float16),
Reported by Pylint.
Line: 56
Column: 9
grad_adj.astype(np.float32),
)
elif output_effective_lr:
if using_fp16:
return (
param_out.astype(np.float16),
mom_out.astype(np.float16),
effective_lr.astype(np.float16),
)
Reported by Pylint.
Line: 69
Column: 5
effective_lr.astype(np.float32),
)
if using_fp16:
return (param_out.astype(np.float16), mom_out.astype(np.float16))
else:
return (param_out.astype(np.float32), mom_out.astype(np.float32))
Reported by Pylint.
caffe2/python/operator_test/histogram_test.py
20 issues
Line: 4
Column: 1
import unittest
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
from hypothesis import given, settings
Reported by Pylint.
Line: 7
Column: 1
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
from hypothesis import given, settings
class TestHistogram(hu.HypothesisTestCase):
@given(rows=st.integers(1, 1000), cols=st.integers(1, 1000), **hu.gcs_cpu_only)
@settings(deadline=10000)
Reported by Pylint.
Line: 13
Column: 62
class TestHistogram(hu.HypothesisTestCase):
@given(rows=st.integers(1, 1000), cols=st.integers(1, 1000), **hu.gcs_cpu_only)
@settings(deadline=10000)
def test_histogram__device_consistency(self, rows, cols, gc, dc):
X = np.random.rand(rows, cols)
bin_edges = list(np.linspace(-2, 10, num=10000))
op = core.CreateOperator("Histogram", ["X"], ["histogram"], bin_edges=bin_edges)
self.assertDeviceChecks(dc, op, [X], [0])
Reported by Pylint.
Line: 1
Column: 1
import unittest
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
from hypothesis import given, settings
Reported by Pylint.
Line: 10
Column: 1
from hypothesis import given, settings
class TestHistogram(hu.HypothesisTestCase):
@given(rows=st.integers(1, 1000), cols=st.integers(1, 1000), **hu.gcs_cpu_only)
@settings(deadline=10000)
def test_histogram__device_consistency(self, rows, cols, gc, dc):
X = np.random.rand(rows, cols)
bin_edges = list(np.linspace(-2, 10, num=10000))
Reported by Pylint.
Line: 13
Column: 5
class TestHistogram(hu.HypothesisTestCase):
@given(rows=st.integers(1, 1000), cols=st.integers(1, 1000), **hu.gcs_cpu_only)
@settings(deadline=10000)
def test_histogram__device_consistency(self, rows, cols, gc, dc):
X = np.random.rand(rows, cols)
bin_edges = list(np.linspace(-2, 10, num=10000))
op = core.CreateOperator("Histogram", ["X"], ["histogram"], bin_edges=bin_edges)
self.assertDeviceChecks(dc, op, [X], [0])
Reported by Pylint.
Line: 13
Column: 5
class TestHistogram(hu.HypothesisTestCase):
@given(rows=st.integers(1, 1000), cols=st.integers(1, 1000), **hu.gcs_cpu_only)
@settings(deadline=10000)
def test_histogram__device_consistency(self, rows, cols, gc, dc):
X = np.random.rand(rows, cols)
bin_edges = list(np.linspace(-2, 10, num=10000))
op = core.CreateOperator("Histogram", ["X"], ["histogram"], bin_edges=bin_edges)
self.assertDeviceChecks(dc, op, [X], [0])
Reported by Pylint.
Line: 13
Column: 5
class TestHistogram(hu.HypothesisTestCase):
@given(rows=st.integers(1, 1000), cols=st.integers(1, 1000), **hu.gcs_cpu_only)
@settings(deadline=10000)
def test_histogram__device_consistency(self, rows, cols, gc, dc):
X = np.random.rand(rows, cols)
bin_edges = list(np.linspace(-2, 10, num=10000))
op = core.CreateOperator("Histogram", ["X"], ["histogram"], bin_edges=bin_edges)
self.assertDeviceChecks(dc, op, [X], [0])
Reported by Pylint.
Line: 14
Column: 9
@given(rows=st.integers(1, 1000), cols=st.integers(1, 1000), **hu.gcs_cpu_only)
@settings(deadline=10000)
def test_histogram__device_consistency(self, rows, cols, gc, dc):
X = np.random.rand(rows, cols)
bin_edges = list(np.linspace(-2, 10, num=10000))
op = core.CreateOperator("Histogram", ["X"], ["histogram"], bin_edges=bin_edges)
self.assertDeviceChecks(dc, op, [X], [0])
def test_histogram__valid_inputs_0(self):
Reported by Pylint.
Line: 16
Column: 9
def test_histogram__device_consistency(self, rows, cols, gc, dc):
X = np.random.rand(rows, cols)
bin_edges = list(np.linspace(-2, 10, num=10000))
op = core.CreateOperator("Histogram", ["X"], ["histogram"], bin_edges=bin_edges)
self.assertDeviceChecks(dc, op, [X], [0])
def test_histogram__valid_inputs_0(self):
workspace.FeedBlob(
"X", np.array([-2.0, -2.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0, 6.0, 9.0])
Reported by Pylint.
caffe2/python/serialized_test/coverage.py
20 issues
Line: 18
Column: 12
`caffe2/python/serialized_test/SerializedTestCoverage.md`
'''
OpSchema = workspace.C.OpSchema
def gen_serialized_test_coverage(source_dir, output_dir):
(covered, not_covered, schemaless) = gen_coverage_sets(source_dir)
num_covered = len(covered)
Reported by Pylint.
Line: 12
Column: 1
import tempfile
from zipfile import ZipFile
'''
Generates a document in markdown format summarizing the coverage of serialized
testing. The document lives in
`caffe2/python/serialized_test/SerializedTestCoverage.md`
'''
Reported by Pylint.
Line: 73
Column: 20
not_covered_ops = set()
schemaless_ops = []
for op_name in core._GetRegisteredOperators():
s = OpSchema.get(op_name)
if s is not None and s.private:
continue
if s:
Reported by Pylint.
Line: 1
Column: 1
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import os
import tempfile
Reported by Pylint.
Line: 8
Column: 1
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import os
import tempfile
from zipfile import ZipFile
'''
Generates a document in markdown format summarizing the coverage of serialized
Reported by Pylint.
Line: 9
Column: 1
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import os
import tempfile
from zipfile import ZipFile
'''
Generates a document in markdown format summarizing the coverage of serialized
testing. The document lives in
Reported by Pylint.
Line: 10
Column: 1
from caffe2.python import core, workspace
import os
import tempfile
from zipfile import ZipFile
'''
Generates a document in markdown format summarizing the coverage of serialized
testing. The document lives in
`caffe2/python/serialized_test/SerializedTestCoverage.md`
Reported by Pylint.
Line: 21
Column: 1
OpSchema = workspace.C.OpSchema
def gen_serialized_test_coverage(source_dir, output_dir):
(covered, not_covered, schemaless) = gen_coverage_sets(source_dir)
num_covered = len(covered)
num_not_covered = len(not_covered)
num_schemaless = len(schemaless)
total_ops = num_covered + num_not_covered
Reported by Pylint.
Line: 28
Column: 79
num_schemaless = len(schemaless)
total_ops = num_covered + num_not_covered
with open(os.path.join(output_dir, 'SerializedTestCoverage.md'), 'w+') as f:
f.write('# Serialized Test Coverage Report\n')
f.write("This is an automatically generated file. Please see "
"`caffe2/python/serialized_test/README.md` for details. "
"In the case of merge conflicts, please rebase and regenerate.\n")
f.write('## Summary\n')
Reported by Pylint.
Line: 44
Column: 13
f.write(
'<summary>There are {} not covered operators</summary>\n\n'.format(
num_not_covered))
for n in sorted(not_covered):
f.write('* ' + n + '\n')
f.write('</details>\n\n')
f.write('## Covered operators\n')
f.write('<details>\n')
Reported by Pylint.
torch/distributed/nn/jit/instantiator.py
20 issues
Line: 21
Column: 1
_FILE_PREFIX = "_remote_module_"
_TEMP_DIR = tempfile.TemporaryDirectory()
INSTANTIATED_TEMPLATE_DIR_PATH = _TEMP_DIR.name
logger.info(f"Created a temporary directory at {INSTANTIATED_TEMPLATE_DIR_PATH}")
sys.path.append(INSTANTIATED_TEMPLATE_DIR_PATH)
def get_arg_return_types_from_interface(module_interface):
assert getattr(
Reported by Pylint.
Line: 29
Column: 22
assert getattr(
module_interface, "__torch_script_interface__", False
), "Expect a TorchScript class interface decorated by @torch.jit.interface."
qualified_name = torch._jit_internal._qualified_name(module_interface)
cu = torch.jit._state._python_cu
module_interface_c = cu.get_interface(qualified_name)
assert (
"forward" in module_interface_c.getMethodNames()
), "Expect forward in interface methods, while it has {}".format(
Reported by Pylint.
Line: 29
Column: 22
assert getattr(
module_interface, "__torch_script_interface__", False
), "Expect a TorchScript class interface decorated by @torch.jit.interface."
qualified_name = torch._jit_internal._qualified_name(module_interface)
cu = torch.jit._state._python_cu
module_interface_c = cu.get_interface(qualified_name)
assert (
"forward" in module_interface_c.getMethodNames()
), "Expect forward in interface methods, while it has {}".format(
Reported by Pylint.
Line: 30
Column: 10
module_interface, "__torch_script_interface__", False
), "Expect a TorchScript class interface decorated by @torch.jit.interface."
qualified_name = torch._jit_internal._qualified_name(module_interface)
cu = torch.jit._state._python_cu
module_interface_c = cu.get_interface(qualified_name)
assert (
"forward" in module_interface_c.getMethodNames()
), "Expect forward in interface methods, while it has {}".format(
module_interface_c.getMethodNames()
Reported by Pylint.
Line: 30
Column: 10
module_interface, "__torch_script_interface__", False
), "Expect a TorchScript class interface decorated by @torch.jit.interface."
qualified_name = torch._jit_internal._qualified_name(module_interface)
cu = torch.jit._state._python_cu
module_interface_c = cu.get_interface(qualified_name)
assert (
"forward" in module_interface_c.getMethodNames()
), "Expect forward in interface methods, while it has {}".format(
module_interface_c.getMethodNames()
Reported by Pylint.
Line: 76
Column: 25
old_text = None
if old_text != text:
with open(out_path, "w") as f:
logger.info("Writing {}".format(out_path))
f.write(text)
else:
logger.info("Skipped writing {}".format(out_path))
Reported by Pylint.
Line: 79
Column: 21
logger.info("Writing {}".format(out_path))
f.write(text)
else:
logger.info("Skipped writing {}".format(out_path))
def _do_instantiate_remote_module_template(
generated_module_name, str_dict, enable_moving_cpu_tensors_to_cuda
):
Reported by Pylint.
Line: 113
Column: 33
)
# Generate the template instance name.
module_interface_cls_name = torch._jit_internal._qualified_name(
module_interface_cls
).replace(".", "_")
generated_module_name = f"{_FILE_PREFIX}{module_interface_cls_name}"
# Generate type annotation strs.
Reported by Pylint.
Line: 113
Column: 33
)
# Generate the template instance name.
module_interface_cls_name = torch._jit_internal._qualified_name(
module_interface_cls
).replace(".", "_")
generated_module_name = f"{_FILE_PREFIX}{module_interface_cls_name}"
# Generate type annotation strs.
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/python3
import importlib
import logging
import os
import sys
import tempfile
from typing import Optional
import torch
Reported by Pylint.
test/onnx/pytorch_helper.py
20 issues
Line: 2
Column: 1
import io
import torch.onnx
import onnx
from caffe2.python.onnx.backend import Caffe2Backend
from caffe2.python.core import BlobReference, Net
_next_idx = 0
# Clone net takes a dict instead of a lambda
Reported by Pylint.
Line: 3
Column: 1
import io
import torch.onnx
import onnx
from caffe2.python.onnx.backend import Caffe2Backend
from caffe2.python.core import BlobReference, Net
_next_idx = 0
# Clone net takes a dict instead of a lambda
Reported by Pylint.
Line: 4
Column: 1
import io
import torch.onnx
import onnx
from caffe2.python.onnx.backend import Caffe2Backend
from caffe2.python.core import BlobReference, Net
_next_idx = 0
# Clone net takes a dict instead of a lambda
Reported by Pylint.
Line: 5
Column: 1
import torch.onnx
import onnx
from caffe2.python.onnx.backend import Caffe2Backend
from caffe2.python.core import BlobReference, Net
_next_idx = 0
# Clone net takes a dict instead of a lambda
# It should probably take a lambda, it is more flexible
Reported by Pylint.
Line: 50
Column: 9
value.
"""
if prefix_name is None:
global _next_idx
prefix_name = "pytorch_import_" + str(_next_idx) + "/"
_next_idx += 1
# TODO: handle the case where model cannot be exported
# and embed as a Python op in Caffe2
Reported by Pylint.
Line: 54
Column: 3
prefix_name = "pytorch_import_" + str(_next_idx) + "/"
_next_idx += 1
# TODO: handle the case where model cannot be exported
# and embed as a Python op in Caffe2
f = io.BytesIO()
torch.onnx.export(
model, sample_arguments, f, export_params=True)
onnx_model = onnx.load(io.BytesIO(f.getvalue()))
Reported by Pylint.
Line: 1
Column: 1
import io
import torch.onnx
import onnx
from caffe2.python.onnx.backend import Caffe2Backend
from caffe2.python.core import BlobReference, Net
_next_idx = 0
# Clone net takes a dict instead of a lambda
Reported by Pylint.
Line: 4
Column: 1
import io
import torch.onnx
import onnx
from caffe2.python.onnx.backend import Caffe2Backend
from caffe2.python.core import BlobReference, Net
_next_idx = 0
# Clone net takes a dict instead of a lambda
Reported by Pylint.
Line: 5
Column: 1
import torch.onnx
import onnx
from caffe2.python.onnx.backend import Caffe2Backend
from caffe2.python.core import BlobReference, Net
_next_idx = 0
# Clone net takes a dict instead of a lambda
# It should probably take a lambda, it is more flexible
Reported by Pylint.
Line: 8
Column: 1
from caffe2.python.core import BlobReference, Net
_next_idx = 0
# Clone net takes a dict instead of a lambda
# It should probably take a lambda, it is more flexible
# We fake dict here
Reported by Pylint.
tools/linter/install/download_bin.py
20 issues
Line: 30
Column: 5
if not os.path.exists(path):
return ""
hash = hashlib.sha256()
# Open the file in binary mode and hash it.
with open(path, "rb") as f:
for b in f:
hash.update(b)
Reported by Pylint.
Line: 67
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b310-urllib-urlopen
# Try to download binary.
print(f"Downloading {name} to {output_dir}")
try:
urllib.request.urlretrieve(
url,
filename,
reporthook=report_download_progress if sys.stdout.isatty() else None,
)
except urllib.error.URLError as e:
Reported by Bandit.
Line: 1
Column: 1
import platform
import sys
import stat
import hashlib
import subprocess
import os
import urllib.request
import urllib.error
Reported by Pylint.
Line: 5
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b404-import-subprocess
import sys
import stat
import hashlib
import subprocess
import os
import urllib.request
import urllib.error
from typing import Dict
Reported by Bandit.
Line: 16
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b603_subprocess_without_shell_equals_true.html
HOST_PLATFORM = platform.system()
# PyTorch directory root
result = subprocess.run(
["git", "rev-parse", "--show-toplevel"], stdout=subprocess.PIPE, check=True,
)
PYTORCH_ROOT = result.stdout.decode("utf-8").strip()
HASH_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "hashes")
Reported by Bandit.
Line: 16
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b607_start_process_with_partial_path.html
HOST_PLATFORM = platform.system()
# PyTorch directory root
result = subprocess.run(
["git", "rev-parse", "--show-toplevel"], stdout=subprocess.PIPE, check=True,
)
PYTORCH_ROOT = result.stdout.decode("utf-8").strip()
HASH_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "hashes")
Reported by Bandit.
Line: 33
Column: 30
hash = hashlib.sha256()
# Open the file in binary mode and hash it.
with open(path, "rb") as f:
for b in f:
hash.update(b)
# Return the hash as a hexadecimal string.
return hash.hexdigest()
Reported by Pylint.
Line: 34
Column: 13
# Open the file in binary mode and hash it.
with open(path, "rb") as f:
for b in f:
hash.update(b)
# Return the hash as a hexadecimal string.
return hash.hexdigest()
Reported by Pylint.
Line: 49
Column: 9
"""
if file_size != -1:
percent = min(1, (chunk_number * chunk_size) / file_size)
bar = "#" * int(64 * percent)
sys.stdout.write("\r0% |{:<64}| {}%".format(bar, int(percent * 100)))
def download_bin(name: str, output_dir: str, platform_to_url: Dict[str, str]) -> bool:
"""
Reported by Pylint.
Line: 55
Column: 1
def download_bin(name: str, output_dir: str, platform_to_url: Dict[str, str]) -> bool:
"""
Downloads the binary appropriate for the host platform and stores it in the given output directory.
"""
if HOST_PLATFORM not in platform_to_url:
print(f"Unsupported platform: {HOST_PLATFORM}")
return False
Reported by Pylint.
torch/fx/experimental/fx2trt/converters/quantization.py
20 issues
Line: 2
Column: 1
import torch
import tensorrt as trt
from torch.fx.experimental.fx2trt.fx2trt import tensorrt_converter
from .helper_functions import get_dyn_range, get_inputs_from_args_and_kwargs
quantize_per_tensor_inputs = ["input", "scale", "zero_point", "dtype"]
Reported by Pylint.
Line: 5
Column: 1
import tensorrt as trt
from torch.fx.experimental.fx2trt.fx2trt import tensorrt_converter
from .helper_functions import get_dyn_range, get_inputs_from_args_and_kwargs
quantize_per_tensor_inputs = ["input", "scale", "zero_point", "dtype"]
Reported by Pylint.
Line: 12
Column: 21
@tensorrt_converter("dequantize")
@tensorrt_converter(torch.dequantize)
@tensorrt_converter(torch.nn.quantized.modules.DeQuantize)
def dequantize(network, submod, args, kwargs, layer_name):
input_val = args[0]
if not isinstance(input_val, trt.tensorrt.ITensor):
Reported by Pylint.
Line: 24
Column: 21
return input_val
@tensorrt_converter(torch.quantize_per_tensor)
@tensorrt_converter(torch.nn.quantized.modules.Quantize)
def quantize(network, submod, args, kwargs, layer_name):
# If submod is not nn.Module then it's quantize_per_tensor
if not isinstance(submod, torch.nn.Module):
input_val, scale, zero_point, dtype = get_inputs_from_args_and_kwargs(args, kwargs, quantize_per_tensor_inputs)
Reported by Pylint.
Line: 40
Column: 17
raise RuntimeError(f'Quantize received input {input_val} that is not part '
'of the TensorRT region!')
if dtype != torch.quint8:
raise RuntimeError(f"Only support torch.quint8 quantized type for activation, get {dtype}.")
input_val.dynamic_range = get_dyn_range(scale, zero_point, dtype)
return input_val
Reported by Pylint.
Line: 14
Column: 39
@tensorrt_converter("dequantize")
@tensorrt_converter(torch.dequantize)
@tensorrt_converter(torch.nn.quantized.modules.DeQuantize)
def dequantize(network, submod, args, kwargs, layer_name):
input_val = args[0]
if not isinstance(input_val, trt.tensorrt.ITensor):
raise RuntimeError(f'Dequantize received input {input_val} that is not part '
'of the TensorRT region!')
Reported by Pylint.
Line: 14
Column: 16
@tensorrt_converter("dequantize")
@tensorrt_converter(torch.dequantize)
@tensorrt_converter(torch.nn.quantized.modules.DeQuantize)
def dequantize(network, submod, args, kwargs, layer_name):
input_val = args[0]
if not isinstance(input_val, trt.tensorrt.ITensor):
raise RuntimeError(f'Dequantize received input {input_val} that is not part '
'of the TensorRT region!')
Reported by Pylint.
Line: 14
Column: 25
@tensorrt_converter("dequantize")
@tensorrt_converter(torch.dequantize)
@tensorrt_converter(torch.nn.quantized.modules.DeQuantize)
def dequantize(network, submod, args, kwargs, layer_name):
input_val = args[0]
if not isinstance(input_val, trt.tensorrt.ITensor):
raise RuntimeError(f'Dequantize received input {input_val} that is not part '
'of the TensorRT region!')
Reported by Pylint.
Line: 14
Column: 47
@tensorrt_converter("dequantize")
@tensorrt_converter(torch.dequantize)
@tensorrt_converter(torch.nn.quantized.modules.DeQuantize)
def dequantize(network, submod, args, kwargs, layer_name):
input_val = args[0]
if not isinstance(input_val, trt.tensorrt.ITensor):
raise RuntimeError(f'Dequantize received input {input_val} that is not part '
'of the TensorRT region!')
Reported by Pylint.
Line: 26
Column: 45
@tensorrt_converter(torch.quantize_per_tensor)
@tensorrt_converter(torch.nn.quantized.modules.Quantize)
def quantize(network, submod, args, kwargs, layer_name):
# If submod is not nn.Module then it's quantize_per_tensor
if not isinstance(submod, torch.nn.Module):
input_val, scale, zero_point, dtype = get_inputs_from_args_and_kwargs(args, kwargs, quantize_per_tensor_inputs)
else:
input_val = args[0]
Reported by Pylint.
tools/stats/test_history.py
20 issues
Line: 1
Column: 1
#!/usr/bin/env python3
import argparse
import subprocess
import sys
from datetime import datetime, timezone
from signal import SIG_DFL, SIGPIPE, signal
from typing import Dict, Iterator, List, Optional, Set, Tuple
Reported by Pylint.
Line: 4
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b404-import-subprocess
#!/usr/bin/env python3
import argparse
import subprocess
import sys
from datetime import datetime, timezone
from signal import SIG_DFL, SIGPIPE, signal
from typing import Dict, Iterator, List, Optional, Set, Tuple
Reported by Bandit.
Line: 14
Column: 1
get_test_stats_summaries)
def get_git_commit_history(
*,
path: str,
ref: str
) -> List[Tuple[str, datetime]]:
rc = subprocess.check_output(
Reported by Pylint.
Line: 19
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b603_subprocess_without_shell_equals_true.html
path: str,
ref: str
) -> List[Tuple[str, datetime]]:
rc = subprocess.check_output(
['git', '-C', path, 'log', '--pretty=format:%H %ct', ref],
).decode("latin-1")
return [
(x[0], datetime.fromtimestamp(int(x[1]), tz=timezone.utc))
for x in [line.split(" ") for line in rc.split("\n")]
Reported by Bandit.
Line: 19
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b607_start_process_with_partial_path.html
path: str,
ref: str
) -> List[Tuple[str, datetime]]:
rc = subprocess.check_output(
['git', '-C', path, 'log', '--pretty=format:%H %ct', ref],
).decode("latin-1")
return [
(x[0], datetime.fromtimestamp(int(x[1]), tz=timezone.utc))
for x in [line.split(" ") for line in rc.split("\n")]
Reported by Bandit.
Line: 19
Column: 5
path: str,
ref: str
) -> List[Tuple[str, datetime]]:
rc = subprocess.check_output(
['git', '-C', path, 'log', '--pretty=format:%H %ct', ref],
).decode("latin-1")
return [
(x[0], datetime.fromtimestamp(int(x[1]), tz=timezone.utc))
for x in [line.split(" ") for line in rc.split("\n")]
Reported by Pylint.
Line: 28
Column: 1
]
def make_column(
*,
data: Optional[Report],
filename: Optional[str],
suite_name: Optional[str],
test_name: str,
Reported by Pylint.
Line: 49
Column: 13
case = cases[0]
status = case['status']
omitted = len(cases) - 1
if status:
return f'{status.rjust(num_length)} ', omitted
else:
return f'{case["seconds"]:{num_length}.{decimals}f}s', omitted
else:
return f'{"absent".rjust(num_length)} ', 0
Reported by Pylint.
Line: 59
Column: 1
return ' ' * (num_length + 1), 0
def make_columns(
*,
jobs: List[str],
jsons: Dict[str, Report],
omitted: Dict[str, int],
filename: Optional[str],
Reported by Pylint.
Line: 92
Column: 1
return ' '.join(columns)
def make_lines(
*,
jobs: Set[str],
jsons: Dict[str, List[Report]],
filename: Optional[str],
suite_name: Optional[str],
Reported by Pylint.
test/onnx/model_defs/srresnet.py
20 issues
Line: 3
Column: 1
import math
from torch import nn
from torch.nn import init
def _initialize_orthogonal(conv):
prelu_gain = math.sqrt(2)
init.orthogonal(conv.weight, gain=prelu_gain)
Reported by Pylint.
Line: 4
Column: 1
import math
from torch import nn
from torch.nn import init
def _initialize_orthogonal(conv):
prelu_gain = math.sqrt(2)
init.orthogonal(conv.weight, gain=prelu_gain)
Reported by Pylint.
Line: 1
Column: 1
import math
from torch import nn
from torch.nn import init
def _initialize_orthogonal(conv):
prelu_gain = math.sqrt(2)
init.orthogonal(conv.weight, gain=prelu_gain)
Reported by Pylint.
Line: 14
Column: 1
conv.bias.data.zero_()
class ResidualBlock(nn.Module):
def __init__(self, n_filters):
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(n_filters, n_filters, kernel_size=3, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(n_filters)
self.prelu = nn.PReLU(n_filters)
Reported by Pylint.
Line: 14
Column: 1
conv.bias.data.zero_()
class ResidualBlock(nn.Module):
def __init__(self, n_filters):
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(n_filters, n_filters, kernel_size=3, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(n_filters)
self.prelu = nn.PReLU(n_filters)
Reported by Pylint.
Line: 16
Column: 9
class ResidualBlock(nn.Module):
def __init__(self, n_filters):
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(n_filters, n_filters, kernel_size=3, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(n_filters)
self.prelu = nn.PReLU(n_filters)
self.conv2 = nn.Conv2d(n_filters, n_filters, kernel_size=3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(n_filters)
Reported by Pylint.
Line: 27
Column: 5
_initialize_orthogonal(self.conv1)
_initialize_orthogonal(self.conv2)
def forward(self, x):
residual = self.prelu(self.bn1(self.conv1(x)))
residual = self.bn2(self.conv2(residual))
return x + residual
Reported by Pylint.
Line: 27
Column: 5
_initialize_orthogonal(self.conv1)
_initialize_orthogonal(self.conv2)
def forward(self, x):
residual = self.prelu(self.bn1(self.conv1(x)))
residual = self.bn2(self.conv2(residual))
return x + residual
Reported by Pylint.
Line: 33
Column: 1
return x + residual
class UpscaleBlock(nn.Module):
def __init__(self, n_filters):
super(UpscaleBlock, self).__init__()
self.upscaling_conv = nn.Conv2d(n_filters, 4 * n_filters, kernel_size=3, padding=1)
self.upscaling_shuffler = nn.PixelShuffle(2)
self.upscaling = nn.PReLU(n_filters)
Reported by Pylint.
Line: 33
Column: 1
return x + residual
class UpscaleBlock(nn.Module):
def __init__(self, n_filters):
super(UpscaleBlock, self).__init__()
self.upscaling_conv = nn.Conv2d(n_filters, 4 * n_filters, kernel_size=3, padding=1)
self.upscaling_shuffler = nn.PixelShuffle(2)
self.upscaling = nn.PReLU(n_filters)
Reported by Pylint.