The following issues were found:
caffe2/python/trt/test_pt_onnx_trt.py
57 issues
Line: 22
Column: 1
from PIL import Image
import numpy as np
import torch
import torchvision.models as models
import pycuda.driver as cuda
# This import causes pycuda to automatically manage CUDA context creation and cleanup.
import pycuda.autoinit
Reported by Pylint.
Line: 24
Column: 1
import torch
import torchvision.models as models
import pycuda.driver as cuda
# This import causes pycuda to automatically manage CUDA context creation and cleanup.
import pycuda.autoinit
import tensorrt as trt
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
Reported by Pylint.
Line: 26
Column: 1
import pycuda.driver as cuda
# This import causes pycuda to automatically manage CUDA context creation and cleanup.
import pycuda.autoinit
import tensorrt as trt
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
def allocate_buffers(engine):
Reported by Pylint.
Line: 28
Column: 1
# This import causes pycuda to automatically manage CUDA context creation and cleanup.
import pycuda.autoinit
import tensorrt as trt
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
def allocate_buffers(engine):
    h_input = cuda.pagelocked_empty(trt.volume(engine.get_binding_shape(0)),
                                    dtype=trt.nptype(trt.float32))
Reported by Pylint.
Line: 82
Column: 25
model = getattr(models, model_name)(pretrained=True)
shape = (1,) + input_shape
dummy_input = (torch.randn(shape),)
onnx_name = model_name + ".onnx"
torch.onnx.export(model,
                  dummy_input,
                  onnx_name,
Reported by Pylint.
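For readers unfamiliar with the export pattern flagged above, a minimal self-contained sketch of the same flow (model choice, input shape, and file name are illustrative, not taken from the test):

import torch
import torchvision.models as models

# Export a pretrained torchvision model to ONNX, mirroring the snippet above.
model = models.resnet18(pretrained=True)
dummy_input = (torch.randn(1, 3, 224, 224),)
torch.onnx.export(model, dummy_input, "resnet18.onnx")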
Line: 43
Column: 9
def load_normalized_test_case(input_shape, test_image, pagelocked_buffer, normalization_hint):
    def normalize_image(image):
        c, h, w = input_shape
        image_arr = np.asarray(image.resize((w, h), Image.ANTIALIAS)).transpose([2, 0, 1])\
            .astype(trt.nptype(trt.float32)).ravel()
        if (normalization_hint == 0):
            return (image_arr / 255.0 - 0.45) / 0.225
        elif (normalization_hint == 1):
Reported by Pylint.
Line: 98
Column: 21
h_input, d_input, h_output, d_output, stream = allocate_buffers(engine)
with engine.create_execution_context() as context:
    err_count = 0
    for index, f in enumerate(self.image_files):
        test_case = load_normalized_test_case(input_shape, f,\
                                              h_input, normalization_hint)
        cuda.memcpy_htod_async(d_input, h_input, stream)
        context.execute_async_v2(bindings=[d_input, d_output],
Reported by Pylint.
Line: 1
Column: 1
###################################################################################################
# ATTENTION! This test will most likely fail if you install only TensorRT 6.0.1.
# That's because it ships with an older version of the ONNX parser that does not support some
# required features. To make it work, please use the newer version: https://github.com/onnx/onnx-tensorrt
# Just clone it and do something like this:
#
# ~/pt/third_party/onnx-tensorrt$ mkdir build/
# ~/pt/third_party/onnx-tensorrt$ cd build/
# ~/pt/third_party/onnx-tensorrt/build$ cmake ..
Reported by Pylint.
Line: 31
Column: 1
import tensorrt as trt
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
def allocate_buffers(engine):
    h_input = cuda.pagelocked_empty(trt.volume(engine.get_binding_shape(0)),
                                    dtype=trt.nptype(trt.float32))
    h_output = cuda.pagelocked_empty(trt.volume(engine.get_binding_shape(1)),
                                     dtype=trt.nptype(trt.float32))
    d_input = cuda.mem_alloc(h_input.nbytes)
Reported by Pylint.
torch/optim/_multi_tensor/adam.py
56 issues
Line: 3
Column: 1
import math
import torch
from ..optimizer import Optimizer
from collections import defaultdict
class Adam(Optimizer):
r"""Implements Adam algorithm with multi tensor APIs.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Reported by Pylint.
Line: 92
Column: 40
if len(state) == 0:
    state['step'] = 0
    # Exponential moving average of gradient values
    state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
    # Exponential moving average of squared gradient values
    state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
    if amsgrad:
        # Maintains max of all exp. moving avg. of sq. grad. values
        state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
Reported by Pylint.
Line: 92
Column: 74
if len(state) == 0:
    state['step'] = 0
    # Exponential moving average of gradient values
    state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
    # Exponential moving average of squared gradient values
    state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
    if amsgrad:
        # Maintains max of all exp. moving avg. of sq. grad. values
        state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
Reported by Pylint.
Line: 94
Column: 43
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if amsgrad:
    # Maintains max of all exp. moving avg. of sq. grad. values
    state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avg.append(state['exp_avg'])
Reported by Pylint.
Line: 94
Column: 77
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if amsgrad:
    # Maintains max of all exp. moving avg. of sq. grad. values
    state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avg.append(state['exp_avg'])
Reported by Pylint.
Line: 97
Column: 85
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if amsgrad:
    # Maintains max of all exp. moving avg. of sq. grad. values
    state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avg.append(state['exp_avg'])
exp_avg_sq.append(state['exp_avg_sq'])
if amsgrad:
Reported by Pylint.
Line: 97
Column: 51
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if amsgrad:
    # Maintains max of all exp. moving avg. of sq. grad. values
    state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avg.append(state['exp_avg'])
exp_avg_sq.append(state['exp_avg_sq'])
if amsgrad:
Reported by Pylint.
Line: 113
Column: 25
bias_correction1 = [1 - beta1 ** state['step'] for state in states]
bias_correction2 = [1 - beta2 ** state['step'] for state in states]
if group['weight_decay'] != 0:
    grads = torch._foreach_add(grads, params_with_grad, alpha=group['weight_decay'])
#
# Decay the first and second moment running average coefficient
#
torch._foreach_mul_(exp_avg, beta1)
Reported by Pylint.
Line: 118
Column: 13
#
# Decay the first and second moment running average coefficient
#
torch._foreach_mul_(exp_avg, beta1)
torch._foreach_add_(exp_avg, grads, alpha=1 - beta1)
torch._foreach_mul_(exp_avg_sq, beta2)
torch._foreach_addcmul_(exp_avg_sq, grads, grads, 1 - beta2)
Reported by Pylint.
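The torch._foreach_* calls above apply the Adam moment update to all parameters in one batched call. As a reference, a per-tensor sketch of the same arithmetic (names as in the snippet; an illustration, not the module's code):

# Per-tensor equivalent of the batched _foreach_ moment update above.
# Assumes exp_avg, exp_avg_sq, and grads are parallel lists of tensors.
for avg, avg_sq, grad in zip(exp_avg, exp_avg_sq, grads):
    avg.mul_(beta1).add_(grad, alpha=1 - beta1)               # first moment
    avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)  # second moment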
Line: 119
Column: 13
# Decay the first and second moment running average coefficient
#
torch._foreach_mul_(exp_avg, beta1)
torch._foreach_add_(exp_avg, grads, alpha=1 - beta1)
torch._foreach_mul_(exp_avg_sq, beta2)
torch._foreach_addcmul_(exp_avg_sq, grads, grads, 1 - beta2)
if amsgrad:
Reported by Pylint.
test/mobile/test_bytecode.py
56 issues
Line: 5
Column: 1
import io
import shutil
import tempfile
import torch
import torch.utils.show_pickle
# from torch.utils.mobile_optimizer import optimize_for_mobile
from torch.jit.mobile import (
    _load_for_lite_interpreter,
    _get_model_bytecode_version,
Reported by Pylint.
Line: 6
Column: 1
import shutil
import tempfile
import torch
import torch.utils.show_pickle
# from torch.utils.mobile_optimizer import optimize_for_mobile
from torch.jit.mobile import (
    _load_for_lite_interpreter,
    _get_model_bytecode_version,
    _get_model_ops_and_info,
Reported by Pylint.
Line: 8
Column: 1
import torch
import torch.utils.show_pickle
# from torch.utils.mobile_optimizer import optimize_for_mobile
from torch.jit.mobile import (
    _load_for_lite_interpreter,
    _get_model_bytecode_version,
    _get_model_ops_and_info,
    _backport_for_mobile_to_buffer,
    _backport_for_mobile)
Reported by Pylint.
Line: 14
Column: 1
    _get_model_ops_and_info,
    _backport_for_mobile_to_buffer,
    _backport_for_mobile)
from torch.testing._internal.common_utils import TestCase, run_tests
from pathlib import Path
pytorch_test_dir = Path(__file__).resolve().parents[1]
# script_module_v4.ptl and script_module_v5.ptl source code
Reported by Pylint.
Line: 286
Column: 13
# Backport model to v4
script_module_v4_buffer = _backport_for_mobile_to_buffer(
    script_module_v5_path, maximum_checked_in_model_version - 1)
buf = io.StringIO()
# Check version of the model v4 from backport
bytesio = io.BytesIO(script_module_v4_buffer)
backport_version = _get_model_bytecode_version(bytesio)
assert(backport_version == maximum_checked_in_model_version - 1)
Reported by Pylint.
Line: 303
Column: 3
def test_get_model_ops_and_info(self):
    # TODO update this to be more in the style of the above tests after a backport from 6 -> 5 exists
    script_module_v6 = pytorch_test_dir / "cpp" / "jit" / "script_module_v6.ptl"
    ops_v6 = _get_model_ops_and_info(script_module_v6)
    assert(ops_v6["aten::add.int"].num_schema_args == 2)
    assert(ops_v6["aten::add.Scalar"].num_schema_args == 2)
Reported by Pylint.
Line: 1
Column: 1
import fnmatch
import io
import shutil
import tempfile
import torch
import torch.utils.show_pickle
# from torch.utils.mobile_optimizer import optimize_for_mobile
from torch.jit.mobile import (
    _load_for_lite_interpreter,
Reported by Pylint.
Line: 15
Column: 1
    _backport_for_mobile_to_buffer,
    _backport_for_mobile)
from torch.testing._internal.common_utils import TestCase, run_tests
from pathlib import Path
pytorch_test_dir = Path(__file__).resolve().parents[1]
# script_module_v4.ptl and script_module_v5.ptl source code
# class TestModule(torch.nn.Module):
Reported by Pylint.
Line: 143
Column: 1
# Need to be updated when a bytecode version is completely retired
MINIMUM_TO_VERSION = 4
class testVariousModelVersions(TestCase):
    def test_get_model_bytecode_version(self):
        def check_model_version(model_path, expect_version):
            actual_version = _get_model_bytecode_version(model_path)
            assert(actual_version == expect_version)
        for version, model_info in SCRIPT_MODULE_BYTECODE_PKL.items():
Reported by Pylint.
torch/utils/collect_env.py
56 issues
Line: 51
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b602_subprocess_popen_with_shell_equals_true.html
def run(command):
"""Returns (return-code, stdout, stderr)"""
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
raw_output, raw_err = p.communicate()
rc = p.returncode
if get_platform() == 'win32':
enc = 'oem'
else:
Reported by Bandit.
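Bandit B602 flags shell=True because the command string is handed to a shell. A minimal sketch of the usual mitigation, assuming the commands run() receives need no shell features such as pipes or globbing (the helper name is ours):

import shlex
import subprocess

def run_no_shell(command):
    # Split the command into an argv list so no shell is ever invoked.
    p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, shell=False)
    raw_output, raw_err = p.communicate()
    return p.returncode, raw_output, raw_err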
Line: 126
Column: 67
def get_gpu_info(run_lambda):
    if get_platform() == 'darwin' or (TORCH_AVAILABLE and hasattr(torch.version, 'hip') and torch.version.hip is not None):
        if TORCH_AVAILABLE and torch.cuda.is_available():
            return torch.cuda.get_device_name(None)
        return None
    smi = get_nvidia_smi()
    uuid_regex = re.compile(r' \(UUID: .+?\)')
Reported by Pylint.
Line: 126
Column: 93
def get_gpu_info(run_lambda):
    if get_platform() == 'darwin' or (TORCH_AVAILABLE and hasattr(torch.version, 'hip') and torch.version.hip is not None):
        if TORCH_AVAILABLE and torch.cuda.is_available():
            return torch.cuda.get_device_name(None)
        return None
    smi = get_nvidia_smi()
    uuid_regex = re.compile(r' \(UUID: .+?\)')
Reported by Pylint.
Line: 302
Column: 30
if TORCH_AVAILABLE:
    version_str = torch.__version__
    debug_mode_str = str(torch.version.debug)
    cuda_available_str = str(torch.cuda.is_available())
    cuda_version_str = torch.version.cuda
    if not hasattr(torch.version, 'hip') or torch.version.hip is None: # cuda version
        hip_compiled_version = hip_runtime_version = miopen_runtime_version = 'N/A'
    else: # HIP version
Reported by Pylint.
Line: 304
Column: 28
version_str = torch.__version__
debug_mode_str = str(torch.version.debug)
cuda_available_str = str(torch.cuda.is_available())
cuda_version_str = torch.version.cuda
if not hasattr(torch.version, 'hip') or torch.version.hip is None: # cuda version
    hip_compiled_version = hip_runtime_version = miopen_runtime_version = 'N/A'
else: # HIP version
    cfg = torch._C._show_config().split('\n')
    hip_runtime_version = [s.rsplit(None, 1)[-1] for s in cfg if 'HIP Runtime' in s][0]
Reported by Pylint.
Line: 305
Column: 24
debug_mode_str = str(torch.version.debug)
cuda_available_str = str(torch.cuda.is_available())
cuda_version_str = torch.version.cuda
if not hasattr(torch.version, 'hip') or torch.version.hip is None: # cuda version
    hip_compiled_version = hip_runtime_version = miopen_runtime_version = 'N/A'
else: # HIP version
    cfg = torch._C._show_config().split('\n')
    hip_runtime_version = [s.rsplit(None, 1)[-1] for s in cfg if 'HIP Runtime' in s][0]
    miopen_runtime_version = [s.rsplit(None, 1)[-1] for s in cfg if 'MIOpen' in s][0]
Reported by Pylint.
Line: 305
Column: 49
debug_mode_str = str(torch.version.debug)
cuda_available_str = str(torch.cuda.is_available())
cuda_version_str = torch.version.cuda
if not hasattr(torch.version, 'hip') or torch.version.hip is None: # cuda version
    hip_compiled_version = hip_runtime_version = miopen_runtime_version = 'N/A'
else: # HIP version
    cfg = torch._C._show_config().split('\n')
    hip_runtime_version = [s.rsplit(None, 1)[-1] for s in cfg if 'HIP Runtime' in s][0]
    miopen_runtime_version = [s.rsplit(None, 1)[-1] for s in cfg if 'MIOpen' in s][0]
Reported by Pylint.
Line: 312
Column: 36
    hip_runtime_version = [s.rsplit(None, 1)[-1] for s in cfg if 'HIP Runtime' in s][0]
    miopen_runtime_version = [s.rsplit(None, 1)[-1] for s in cfg if 'MIOpen' in s][0]
    cuda_version_str = 'N/A'
    hip_compiled_version = torch.version.hip
else:
    version_str = debug_mode_str = cuda_available_str = cuda_version_str = 'N/A'
    hip_compiled_version = hip_runtime_version = miopen_runtime_version = 'N/A'
sys_version = sys.version.replace("\n", " ")
Reported by Pylint.
Line: 308
Column: 19
if not hasattr(torch.version, 'hip') or torch.version.hip is None: # cuda version
    hip_compiled_version = hip_runtime_version = miopen_runtime_version = 'N/A'
else: # HIP version
    cfg = torch._C._show_config().split('\n')
    hip_runtime_version = [s.rsplit(None, 1)[-1] for s in cfg if 'HIP Runtime' in s][0]
    miopen_runtime_version = [s.rsplit(None, 1)[-1] for s in cfg if 'MIOpen' in s][0]
    cuda_version_str = 'N/A'
    hip_compiled_version = torch.version.hip
else:
Reported by Pylint.
benchmarks/operator_benchmark/pt/pool_test.py
56 issues
Line: 2
Column: 1
import operator_benchmark as op_bench
import torch
import torch.nn as nn
"""
Microbenchmarks for MaxPool1d and AvgPool1d operators.
"""
# Configs for pool-1d ops
Reported by Pylint.
Line: 3
Column: 1
import operator_benchmark as op_bench
import torch
import torch.nn as nn
"""
Microbenchmarks for MaxPool1d and AvgPool1d operators.
"""
# Configs for pool-1d ops
Reported by Pylint.
Line: 10
Column: 25
"""
# Configs for pool-1d ops
pool_1d_configs_short = op_bench.config_list(
    attr_names=[
        'kernel', 'stride', 'N', 'C', 'L'
    ],
    attrs=[
        [3, 1, 8, 256, 256],
Reported by Pylint.
Line: 23
Column: 24
    tags=['short']
)
pool_1d_configs_long = op_bench.cross_product_configs(
    kernel=[3],
    stride=[1, 2],
    N=[8, 16],
    C=[3],
    L=[128, 256],
Reported by Pylint.
Line: 33
Column: 20
    tags=['long']
)
pool_1d_ops_list = op_bench.op_list(
    attr_names=['op_name', 'op_func'],
    attrs=[
        ['MaxPool1d', nn.MaxPool1d],
        ['AvgPool1d', nn.AvgPool1d],
    ],
Reported by Pylint.
Line: 42
Column: 23
)
class Pool1dBenchmark(op_bench.TorchBenchmarkBase):
    def init(self, kernel, stride, N, C, L, device, op_func):
        self.inputs = {
            "input": torch.rand(N, C, L, device=device)
        }
        self.op_func = op_func(kernel, stride=stride)
Reported by Pylint.
Line: 53
Column: 1
        return self.op_func(input)
op_bench.generate_pt_tests_from_op_list(pool_1d_ops_list,
                                        pool_1d_configs_short + pool_1d_configs_long,
                                        Pool1dBenchmark)
"""
Reported by Pylint.
Line: 64
Column: 25
# Configs for pool-2d ops
pool_2d_configs_short = op_bench.config_list(
    attr_names=[
        'kernel', 'stride', 'N', 'C', 'H', 'W'
    ],
    attrs=[
        [[3, 1], [2, 1], 1, 16, 32, 32],
Reported by Pylint.
Line: 77
Column: 24
    tags=['short']
)
pool_2d_configs_long = op_bench.cross_product_configs(
    kernel=[[3, 2], [3, 3]],
    stride=[[2, 2]],
    N=[8, 16],
    C=[32],
    H=[32, 64],
Reported by Pylint.
Line: 88
Column: 20
    tags=['long']
)
pool_2d_ops_list = op_bench.op_list(
    attr_names=['op_name', 'op_func'],
    attrs=[
        ['MaxPool2d', nn.MaxPool2d],
        ['AvgPool2d', nn.AvgPool2d],
        ['AdaptiveMaxPool2d', lambda kernel, stride: nn.AdaptiveMaxPool2d(kernel)],
Reported by Pylint.
test/jit/test_module_apis.py
56 issues
Line: 1
Column: 1
import torch
import os
import sys
from torch.testing._internal.jit_utils import JitTestCase
from typing import Dict, Any, List
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
Reported by Pylint.
Line: 4
Column: 1
import torch
import os
import sys
from torch.testing._internal.jit_utils import JitTestCase
from typing import Dict, Any, List
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
Reported by Pylint.
Line: 53
Column: 43
        return x
    @torch.jit.export
    def _save_to_state_dict(self, destination: Dict[str, torch.Tensor],
                            prefix: str, keep_vars: bool):
        self.customized_save_state_dict_called = True
        return {"dummy": torch.ones(1)}
    @torch.jit.export
Reported by Pylint.
Line: 54
Column: 37
    @torch.jit.export
    def _save_to_state_dict(self, destination: Dict[str, torch.Tensor],
                            prefix: str, keep_vars: bool):
        self.customized_save_state_dict_called = True
        return {"dummy": torch.ones(1)}
    @torch.jit.export
    def _load_from_state_dict(self,
Reported by Pylint.
Line: 54
Column: 50
    @torch.jit.export
    def _save_to_state_dict(self, destination: Dict[str, torch.Tensor],
                            prefix: str, keep_vars: bool):
        self.customized_save_state_dict_called = True
        return {"dummy": torch.ones(1)}
    @torch.jit.export
    def _load_from_state_dict(self,
Reported by Pylint.
Line: 60
Column: 39
    @torch.jit.export
    def _load_from_state_dict(self,
                              state_dict: Dict[str, torch.Tensor],
                              prefix: str, local_metadata: Any,
                              strict: bool, missing_keys: List[str],
                              unexpected_keys: List[str],
                              error_msgs: List[str]):
        self.customized_load_state_dict_called = True
Reported by Pylint.
Line: 61
Column: 52
    @torch.jit.export
    def _load_from_state_dict(self,
                              state_dict: Dict[str, torch.Tensor],
                              prefix: str, local_metadata: Any,
                              strict: bool, missing_keys: List[str],
                              unexpected_keys: List[str],
                              error_msgs: List[str]):
        self.customized_load_state_dict_called = True
        return
Reported by Pylint.
Line: 61
Column: 39
    @torch.jit.export
    def _load_from_state_dict(self,
                              state_dict: Dict[str, torch.Tensor],
                              prefix: str, local_metadata: Any,
                              strict: bool, missing_keys: List[str],
                              unexpected_keys: List[str],
                              error_msgs: List[str]):
        self.customized_load_state_dict_called = True
        return
Reported by Pylint.
Line: 62
Column: 53
    def _load_from_state_dict(self,
                              state_dict: Dict[str, torch.Tensor],
                              prefix: str, local_metadata: Any,
                              strict: bool, missing_keys: List[str],
                              unexpected_keys: List[str],
                              error_msgs: List[str]):
        self.customized_load_state_dict_called = True
        return
Reported by Pylint.
Line: 62
Column: 39
    def _load_from_state_dict(self,
                              state_dict: Dict[str, torch.Tensor],
                              prefix: str, local_metadata: Any,
                              strict: bool, missing_keys: List[str],
                              unexpected_keys: List[str],
                              error_msgs: List[str]):
        self.customized_load_state_dict_called = True
        return
Reported by Pylint.
torch/hub.py
56 issues
Line: 26
Column: 26
class tqdm(object): # type: ignore[no-redef]
    def __init__(self, total=None, disable=False,
                 unit=None, unit_scale=None, unit_divisor=None):
        self.total = total
        self.disable = disable
        self.n = 0
        # ignore unit, unit_scale, unit_divisor; they're just for real tqdm
Reported by Pylint.
Line: 26
Column: 54
class tqdm(object): # type: ignore[no-redef]
    def __init__(self, total=None, disable=False,
                 unit=None, unit_scale=None, unit_divisor=None):
        self.total = total
        self.disable = disable
        self.n = 0
        # ignore unit, unit_scale, unit_divisor; they're just for real tqdm
Reported by Pylint.
Line: 26
Column: 37
class tqdm(object): # type: ignore[no-redef]
    def __init__(self, total=None, disable=False,
                 unit=None, unit_scale=None, unit_divisor=None):
        self.total = total
        self.disable = disable
        self.n = 0
        # ignore unit, unit_scale, unit_divisor; they're just for real tqdm
Reported by Pylint.
Line: 118
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b310-urllib-urlopen
def _read_url(url):
    with urlopen(url) as r:
        return r.read().decode(r.headers.get_content_charset('utf-8'))
def _validate_not_a_forked_repo(repo_owner, repo_name, branch):
    # Use urlopen to avoid depending on local git.
Reported by Bandit.
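Bandit B310 fires because urlopen also accepts file:// and other schemes. A hedged sketch of the usual guard, validating the scheme before opening (the helper name is ours; the project's actual fix may differ):

from urllib.parse import urlparse
from urllib.request import urlopen

def _read_url_checked(url):
    # Allow only plain http(s) URLs before handing off to urlopen.
    if urlparse(url).scheme not in ("http", "https"):
        raise ValueError("unsupported URL scheme: " + url)
    with urlopen(url) as r:
        return r.read().decode(r.headers.get_content_charset('utf-8'))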
Line: 254
Column: 5
    Args:
        d (string): path to a local folder to save downloaded models & weights.
    """
    global _hub_dir
    _hub_dir = d
def list(github, force_reload=False, skip_validation=False):
r"""
Reported by Pylint.
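The set_dir docstring shown above is truncated by the snippet; its typical usage is simply:

import torch

# Point the hub cache (downloaded repos and weights) at a custom folder.
torch.hub.set_dir('/tmp/my_torch_hub_cache')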
Line: 258
Column: 1
_hub_dir = d
def list(github, force_reload=False, skip_validation=False):
r"""
List all entrypoints available in `github` hubconf.
Args:
github (string): a string with format "repo_owner/repo_name[:tag_name]" with an optional
Reported by Pylint.
Line: 290
Column: 1
return entrypoints
def help(github, model, force_reload=False, skip_validation=False):
r"""
Show the docstring of entrypoint `model`.
Args:
github (string): a string with format <repo_owner/repo_name[:tag_name]> with an optional
Reported by Pylint.
Line: 322
Column: 3
# Ideally this should be `def load(github, model, *args, force_reload=False, **kwargs):`,
# but Python2 complains about a syntax error for it. We have to skip force_reload in the
# function signature here but detect it in kwargs instead.
# TODO: fix it after Python2 EOL
def load(repo_or_dir, model, *args, **kwargs):
    r"""
    Load a model from a github repo or a local directory.
    Note: Loading a model is the typical use case, but this can also be used to
Reported by Pylint.
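For reference, a typical call into the load() entrypoint documented above; force_reload travels through **kwargs exactly as the comment explains (repo and model names are illustrative):

import torch

# Loads the resnet18 entrypoint from pytorch/vision's hubconf.py.
model = torch.hub.load('pytorch/vision', 'resnet18', pretrained=True,
                       force_reload=False)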
Line: 437
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b310-urllib-urlopen
# We use a different API for python2 since urllib(2) doesn't recognize the CA
# certificates in older Python
req = Request(url, headers={"User-Agent": "torch.hub"})
u = urlopen(req)
meta = u.info()
if hasattr(meta, 'getheaders'):
    content_length = meta.getheaders("Content-Length")
else:
    content_length = meta.get_all("Content-Length")
Reported by Bandit.
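Since the hasattr(meta, 'getheaders') branch above exists only for Python 2, a hedged Python-3-only sketch of the same header read (url is illustrative; B310's scheme caveat still applies):

from urllib.request import Request, urlopen

url = "https://example.com/checkpoint.pth"  # illustrative
req = Request(url, headers={"User-Agent": "torch.hub"})
with urlopen(req) as u:
    # On Python 3, u.info() is an http.client.HTTPMessage, which has get_all.
    content_length = u.info().get_all("Content-Length")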
Line: 1
Column: 1
import errno
import hashlib
import json
import os
import re
import shutil
import sys
import tempfile
import torch
Reported by Pylint.
benchmarks/operator_benchmark/pt/add_test.py
56 issues
Line: 2
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for add_ operator. Supports both Caffe2/PyTorch."""
# Configs for PT add operator
add_long_configs = op_bench.cross_product_configs(
    M=[8, 128],
    N=[32, 64],
Reported by Pylint.
Line: 7
Column: 20
"""Microbenchmarks for add_ operator. Supports both Caffe2/PyTorch."""
# Configs for PT add operator
add_long_configs = op_bench.cross_product_configs(
    M=[8, 128],
    N=[32, 64],
    K=[256, 512],
    device=['cpu', 'cuda'],
    tags=["long"]
Reported by Pylint.
Line: 16
Column: 21
)
add_short_configs = op_bench.config_list(
attr_names=["M", "N", "K"],
attrs=[
[1, 1, 1],
[64, 64, 64],
[64, 64, 128],
Reported by Pylint.
Line: 30
Column: 20
)
class AddBenchmark(op_bench.TorchBenchmarkBase):
    def init(self, M, N, K, device):
        self.inputs = {
            "input_one": torch.rand(M, N, K, device=device, requires_grad=self.auto_set()),
            "input_two": torch.rand(M, N, K, device=device, requires_grad=self.auto_set())
        }
Reported by Pylint.
Line: 49
Column: 1
# ...
# Those names can be used to filter tests.
op_bench.generate_pt_test(add_long_configs + add_short_configs, AddBenchmark)
op_bench.generate_pt_gradient_test(add_long_configs + add_short_configs, AddBenchmark)
"""Mircobenchmark for addmm operator."""
Reported by Pylint.
Line: 50
Column: 1
# Those names can be used to filter tests.
op_bench.generate_pt_test(add_long_configs + add_short_configs, AddBenchmark)
op_bench.generate_pt_gradient_test(add_long_configs + add_short_configs, AddBenchmark)
"""Mircobenchmark for addmm operator."""
Reported by Pylint.
Line: 56
Column: 22
"""Mircobenchmark for addmm operator."""
class AddmmBenchmark(op_bench.TorchBenchmarkBase):
    def init(self, M, N, K, device):
        self.inputs = {
            "input_one": torch.rand(M, K, device=device, requires_grad=self.auto_set()),
            "mat1": torch.rand(M, N, device=device, requires_grad=self.auto_set()),
            "mat2": torch.rand(N, K, device=device, requires_grad=self.auto_set())
Reported by Pylint.
Line: 68
Column: 1
    def forward(self, input_one, mat1, mat2):
        return torch.addmm(input_one, mat1, mat2)
op_bench.generate_pt_test(add_long_configs + add_short_configs, AddmmBenchmark)
op_bench.generate_pt_gradient_test(add_long_configs + add_short_configs, AddmmBenchmark)
"""Microbenchmark for addr operator."""
Reported by Pylint.
Line: 69
Column: 1
        return torch.addmm(input_one, mat1, mat2)
op_bench.generate_pt_test(add_long_configs + add_short_configs, AddmmBenchmark)
op_bench.generate_pt_gradient_test(add_long_configs + add_short_configs, AddmmBenchmark)
"""Microbenchmark for addr operator."""
Reported by Pylint.
Line: 75
Column: 21
"""Mircobenchmark for addr operator."""
class AddrBenchmark(op_bench.TorchBenchmarkBase):
    def init(self, M, N, device, dtype):
        self.inputs = {
            "input_one": torch.rand((M, N), device=device, requires_grad=self.auto_set(), dtype=dtype),
            "vec1": torch.rand((M,), device=device, requires_grad=self.auto_set(), dtype=dtype),
            "vec2": torch.rand((N,), device=device, requires_grad=self.auto_set(), dtype=dtype)
Reported by Pylint.
caffe2/python/checkpoint.py
56 issues
Line: 142
Column: 9
    files are saved
    """
    if path_prefix:
        db_name = path_prefix + get_ckpt_filename(node_name, epoch)
    else:
        ckpt_filename = get_ckpt_filename(node_name, epoch)
        db_name = os.path.join(db_prefix, ckpt_filename)
    return db_name
Reported by Pylint.
Line: 180
Column: 5
        self._current_db_name = None
        self._current_checkpoint_duration = None
    """
    Initialize the checkpoint manager. Determines all blobs that need to be saved
    or loads from a checkpoint.
    Args:
        nodes: An array of nodes where this checkpoint manager is running. Should
Reported by Pylint.
Line: 218
Column: 17
full_db_name = db_name(retrieve_from_epoch,
                       self._node_name, self._db_prefix, path_prefix)
db_type = path_type or self._db_type
logger.info("Initializing checkpoints from = %s"
            % full_db_name)
ops.Load(
    [], self._blob_names,
    db=full_db_name,
    db_type=db_type,
Reported by Pylint.
Line: 266
Column: 17
    stats[self._current_db_name] = self._current_checkpoint_duration.fetch()[0]
else:
    logger.info(
        "Failed to collect checkpoint stats: {}".format(
            self._current_db_name
        )
    )
def load(self, epoch, path_prefix=None, path_type=None):
Reported by Pylint.
Line: 281
Column: 9
    epoch, self._node_name, self._db_prefix, path_prefix
)
db_type = path_type or self._db_type
logger.info("Loading checkpoints from = %s" % self._current_db_name)
def add_op():
    ops.Load(
        [],
        self.blob_list(),
Reported by Pylint.
Line: 311
Column: 9
given epoch.
"""
self._current_db_name = db_name(epoch, self._node_name, self._db_prefix)
logger.info('Load from %s' % self._current_db_name)
def add_op():
    ops.Load(
        [],
        blob_names,
Reported by Pylint.
Line: 325
Column: 9
    return self._timed_task('checkpoint_partial_load', add_op)
def check_db_exists(self, epoch):
    logger.info('Check existence of %s' %
                db_name(epoch, self._node_name, self._db_prefix))
    with Task() as task:
        existence = ops.Const(False)
        ops.DBExists(
            [],
Reported by Pylint.
Line: 357
Column: 9
blobs present in the global workspace.
"""
self._current_db_name = db_name(epoch, self._node_name, self._db_prefix)
logger.info('Saving to %s' % self._current_db_name)
def add_op():
    ops.Save(
        self.blob_list(), [],
        db=self._current_db_name,
Reported by Pylint.
Line: 395
Column: 26
    last_epoch = self._metadata_handler.last_epoch(user_epoch=user_epoch)
    return last_epoch
def set_params(self, nodes, path_prefix=None, path_type=None):
    """Set parameters associated with CP manager
    Args:
        nodes: An array of nodes where this checkpoint manager is running.
        path_prefix: Used to construct db name or path where checkpoint files are
Reported by Pylint.
Line: 461
Column: 5
        func(manager, *args, **kw)
    return task_group
"""
Args:
    nodes: An array of nodes where this checkpoint manager is running.
    retrieve_from_epoch: Set to a number to load blobs from this epoch.
    path_prefix: Used to construct db name or path where checkpoint files are
        stored.
Reported by Pylint.
caffe2/perfkernels/hp_emblookup_codegen.py
56 issues
Line: 10
Column: 27
sizeof = {"float": 4, "at::Half": 2, "uint8_t": 1}
def unroll(uf, IndexType, InType, OutType, use_weights, isa, fused, use_offsets):
    def compute(regid, InType, use_weights, isa, prefetch):
        code = []
        if InType == "float":
            code.append(
Reported by Pylint.
Line: 10
Column: 35
sizeof = {"float": 4, "at::Half": 2, "uint8_t": 1}
def unroll(uf, IndexType, InType, OutType, use_weights, isa, fused, use_offsets):
    def compute(regid, InType, use_weights, isa, prefetch):
        code = []
        if InType == "float":
            code.append(
Reported by Pylint.
Line: 10
Column: 16
sizeof = {"float": 4, "at::Half": 2, "uint8_t": 1}
def unroll(uf, IndexType, InType, OutType, use_weights, isa, fused, use_offsets):
    def compute(regid, InType, use_weights, isa, prefetch):
        code = []
        if InType == "float":
            code.append(
Reported by Pylint.
Line: 11
Column: 24
def unroll(uf, IndexType, InType, OutType, use_weights, isa, fused, use_offsets):
    def compute(regid, InType, use_weights, isa, prefetch):
        code = []
        if InType == "float":
            code.append(
                " vop%d = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (%d)), vop%d);" # noqa
Reported by Pylint.
Line: 11
Column: 32
def unroll(uf, IndexType, InType, OutType, use_weights, isa, fused, use_offsets):
    def compute(regid, InType, use_weights, isa, prefetch):
        code = []
        if InType == "float":
            code.append(
                " vop%d = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (%d)), vop%d);" # noqa
Reported by Pylint.
Line: 11
Column: 45
def unroll(uf, IndexType, InType, OutType, use_weights, isa, fused, use_offsets):
    def compute(regid, InType, use_weights, isa, prefetch):
        code = []
        if InType == "float":
            code.append(
                " vop%d = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (%d)), vop%d);" # noqa
Reported by Pylint.
Line: 12
Column: 9
def unroll(uf, IndexType, InType, OutType, use_weights, isa, fused, use_offsets):
    def compute(regid, InType, use_weights, isa, prefetch):
        code = []
        if InType == "float":
            code.append(
                " vop%d = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (%d)), vop%d);" # noqa
                % (regid, regid, regid)
Reported by Pylint.
Line: 51
Column: 5
        return code
    code = []
    code.append(" // unrolling " + str(uf) + " times")
    if use_offsets:
        code.append(
            " for ("
Reported by Pylint.
Line: 189
Column: 24
    return code
def generic(IndexType, InType, OutType, use_weights, isa, fused, use_offsets):
    def compute(InType, use_weights, isa):
        code = []
        if InType == "float":
            code.append(
                " _mm256_storeu_ps(\n"
Reported by Pylint.
Line: 189
Column: 13
    return code
def generic(IndexType, InType, OutType, use_weights, isa, fused, use_offsets):
    def compute(InType, use_weights, isa):
        code = []
        if InType == "float":
            code.append(
                " _mm256_storeu_ps(\n"
Reported by Pylint.