The following issues were found:
caffe2/python/__init__.py
16 issues
Line: 5
Column: 3
from caffe2.proto import caffe2_pb2
import os
import sys
# TODO: refactor & remove the following alias
caffe2_pb2.CPU = caffe2_pb2.PROTO_CPU
caffe2_pb2.CUDA = caffe2_pb2.PROTO_CUDA
caffe2_pb2.MKLDNN = caffe2_pb2.PROTO_MKLDNN
caffe2_pb2.OPENGL = caffe2_pb2.PROTO_OPENGL
caffe2_pb2.OPENCL = caffe2_pb2.PROTO_OPENCL
Reported by Pylint.
Line: 1
Column: 1
from caffe2.proto import caffe2_pb2
import os
import sys
# TODO: refactor & remove the following alias
caffe2_pb2.CPU = caffe2_pb2.PROTO_CPU
caffe2_pb2.CUDA = caffe2_pb2.PROTO_CUDA
caffe2_pb2.MKLDNN = caffe2_pb2.PROTO_MKLDNN
caffe2_pb2.OPENGL = caffe2_pb2.PROTO_OPENGL
Reported by Pylint.
Line: 3
Column: 1
from caffe2.proto import caffe2_pb2
import os
import sys
# TODO: refactor & remove the following alias
caffe2_pb2.CPU = caffe2_pb2.PROTO_CPU
caffe2_pb2.CUDA = caffe2_pb2.PROTO_CUDA
caffe2_pb2.MKLDNN = caffe2_pb2.PROTO_MKLDNN
caffe2_pb2.OPENGL = caffe2_pb2.PROTO_OPENGL
Reported by Pylint.
Line: 4
Column: 1
from caffe2.proto import caffe2_pb2
import os
import sys
# TODO: refactor & remove the following alias
caffe2_pb2.CPU = caffe2_pb2.PROTO_CPU
caffe2_pb2.CUDA = caffe2_pb2.PROTO_CUDA
caffe2_pb2.MKLDNN = caffe2_pb2.PROTO_MKLDNN
caffe2_pb2.OPENGL = caffe2_pb2.PROTO_OPENGL
Reported by Pylint.
Line: 16
Column: 5
caffe2_pb2.COMPILE_TIME_MAX_DEVICE_TYPES = caffe2_pb2.PROTO_COMPILE_TIME_MAX_DEVICE_TYPES
if sys.platform == "win32":
is_conda = os.path.exists(os.path.join(sys.prefix, 'conda-meta'))
py_dll_path = os.path.join(os.path.dirname(sys.executable), 'Library', 'bin')
th_root = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'torch')
th_dll_path = os.path.join(th_root, 'lib')
if not os.path.exists(os.path.join(th_dll_path, 'nvToolsExt64_1.dll')) and \
Reported by Pylint.
Line: 24
Column: 1
if not os.path.exists(os.path.join(th_dll_path, 'nvToolsExt64_1.dll')) and \
not os.path.exists(os.path.join(py_dll_path, 'nvToolsExt64_1.dll')):
nvtoolsext_dll_path = os.path.join(
os.getenv('NVTOOLSEXT_PATH', 'C:\\Program Files\\NVIDIA Corporation\\NvToolsExt'), 'bin', 'x64')
else:
nvtoolsext_dll_path = ''
import importlib.util
import glob
Reported by Pylint.
Line: 26
Column: 9
nvtoolsext_dll_path = os.path.join(
os.getenv('NVTOOLSEXT_PATH', 'C:\\Program Files\\NVIDIA Corporation\\NvToolsExt'), 'bin', 'x64')
else:
nvtoolsext_dll_path = ''
import importlib.util
import glob
spec = importlib.util.spec_from_file_location('torch_version', os.path.join(th_root, 'version.py'))
torch_version = importlib.util.module_from_spec(spec)
Reported by Pylint.
Line: 30
Column: 1
import importlib.util
import glob
spec = importlib.util.spec_from_file_location('torch_version', os.path.join(th_root, 'version.py'))
torch_version = importlib.util.module_from_spec(spec)
spec.loader.exec_module(torch_version)
if torch_version.cuda and len(glob.glob(os.path.join(th_dll_path, 'cudart64*.dll'))) == 0 and \
len(glob.glob(os.path.join(py_dll_path, 'cudart64*.dll'))) == 0:
cuda_version = torch_version.cuda
Reported by Pylint.
Line: 41
Column: 9
default_path = 'C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v' + cuda_version
cuda_path = os.path.join(os.getenv(cuda_path_var, default_path), 'bin')
else:
cuda_path = ''
import ctypes
kernel32 = ctypes.WinDLL('kernel32.dll', use_last_error=True)
dll_paths = list(filter(os.path.exists, [th_dll_path, py_dll_path, nvtoolsext_dll_path, cuda_path]))
with_load_library_flags = hasattr(kernel32, 'AddDllDirectory')
Reported by Pylint.
Line: 45
Column: 1
import ctypes
kernel32 = ctypes.WinDLL('kernel32.dll', use_last_error=True)
dll_paths = list(filter(os.path.exists, [th_dll_path, py_dll_path, nvtoolsext_dll_path, cuda_path]))
with_load_library_flags = hasattr(kernel32, 'AddDllDirectory')
prev_error_mode = kernel32.SetErrorMode(0x0001)
kernel32.LoadLibraryW.restype = ctypes.c_void_p
if with_load_library_flags:
Reported by Pylint.
caffe2/python/modeling/gradient_clipping_test.py
16 issues
Line: 1
Column: 1
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
Reported by Pylint.
Line: 28
Column: 1
import numpy as np
class GradientClippingTest(unittest.TestCase):
def test_gradient_clipping_by_norm(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
Reported by Pylint.
Line: 29
Column: 5
class GradientClippingTest(unittest.TestCase):
def test_gradient_clipping_by_norm(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
Reported by Pylint.
Line: 38
Column: 9
fc2 = brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
sigm = model.net.Sigmoid(fc2, 'sigm')
sq = model.net.SquaredL2Distance([sigm, 'label'], 'sq')
loss = model.net.SumElements(sq, 'loss')
grad_map = model.AddGradientOperators([loss])
grad_map_for_param = {key: grad_map[key] for key in ['fc1_w', 'fc2_w']}
Reported by Pylint.
Line: 62
Column: 5
# 5 forward ops + 6 backward ops + 2 * (3 gradient clipping ops)
self.assertEqual(len(model.net.Proto().op), 17)
def test_gradient_clipping_by_norm_l1_norm(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
Reported by Pylint.
Line: 71
Column: 9
fc2 = brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
sigm = model.net.Sigmoid(fc2, 'sigm')
sq = model.net.SquaredL2Distance([sigm, 'label'], 'sq')
loss = model.net.SumElements(sq, 'loss')
grad_map = model.AddGradientOperators([loss])
grad_map_for_param = {key: grad_map[key] for key in ['fc1_w', 'fc2_w']}
Reported by Pylint.
Line: 95
Column: 5
# 5 forward ops + 6 backward ops + 2 * (2 gradient clipping ops)
self.assertEqual(len(model.net.Proto().op), 15)
def test_gradient_clipping_by_norm_using_param_norm(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
Reported by Pylint.
Line: 104
Column: 9
fc2 = brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
sigm = model.net.Sigmoid(fc2, 'sigm')
sq = model.net.SquaredL2Distance([sigm, 'label'], 'sq')
loss = model.net.SumElements(sq, 'loss')
grad_map = model.AddGradientOperators([loss])
grad_map_for_param = {key: grad_map[key] for key in ['fc1_w', 'fc2_w']}
Reported by Pylint.
Line: 129
Column: 5
# 5 forward ops + 6 backward ops + 2 * (5 gradient clipping ops)
self.assertEqual(len(model.net.Proto().op), 21)
def test_gradient_clipping_by_norm_compute_norm_ratio(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
Reported by Pylint.
Line: 138
Column: 9
fc2 = brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
sigm = model.net.Sigmoid(fc2, 'sigm')
sq = model.net.SquaredL2Distance([sigm, 'label'], 'sq')
loss = model.net.SumElements(sq, 'loss')
grad_map = model.AddGradientOperators([loss])
grad_map_for_param = {key: grad_map[key] for key in ['fc1_w', 'fc2_w']}
Reported by Pylint.
benchmarks/functional_autograd_benchmark/vision_models.py
16 issues
Line: 1
Column: 1
import torch
from torch import Tensor
import torchvision_models as models
from utils import extract_weights, load_weights, GetterReturnType
from typing import cast
def get_resnet18(device: torch.device) -> GetterReturnType:
Reported by Pylint.
Line: 2
Column: 1
import torch
from torch import Tensor
import torchvision_models as models
from utils import extract_weights, load_weights, GetterReturnType
from typing import cast
def get_resnet18(device: torch.device) -> GetterReturnType:
Reported by Pylint.
Line: 74
Column: 9
inputs = torch.rand(N, 3, 800, 1200, device=device)
labels = []
for idx in range(N):
targets = {}
n_targets: int = int(torch.randint(5, 10, size=tuple()).item())
label = torch.randint(5, 10, size=(n_targets,))
targets["labels"] = label
boxes = torch.randint(100, 800, size=(n_targets, 4))
Reported by Pylint.
Line: 1
Column: 1
import torch
from torch import Tensor
import torchvision_models as models
from utils import extract_weights, load_weights, GetterReturnType
from typing import cast
def get_resnet18(device: torch.device) -> GetterReturnType:
Reported by Pylint.
Line: 7
Column: 1
from utils import extract_weights, load_weights, GetterReturnType
from typing import cast
def get_resnet18(device: torch.device) -> GetterReturnType:
N = 32
model = models.resnet18(pretrained=False)
criterion = torch.nn.CrossEntropyLoss()
Reported by Pylint.
Line: 9
Column: 1
from typing import cast
def get_resnet18(device: torch.device) -> GetterReturnType:
N = 32
model = models.resnet18(pretrained=False)
criterion = torch.nn.CrossEntropyLoss()
model.to(device)
params, names = extract_weights(model)
Reported by Pylint.
Line: 10
Column: 5
from typing import cast
def get_resnet18(device: torch.device) -> GetterReturnType:
N = 32
model = models.resnet18(pretrained=False)
criterion = torch.nn.CrossEntropyLoss()
model.to(device)
params, names = extract_weights(model)
Reported by Pylint.
Line: 28
Column: 1
return forward, params
def get_fcn_resnet(device: torch.device) -> GetterReturnType:
N = 8
criterion = torch.nn.MSELoss()
model = models.fcn_resnet50(pretrained=False, pretrained_backbone=False)
model.to(device)
params, names = extract_weights(model)
Reported by Pylint.
Line: 29
Column: 5
return forward, params
def get_fcn_resnet(device: torch.device) -> GetterReturnType:
N = 8
criterion = torch.nn.MSELoss()
model = models.fcn_resnet50(pretrained=False, pretrained_backbone=False)
model.to(device)
params, names = extract_weights(model)
Reported by Pylint.
Line: 48
Column: 1
return forward, params
def get_detr(device: torch.device) -> GetterReturnType:
# All values below are from CLI defaults in https://github.com/facebookresearch/detr
N = 2
num_classes = 91
hidden_dim = 256
nheads = 8
Reported by Pylint.
caffe2/python/mkl/mkl_conv_op_test.py
16 issues
Line: 7
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
Reported by Pylint.
Line: 8
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
Reported by Pylint.
Line: 15
Column: 22
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLConvTest(hu.HypothesisTestCase):
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(3, 5),
Reported by Pylint.
Line: 30
Column: 59
**mu.gcs)
def test_mkl_convolution(self, stride, pad, kernel, size,
input_channels, output_channels,
batch_size, use_bias, group, gc, dc):
op = core.CreateOperator(
"Conv",
["X", "w", "b"] if use_bias else ["X", "w"],
["Y"],
stride=stride,
Reported by Pylint.
Line: 52
Column: 5
if __name__ == "__main__":
import unittest
unittest.main()
Reported by Pylint.
Line: 1
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
Reported by Pylint.
Line: 17
Column: 1
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLConvTest(hu.HypothesisTestCase):
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(3, 5),
size=st.integers(8, 20),
input_channels=st.integers(1, 16),
Reported by Pylint.
Line: 28
Column: 5
use_bias=st.booleans(),
group=st.integers(1, 8),
**mu.gcs)
def test_mkl_convolution(self, stride, pad, kernel, size,
input_channels, output_channels,
batch_size, use_bias, group, gc, dc):
op = core.CreateOperator(
"Conv",
["X", "w", "b"] if use_bias else ["X", "w"],
Reported by Pylint.
Line: 28
Column: 5
use_bias=st.booleans(),
group=st.integers(1, 8),
**mu.gcs)
def test_mkl_convolution(self, stride, pad, kernel, size,
input_channels, output_channels,
batch_size, use_bias, group, gc, dc):
op = core.CreateOperator(
"Conv",
["X", "w", "b"] if use_bias else ["X", "w"],
Reported by Pylint.
Line: 28
Column: 5
use_bias=st.booleans(),
group=st.integers(1, 8),
**mu.gcs)
def test_mkl_convolution(self, stride, pad, kernel, size,
input_channels, output_channels,
batch_size, use_bias, group, gc, dc):
op = core.CreateOperator(
"Conv",
["X", "w", "b"] if use_bias else ["X", "w"],
Reported by Pylint.
benchmarks/operator_benchmark/pt/matmul_test.py
16 issues
Line: 2
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for MatMul operator"""
# Configs for PT Matmul operator
mm_short_configs = op_bench.config_list(
attr_names=["M", "N", "K", "trans_a", "trans_b"],
attrs=[
Reported by Pylint.
Line: 7
Column: 20
"""Microbenchmarks for MatMul operator"""
# Configs for PT Matmul operator
mm_short_configs = op_bench.config_list(
attr_names=["M", "N", "K", "trans_a", "trans_b"],
attrs=[
[1, 1, 1, True, False],
[128, 128, 128, True, False],
[256, 256, 256, False, True],
Reported by Pylint.
Line: 21
Column: 19
)
mm_long_configs = op_bench.cross_product_configs(
M=[32],
N=[512, 128],
K=[64],
trans_a=[False, True],
trans_b=[True, False],
Reported by Pylint.
Line: 32
Column: 23
)
class MatMulBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, trans_a, trans_b, device):
self.inputs = {
"input_one": torch.rand(M, N, device=device)
if trans_a
else torch.rand(N, M, device=device).t(),
Reported by Pylint.
Line: 48
Column: 1
return torch.matmul(input_one, input_two)
op_bench.generate_pt_test(mm_long_configs + mm_short_configs, MatMulBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 4
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for MatMul operator"""
# Configs for PT Matmul operator
mm_short_configs = op_bench.config_list(
attr_names=["M", "N", "K", "trans_a", "trans_b"],
attrs=[
Reported by Pylint.
Line: 34
Column: 9
class MatMulBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, trans_a, trans_b, device):
self.inputs = {
"input_one": torch.rand(M, N, device=device)
if trans_a
else torch.rand(N, M, device=device).t(),
"input_two": torch.rand(N, K, device=device)
if trans_b
Reported by Pylint.
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for MatMul operator"""
# Configs for PT Matmul operator
mm_short_configs = op_bench.config_list(
attr_names=["M", "N", "K", "trans_a", "trans_b"],
attrs=[
Reported by Pylint.
Line: 32
Column: 1
)
class MatMulBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, trans_a, trans_b, device):
self.inputs = {
"input_one": torch.rand(M, N, device=device)
if trans_a
else torch.rand(N, M, device=device).t(),
Reported by Pylint.
Line: 33
Column: 5
class MatMulBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, trans_a, trans_b, device):
self.inputs = {
"input_one": torch.rand(M, N, device=device)
if trans_a
else torch.rand(N, M, device=device).t(),
"input_two": torch.rand(N, K, device=device)
Reported by Pylint.
benchmarks/operator_benchmark/pt/qinstancenorm_test.py
16 issues
Line: 3
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for quantized instancenorm operator."""
instancenorm_configs_short = op_bench.cross_product_configs(
dims=(
Reported by Pylint.
Line: 8
Column: 30
"""Microbenchmarks for quantized instancenorm operator."""
instancenorm_configs_short = op_bench.cross_product_configs(
dims=(
(32, 8, 16),
(32, 8, 56, 56),
),
dtype=(torch.qint8,),
Reported by Pylint.
Line: 18
Column: 30
)
class QInstanceNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dims, dtype):
X = (torch.rand(*dims) - 0.5) * 256
num_channels = dims[1]
scale = 1.0
Reported by Pylint.
Line: 43
Column: 1
output_zero_point=Y_zero_point)
op_bench.generate_pt_test(instancenorm_configs_short, QInstanceNormBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 6
Column: 1
import torch
"""Microbenchmarks for quantized instancenorm operator."""
instancenorm_configs_short = op_bench.cross_product_configs(
dims=(
(32, 8, 16),
(32, 8, 56, 56),
Reported by Pylint.
Line: 26
Column: 9
scale = 1.0
zero_point = 0
self.inputs = {
"qX": torch.quantize_per_tensor(
X, scale=scale, zero_point=zero_point, dtype=dtype),
"weight": torch.rand(num_channels, dtype=torch.float),
"bias": torch.rand(num_channels, dtype=torch.float),
"eps": 1e-5,
Reported by Pylint.
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for quantized instancenorm operator."""
instancenorm_configs_short = op_bench.cross_product_configs(
dims=(
Reported by Pylint.
Line: 18
Column: 1
)
class QInstanceNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dims, dtype):
X = (torch.rand(*dims) - 0.5) * 256
num_channels = dims[1]
scale = 1.0
Reported by Pylint.
Line: 20
Column: 5
class QInstanceNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dims, dtype):
X = (torch.rand(*dims) - 0.5) * 256
num_channels = dims[1]
scale = 1.0
zero_point = 0
Reported by Pylint.
Line: 21
Column: 9
class QInstanceNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dims, dtype):
X = (torch.rand(*dims) - 0.5) * 256
num_channels = dims[1]
scale = 1.0
zero_point = 0
self.inputs = {
Reported by Pylint.
benchmarks/operator_benchmark/common/tests/c2_cpu_gpu_forward_backward_test.py
16 issues
Line: 1
Column: 1
import operator_benchmark as op_bench
from caffe2.python import core
add_configs = op_bench.cross_product_configs(
M=[8],
N=[8],
K=[8],
tags=["short"],
Reported by Pylint.
Line: 2
Column: 1
import operator_benchmark as op_bench
from caffe2.python import core
add_configs = op_bench.cross_product_configs(
M=[8],
N=[8],
K=[8],
tags=["short"],
Reported by Pylint.
Line: 16
Column: 9
class AddBenchmark(op_bench.Caffe2BenchmarkBase):
def init(self, M, N, K, device):
self.set_module_name("add")
self.input_one = self.tensor([M, N, K], device=device)
self.input_two = self.tensor([M, N, K], device=device)
self.input_one_grad = self.tensor([M, N, K], device=device)
self.input_two_grad = self.tensor([M, N, K], device=device)
self.output = self.tensor([M, N, K], device=device)
Reported by Pylint.
Line: 17
Column: 9
def init(self, M, N, K, device):
self.set_module_name("add")
self.input_one = self.tensor([M, N, K], device=device)
self.input_two = self.tensor([M, N, K], device=device)
self.input_one_grad = self.tensor([M, N, K], device=device)
self.input_two_grad = self.tensor([M, N, K], device=device)
self.output = self.tensor([M, N, K], device=device)
def forward(self):
Reported by Pylint.
Line: 18
Column: 9
self.set_module_name("add")
self.input_one = self.tensor([M, N, K], device=device)
self.input_two = self.tensor([M, N, K], device=device)
self.input_one_grad = self.tensor([M, N, K], device=device)
self.input_two_grad = self.tensor([M, N, K], device=device)
self.output = self.tensor([M, N, K], device=device)
def forward(self):
op = core.CreateOperator(
Reported by Pylint.
Line: 19
Column: 9
self.input_one = self.tensor([M, N, K], device=device)
self.input_two = self.tensor([M, N, K], device=device)
self.input_one_grad = self.tensor([M, N, K], device=device)
self.input_two_grad = self.tensor([M, N, K], device=device)
self.output = self.tensor([M, N, K], device=device)
def forward(self):
op = core.CreateOperator(
"Add", [self.input_one, self.input_two], self.output, **self.args
Reported by Pylint.
Line: 20
Column: 9
self.input_two = self.tensor([M, N, K], device=device)
self.input_one_grad = self.tensor([M, N, K], device=device)
self.input_two_grad = self.tensor([M, N, K], device=device)
self.output = self.tensor([M, N, K], device=device)
def forward(self):
op = core.CreateOperator(
"Add", [self.input_one, self.input_two], self.output, **self.args
)
Reported by Pylint.
Line: 1
Column: 1
import operator_benchmark as op_bench
from caffe2.python import core
add_configs = op_bench.cross_product_configs(
M=[8],
N=[8],
K=[8],
tags=["short"],
Reported by Pylint.
Line: 13
Column: 1
device=["cuda", "cpu"]
)
class AddBenchmark(op_bench.Caffe2BenchmarkBase):
def init(self, M, N, K, device):
self.set_module_name("add")
self.input_one = self.tensor([M, N, K], device=device)
self.input_two = self.tensor([M, N, K], device=device)
self.input_one_grad = self.tensor([M, N, K], device=device)
Reported by Pylint.
Line: 14
Column: 5
)
class AddBenchmark(op_bench.Caffe2BenchmarkBase):
def init(self, M, N, K, device):
self.set_module_name("add")
self.input_one = self.tensor([M, N, K], device=device)
self.input_two = self.tensor([M, N, K], device=device)
self.input_one_grad = self.tensor([M, N, K], device=device)
self.input_two_grad = self.tensor([M, N, K], device=device)
Reported by Pylint.
caffe2/python/layers/conv.py
16 issues
Line: 1
Column: 1
## @package conv
# Module caffe2.python.layers.conv
from caffe2.python import schema
from caffe2.python.layers.layers import (
Reported by Pylint.
Line: 15
Column: 1
import numpy as np
class Conv(ModelLayer):
"""
Convolutional layer
Input:
- input_record: at least has the shape info of C (num_channels)
- output_dim: number of convolutional filters
Reported by Pylint.
Line: 28
Column: 5
- order: either 'NHWC' or 'NCHW'
"""
def __init__(self, model, input_record, output_dim, kernel_h, kernel_w,
stride_h, stride_w, pad_b=None, pad_l=None, pad_r=None,
pad_t=None, order='NHWC', kernel_init=None, bias_init=None,
kernel_optim=None, bias_optim=None,
name='conv', **kwargs):
Reported by Pylint.
Line: 28
Column: 5
- order: either 'NHWC' or 'NCHW'
"""
def __init__(self, model, input_record, output_dim, kernel_h, kernel_w,
stride_h, stride_w, pad_b=None, pad_l=None, pad_r=None,
pad_t=None, order='NHWC', kernel_init=None, bias_init=None,
kernel_optim=None, bias_optim=None,
name='conv', **kwargs):
Reported by Pylint.
Line: 34
Column: 9
kernel_optim=None, bias_optim=None,
name='conv', **kwargs):
super(Conv, self).__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
# input num_channels (C) is needed
input_dims = input_record.field_type().shape
assert (kernel_h > 0 and isinstance(kernel_h, int)), (
Reported by Pylint.
Line: 35
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
name='conv', **kwargs):
super(Conv, self).__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
# input num_channels (C) is needed
input_dims = input_record.field_type().shape
assert (kernel_h > 0 and isinstance(kernel_h, int)), (
"kernel_h should be positive integer")
Reported by Bandit.
Line: 39
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
# input num_channels (C) is needed
input_dims = input_record.field_type().shape
assert (kernel_h > 0 and isinstance(kernel_h, int)), (
"kernel_h should be positive integer")
assert (kernel_w > 0 and isinstance(kernel_w, int)), (
"kernel_w should be positive integer")
self.kernel_h = kernel_h
self.kernel_w = kernel_w
Reported by Bandit.
Line: 41
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
assert (kernel_h > 0 and isinstance(kernel_h, int)), (
"kernel_h should be positive integer")
assert (kernel_w > 0 and isinstance(kernel_w, int)), (
"kernel_w should be positive integer")
self.kernel_h = kernel_h
self.kernel_w = kernel_w
assert (stride_h > 0 and isinstance(stride_h, int)), (
Reported by Bandit.
Line: 46
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
self.kernel_h = kernel_h
self.kernel_w = kernel_w
assert (stride_h > 0 and isinstance(stride_h, int)), (
"stride_h should be positive integer")
assert (stride_w > 0 and isinstance(stride_w, int)), (
"stride_w should be positive integer")
self.stride_h = stride_h
self.stride_w = stride_w
Reported by Bandit.
Line: 48
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
assert (stride_h > 0 and isinstance(stride_h, int)), (
"stride_h should be positive integer")
assert (stride_w > 0 and isinstance(stride_w, int)), (
"stride_w should be positive integer")
self.stride_h = stride_h
self.stride_w = stride_w
# output_dim calculation (http://cs231n.github.io/convolutional-networks/)
Reported by Bandit.
caffe2/python/operator_test/weight_scale_test.py
16 issues
Line: 24
Column: 1
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import functools
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
class TestWeightScale(hu.HypothesisTestCase):
@given(inputs=hu.tensors(n=1),
Reported by Pylint.
Line: 25
Column: 1
import caffe2.python.hypothesis_test_util as hu
import functools
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
class TestWeightScale(hu.HypothesisTestCase):
@given(inputs=hu.tensors(n=1),
ITER=st.integers(min_value=0, max_value=100),
Reported by Pylint.
Line: 35
Column: 86
upper_bound_iter=st.integers(min_value=5, max_value=100),
scale=st.floats(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False),
**hu.gcs)
def test_weight_scale(self, inputs, ITER, stepsize, upper_bound_iter, scale, gc, dc):
ITER = np.array([ITER], dtype=np.int64)
op = core.CreateOperator(
"WeightScale", ["w", "iter"], ["nw"], stepsize=stepsize, upper_bound_iter=upper_bound_iter, scale=scale)
def ref_weight_scale(w, iter, stepsize, upper_bound_iter, scale):
Reported by Pylint.
Line: 40
Column: 33
op = core.CreateOperator(
"WeightScale", ["w", "iter"], ["nw"], stepsize=stepsize, upper_bound_iter=upper_bound_iter, scale=scale)
def ref_weight_scale(w, iter, stepsize, upper_bound_iter, scale):
iter = iter + 1
return [w * scale if iter % stepsize == 0 and iter < upper_bound_iter else w]
input_device_options = {'iter': hu.cpu_do}
self.assertReferenceChecks(
Reported by Pylint.
Line: 1
Column: 1
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
Reported by Pylint.
Line: 23
Column: 1
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import functools
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
class TestWeightScale(hu.HypothesisTestCase):
Reported by Pylint.
Line: 28
Column: 1
import hypothesis.strategies as st
import numpy as np
class TestWeightScale(hu.HypothesisTestCase):
@given(inputs=hu.tensors(n=1),
ITER=st.integers(min_value=0, max_value=100),
stepsize=st.integers(min_value=20, max_value=50),
upper_bound_iter=st.integers(min_value=5, max_value=100),
scale=st.floats(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False),
Reported by Pylint.
Line: 35
Column: 5
upper_bound_iter=st.integers(min_value=5, max_value=100),
scale=st.floats(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False),
**hu.gcs)
def test_weight_scale(self, inputs, ITER, stepsize, upper_bound_iter, scale, gc, dc):
ITER = np.array([ITER], dtype=np.int64)
op = core.CreateOperator(
"WeightScale", ["w", "iter"], ["nw"], stepsize=stepsize, upper_bound_iter=upper_bound_iter, scale=scale)
def ref_weight_scale(w, iter, stepsize, upper_bound_iter, scale):
Reported by Pylint.
Line: 35
Column: 5
upper_bound_iter=st.integers(min_value=5, max_value=100),
scale=st.floats(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False),
**hu.gcs)
def test_weight_scale(self, inputs, ITER, stepsize, upper_bound_iter, scale, gc, dc):
ITER = np.array([ITER], dtype=np.int64)
op = core.CreateOperator(
"WeightScale", ["w", "iter"], ["nw"], stepsize=stepsize, upper_bound_iter=upper_bound_iter, scale=scale)
def ref_weight_scale(w, iter, stepsize, upper_bound_iter, scale):
Reported by Pylint.
Line: 35
Column: 5
upper_bound_iter=st.integers(min_value=5, max_value=100),
scale=st.floats(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False),
**hu.gcs)
def test_weight_scale(self, inputs, ITER, stepsize, upper_bound_iter, scale, gc, dc):
ITER = np.array([ITER], dtype=np.int64)
op = core.CreateOperator(
"WeightScale", ["w", "iter"], ["nw"], stepsize=stepsize, upper_bound_iter=upper_bound_iter, scale=scale)
def ref_weight_scale(w, iter, stepsize, upper_bound_iter, scale):
Reported by Pylint.
caffe2/quantization/server/spatial_batch_norm_dnnlowp_op_test.py
16 issues
Line: 6
Column: 1
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, utils, workspace
from caffe2.quantization.server import utils as dnnlowp_utils
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given
Reported by Pylint.
Line: 11
Column: 1
from caffe2.python import core, dyndep, utils, workspace
from caffe2.quantization.server import utils as dnnlowp_utils
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
Reported by Pylint.
Line: 35
Column: 9
self,
size,
input_channels,
output_channels,
batch_size,
order,
in_quantized,
out_quantized,
fuse_relu,
Reported by Pylint.
Line: 42
Column: 9
out_quantized,
fuse_relu,
gc,
dc,
):
X_min = -77
X_max = X_min + 255
X = np.round(np.random.rand(batch_size, size, size, input_channels)).astype(
np.float32
Reported by Pylint.
Line: 1
Column: 1
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, utils, workspace
from caffe2.quantization.server import utils as dnnlowp_utils
Reported by Pylint.
Line: 18
Column: 1
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPOpSpatialBNTest(hu.HypothesisTestCase):
# correctness test with no quantization error in inputs
@given(
size=st.integers(10, 16),
input_channels=st.integers(2, 16),
output_channels=st.integers(2, 16),
Reported by Pylint.
Line: 30
Column: 5
out_quantized=st.booleans(),
fuse_relu=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_spatial_bn_int(
self,
size,
input_channels,
output_channels,
Reported by Pylint.
Line: 30
Column: 5
out_quantized=st.booleans(),
fuse_relu=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_spatial_bn_int(
self,
size,
input_channels,
output_channels,
Reported by Pylint.
Line: 30
Column: 5
out_quantized=st.booleans(),
fuse_relu=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_spatial_bn_int(
self,
size,
input_channels,
output_channels,
Reported by Pylint.
Line: 30
Column: 5
out_quantized=st.booleans(),
fuse_relu=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_spatial_bn_int(
self,
size,
input_channels,
output_channels,
Reported by Pylint.