The following issues were found:
caffe2/python/parallelize_bmuf_distributed_test.py
26 issues
Line: 13
Column: 1
import shutil
import logging
from hypothesis import given, settings
import hypothesis.strategies as st
from caffe2.python import workspace
log = logging.getLogger("parallelize_bmuf_distributed_test")
Reported by Pylint.
Line: 14
Column: 1
import logging
from hypothesis import given, settings
import hypothesis.strategies as st
from caffe2.python import workspace
log = logging.getLogger("parallelize_bmuf_distributed_test")
log.setLevel(logging.INFO)
Reported by Pylint.
Line: 64
Column: 28
model.param_init_net.UniformFill([], ["sync_num"], shape=[1])
return [loss]
def _input_builder_fun(model):
return None
def _param_update_fun(model):
ITER = model.Iter("ITER")
LR = model.net.LearningRate(
Reported by Pylint.
Line: 92
Column: 17
batch_per_device = batch_size // len(devices)
for (j, g) in enumerate(devices):
st = j * batch_per_device
en = st + batch_per_device
data = full_data[st:en, :].astype(np.float32)
labels = full_labels[st:en].astype(np.float32)
with core.DeviceScope(core.DeviceOption(device_type, g)):
workspace.FeedBlob("{}_{}/data".format(device_prefix, g), data)
Reported by Pylint.
Line: 184
Column: 26
"{}_{}/fc_w_g".format(device_prefix, _device_pid(0, process_id)))
results['b_g_'] = b_g_
results['w_g_'] = w_g_
workspace.RunNetOnce(model._global_model_param_updates_net)
# g_b = (b_0_ + b_1_) / 2 - b_g_
# g_w = (w_0_ + w_1_) / 2 - w_g_
v_b = workspace.FetchBlob(
"{}_{}/fc_b_v".format(device_prefix, _device_pid(0, process_id)))
Reported by Pylint.
Line: 1
Column: 1
from multiprocessing import Process, Manager
import numpy as np
import unittest
import tempfile
Reported by Pylint.
Line: 8
Column: 1
from multiprocessing import Process, Manager
import numpy as np
import unittest
import tempfile
import shutil
import logging
from hypothesis import given, settings
Reported by Pylint.
Line: 9
Column: 1
import numpy as np
import unittest
import tempfile
import shutil
import logging
from hypothesis import given, settings
import hypothesis.strategies as st
Reported by Pylint.
Line: 10
Column: 1
import numpy as np
import unittest
import tempfile
import shutil
import logging
from hypothesis import given, settings
import hypothesis.strategies as st
Reported by Pylint.
Line: 11
Column: 1
import unittest
import tempfile
import shutil
import logging
from hypothesis import given, settings
import hypothesis.strategies as st
from caffe2.python import workspace
Reported by Pylint.
test/test_modules.py
26 issues
Line: 1
Column: 1
import torch
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_modules import module_db, modules
from torch.testing._internal.common_utils import (
TestCase, run_tests, freeze_rng_state, mock_wrapper, get_tensors_from)
from unittest.mock import patch
class TestModule(TestCase):
Reported by Pylint.
Line: 2
Column: 1
import torch
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_modules import module_db, modules
from torch.testing._internal.common_utils import (
TestCase, run_tests, freeze_rng_state, mock_wrapper, get_tensors_from)
from unittest.mock import patch
class TestModule(TestCase):
Reported by Pylint.
Line: 3
Column: 1
import torch
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_modules import module_db, modules
from torch.testing._internal.common_utils import (
TestCase, run_tests, freeze_rng_state, mock_wrapper, get_tensors_from)
from unittest.mock import patch
class TestModule(TestCase):
Reported by Pylint.
Line: 4
Column: 1
import torch
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_modules import module_db, modules
from torch.testing._internal.common_utils import (
TestCase, run_tests, freeze_rng_state, mock_wrapper, get_tensors_from)
from unittest.mock import patch
class TestModule(TestCase):
Reported by Pylint.
Line: 35
Column: 3
outputs = m(*args, **kwargs)
# === Compare outputs to a reference if one is specified. ===
# TODO: Handle precision
reference_fn = module_input.reference_fn
if reference_fn is not None:
ref_outputs = reference_fn(m, *args, **kwargs)
self.assertEqual(outputs, ref_outputs)
Reported by Pylint.
Line: 1
Column: 1
import torch
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_modules import module_db, modules
from torch.testing._internal.common_utils import (
TestCase, run_tests, freeze_rng_state, mock_wrapper, get_tensors_from)
from unittest.mock import patch
class TestModule(TestCase):
Reported by Pylint.
Line: 6
Column: 1
from torch.testing._internal.common_modules import module_db, modules
from torch.testing._internal.common_utils import (
TestCase, run_tests, freeze_rng_state, mock_wrapper, get_tensors_from)
from unittest.mock import patch
class TestModule(TestCase):
_do_cuda_memory_leak_check = True
_do_cuda_non_default_stream = True
Reported by Pylint.
Line: 9
Column: 1
from unittest.mock import patch
class TestModule(TestCase):
_do_cuda_memory_leak_check = True
_do_cuda_non_default_stream = True
precision = 1e-5
rel_tol = 1e-5
Reported by Pylint.
Line: 16
Column: 5
rel_tol = 1e-5
@modules(module_db)
def test_forward(self, device, dtype, module_info):
module_cls = module_info.module_cls
module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=False)
for module_input in module_inputs:
if module_input.forward_input is None:
Reported by Pylint.
Line: 26
Column: 1
with freeze_rng_state():
# === Instantiate the module. ===
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
m = module_cls(*args, **kwargs)
m.to(device).to(dtype)
# === Do forward pass. ===
args, kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
Reported by Pylint.
test/package/test_directory_reader.py
26 issues
Line: 9
Column: 1
from textwrap import dedent
from unittest import skipIf
import torch
from torch.package import PackageExporter, PackageImporter
from torch.testing._internal.common_utils import (
run_tests,
IS_FBCODE,
IS_SANDCASTLE,
Reported by Pylint.
Line: 10
Column: 1
from unittest import skipIf
import torch
from torch.package import PackageExporter, PackageImporter
from torch.testing._internal.common_utils import (
run_tests,
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
Reported by Pylint.
Line: 11
Column: 1
import torch
from torch.package import PackageExporter, PackageImporter
from torch.testing._internal.common_utils import (
run_tests,
IS_FBCODE,
IS_SANDCASTLE,
IS_WINDOWS,
)
Reported by Pylint.
Line: 70
Column: 9
"""
Test basic saving and loading of a packages from a DirectoryReader.
"""
import package_a
filename = self.temp()
with PackageExporter(filename) as e:
e.save_module("package_a")
Reported by Pylint.
Line: 88
Column: 9
"""
Test DirectoryReader's has_record().
"""
import package_a # noqa: F401
filename = self.temp()
with PackageExporter(filename) as e:
e.save_module("package_a")
Reported by Pylint.
Line: 261
Column: 9
Test basic saving and loading of a ScriptModule in a directory.
Currently not supported.
"""
from package_a.test_module import ModWithTensor
scripted_mod = torch.jit.script(ModWithTensor(torch.rand(1, 2, 3)))
filename = self.temp()
with PackageExporter(filename) as e:
Reported by Pylint.
Line: 63
Column: 13
zip_file.extractall(path=temp_dir)
importer = PackageImporter(Path(temp_dir) / Path(filename).name)
dir_mod = importer.load_pickle("model", "model.pkl")
input = torch.rand(1, 3, 224, 224)
self.assertTrue(torch.allclose(dir_mod(input), resnet(input)))
def test_loading_module(self):
"""
Test basic saving and loading of a packages from a DirectoryReader.
Reported by Pylint.
Line: 88
Column: 9
"""
Test DirectoryReader's has_record().
"""
import package_a # noqa: F401
filename = self.temp()
with PackageExporter(filename) as e:
e.save_module("package_a")
Reported by Pylint.
Line: 279
Column: 17
with TemporaryDirectory() as temp_dir:
zip_file.extractall(path=temp_dir)
dir_importer = PackageImporter(Path(temp_dir) / Path(filename).name)
dir_mod = dir_importer.load_pickle("res", "mod.pkl")
if __name__ == "__main__":
run_tests()
Reported by Pylint.
Line: 1
Column: 1
# -*- coding: utf-8 -*-
import os
import zipfile
from sys import version_info
from tempfile import TemporaryDirectory
from textwrap import dedent
from unittest import skipIf
import torch
Reported by Pylint.
c10/test/util/LeftRight_test.cpp
26 issues
Line: 12
Column: 18
CWE codes:
120
20
LeftRight<int> obj;
obj.write([](int& obj) { obj = 5; });
int read = obj.read([](const int& obj) { return obj; });
EXPECT_EQ(5, read);
// check changes are also present in background copy
obj.write([](int&) {}); // this switches to the background copy
read = obj.read([](const int& obj) { return obj; });
Reported by FlawFinder.
Line: 13
Column: 16
CWE codes:
120
20
obj.write([](int& obj) { obj = 5; });
int read = obj.read([](const int& obj) { return obj; });
EXPECT_EQ(5, read);
// check changes are also present in background copy
obj.write([](int&) {}); // this switches to the background copy
read = obj.read([](const int& obj) { return obj; });
EXPECT_EQ(5, read);
Reported by FlawFinder.
Line: 17
Column: 14
CWE codes:
120
20
// check changes are also present in background copy
obj.write([](int&) {}); // this switches to the background copy
read = obj.read([](const int& obj) { return obj; });
EXPECT_EQ(5, read);
}
TEST(LeftRightTest, givenVector_whenWritingAndReading_thenChangesArePresent) {
LeftRight<vector<int>> obj;
Reported by FlawFinder.
Line: 18
Column: 16
CWE codes:
120
20
// check changes are also present in background copy
obj.write([](int&) {}); // this switches to the background copy
read = obj.read([](const int& obj) { return obj; });
EXPECT_EQ(5, read);
}
TEST(LeftRightTest, givenVector_whenWritingAndReading_thenChangesArePresent) {
LeftRight<vector<int>> obj;
Reported by FlawFinder.
Line: 25
Column: 26
CWE codes:
120
20
LeftRight<vector<int>> obj;
obj.write([](vector<int>& obj) { obj.push_back(5); });
vector<int> read = obj.read([](const vector<int>& obj) { return obj; });
EXPECT_EQ((vector<int>{5}), read);
obj.write([](vector<int>& obj) { obj.push_back(6); });
read = obj.read([](const vector<int>& obj) { return obj; });
EXPECT_EQ((vector<int>{5, 6}), read);
Reported by FlawFinder.
Line: 26
Column: 31
CWE codes:
120
20
obj.write([](vector<int>& obj) { obj.push_back(5); });
vector<int> read = obj.read([](const vector<int>& obj) { return obj; });
EXPECT_EQ((vector<int>{5}), read);
obj.write([](vector<int>& obj) { obj.push_back(6); });
read = obj.read([](const vector<int>& obj) { return obj; });
EXPECT_EQ((vector<int>{5, 6}), read);
}
Reported by FlawFinder.
Line: 29
Column: 14
CWE codes:
120
20
EXPECT_EQ((vector<int>{5}), read);
obj.write([](vector<int>& obj) { obj.push_back(6); });
read = obj.read([](const vector<int>& obj) { return obj; });
EXPECT_EQ((vector<int>{5, 6}), read);
}
TEST(LeftRightTest, givenVector_whenWritingReturnsValue_thenValueIsReturned) {
LeftRight<vector<int>> obj;
Reported by FlawFinder.
Line: 30
Column: 34
CWE codes:
120
20
obj.write([](vector<int>& obj) { obj.push_back(6); });
read = obj.read([](const vector<int>& obj) { return obj; });
EXPECT_EQ((vector<int>{5, 6}), read);
}
TEST(LeftRightTest, givenVector_whenWritingReturnsValue_thenValueIsReturned) {
LeftRight<vector<int>> obj;
Reported by FlawFinder.
Line: 46
Column: 9
CWE codes:
120
20
std::atomic<int> num_running_readers{0};
std::thread reader1([&]() {
obj.read([&](const int&) {
++num_running_readers;
while (num_running_readers.load() < 2) {
}
});
});
Reported by FlawFinder.
caffe2/python/operator_test/lengths_top_k_ops_test.py
26 issues
Line: 7
Column: 1
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
Reported by Pylint.
Line: 10
Column: 1
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
class TestLengthsTopKOps(serial.SerializedTestCase):
@serial.given(N=st.integers(min_value=0, max_value=10),
Reported by Pylint.
Line: 54
Column: 27
X = np.array([], dtype=np.float32)
op = core.CreateOperator("LengthsTopK", ["X", "Y"], ["values", "indices"], k=K)
def lengths_top_k(X, lens):
return (np.zeros((N, K), dtype=np.float32),
-1 * np.ones((N, K), dtype=np.int32))
self.assertDeviceChecks(dc, op, [X, lens], [0, 1])
self.assertReferenceChecks(gc, op, [X, lens], lengths_top_k)
Reported by Pylint.
Line: 54
Column: 30
X = np.array([], dtype=np.float32)
op = core.CreateOperator("LengthsTopK", ["X", "Y"], ["values", "indices"], k=K)
def lengths_top_k(X, lens):
return (np.zeros((N, K), dtype=np.float32),
-1 * np.ones((N, K), dtype=np.int32))
self.assertDeviceChecks(dc, op, [X, lens], [0, 1])
self.assertReferenceChecks(gc, op, [X, lens], lengths_top_k)
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
Reported by Pylint.
Line: 14
Column: 1
import numpy as np
class TestLengthsTopKOps(serial.SerializedTestCase):
@serial.given(N=st.integers(min_value=0, max_value=10),
K=st.integers(min_value=1, max_value=10),
**hu.gcs_cpu_only)
def test_lengths_top_k_op(self, N, K, gc, dc):
lens = np.random.randint(low=1, high=2 * K + 1, size=N).astype(np.int32)
Reported by Pylint.
Line: 18
Column: 5
@serial.given(N=st.integers(min_value=0, max_value=10),
K=st.integers(min_value=1, max_value=10),
**hu.gcs_cpu_only)
def test_lengths_top_k_op(self, N, K, gc, dc):
lens = np.random.randint(low=1, high=2 * K + 1, size=N).astype(np.int32)
X = []
for i in lens:
X.extend(x / 100.0 for x in range(0, 6 * i, 6))
X = np.array(X, dtype=np.float32)
Reported by Pylint.
Line: 18
Column: 5
@serial.given(N=st.integers(min_value=0, max_value=10),
K=st.integers(min_value=1, max_value=10),
**hu.gcs_cpu_only)
def test_lengths_top_k_op(self, N, K, gc, dc):
lens = np.random.randint(low=1, high=2 * K + 1, size=N).astype(np.int32)
X = []
for i in lens:
X.extend(x / 100.0 for x in range(0, 6 * i, 6))
X = np.array(X, dtype=np.float32)
Reported by Pylint.
Line: 18
Column: 5
@serial.given(N=st.integers(min_value=0, max_value=10),
K=st.integers(min_value=1, max_value=10),
**hu.gcs_cpu_only)
def test_lengths_top_k_op(self, N, K, gc, dc):
lens = np.random.randint(low=1, high=2 * K + 1, size=N).astype(np.int32)
X = []
for i in lens:
X.extend(x / 100.0 for x in range(0, 6 * i, 6))
X = np.array(X, dtype=np.float32)
Reported by Pylint.
Line: 18
Column: 5
@serial.given(N=st.integers(min_value=0, max_value=10),
K=st.integers(min_value=1, max_value=10),
**hu.gcs_cpu_only)
def test_lengths_top_k_op(self, N, K, gc, dc):
lens = np.random.randint(low=1, high=2 * K + 1, size=N).astype(np.int32)
X = []
for i in lens:
X.extend(x / 100.0 for x in range(0, 6 * i, 6))
X = np.array(X, dtype=np.float32)
Reported by Pylint.
benchmarks/sparse/dlmc/matmul_bench.py
26 issues
Line: 10
Column: 1
import sys
import argparse
import torch
import torch.utils.benchmark as benchmark_utils
from .utils import load_dlmc_dataset
from scipy.sparse import isspmatrix
import os
Reported by Pylint.
Line: 11
Column: 1
import sys
import argparse
import torch
import torch.utils.benchmark as benchmark_utils
from .utils import load_dlmc_dataset
from scipy.sparse import isspmatrix
import os
Reported by Pylint.
Line: 12
Column: 1
import argparse
import torch
import torch.utils.benchmark as benchmark_utils
from .utils import load_dlmc_dataset
from scipy.sparse import isspmatrix
import os
def scipy_matmul(mat1, mat2):
Reported by Pylint.
Line: 13
Column: 1
import torch
import torch.utils.benchmark as benchmark_utils
from .utils import load_dlmc_dataset
from scipy.sparse import isspmatrix
import os
def scipy_matmul(mat1, mat2):
if isspmatrix(mat1) and isspmatrix(mat2):
Reported by Pylint.
Line: 41
Column: 5
# also get the arguments as input from the user using `argparse`
def parse_args():
parser = argparse.ArgumentParser(description='matmul benchmark')
parser.add_argument('--path', type=str, help='DLMC dataset path')
parser.add_argument('--dataset', type=str, default='magnitude_pruning')
parser.add_argument('--hidden_size', default=2048, type=int)
parser.add_argument('--backward_test', action="store_true")
parser.add_argument('--operation', type=str, help="|".join(OPS_MAP.keys()), default=next(iter(OPS_MAP)))
Reported by Pylint.
Line: 52
Column: 34
return parser
def get_tasks(op, backward_test, device):
def filter_ops(operation):
if backward_test:
test_name = device + ":matmul-backward"
return [
(test_name, device, "torch:" + operation.replace("sparse", "dense"),
Reported by Pylint.
Line: 1
Column: 1
# Sparse benchmarks
# This benchmark is for sparse matmul performance test.
# They exist for comparing the performance of sparse matrix routines
# `sparse @ vector`, `sparse @ sparse` and `sparse @ dense` with different backends (CPU/CUDA)
# and with other frameworks such as scipy.
import sys
import argparse
Reported by Pylint.
Line: 13
Column: 1
import torch
import torch.utils.benchmark as benchmark_utils
from .utils import load_dlmc_dataset
from scipy.sparse import isspmatrix
import os
def scipy_matmul(mat1, mat2):
if isspmatrix(mat1) and isspmatrix(mat2):
Reported by Pylint.
Line: 14
Column: 1
import torch.utils.benchmark as benchmark_utils
from .utils import load_dlmc_dataset
from scipy.sparse import isspmatrix
import os
def scipy_matmul(mat1, mat2):
if isspmatrix(mat1) and isspmatrix(mat2):
return mat1.dot(mat2).tocoo()
Reported by Pylint.
Line: 17
Column: 1
import os
def scipy_matmul(mat1, mat2):
if isspmatrix(mat1) and isspmatrix(mat2):
return mat1.dot(mat2).tocoo()
return mat1.dot(mat2)
def matmul_backward(a_dense, b_dense, grad_output):
Reported by Pylint.
benchmarks/profiler_benchmark/profiler_bench.py
26 issues
Line: 4
Column: 1
import argparse
import sys
import timeit
import torch
from torch.utils.benchmark import Timer
PARALLEL_TASKS_NUM = 4
INTERNAL_ITER = None
Reported by Pylint.
Line: 6
Column: 1
import timeit
import torch
from torch.utils.benchmark import Timer
PARALLEL_TASKS_NUM = 4
INTERNAL_ITER = None
def loop_workload(x):
for i in range(INTERNAL_ITER):
Reported by Pylint.
Line: 11
Column: 9
PARALLEL_TASKS_NUM = 4
INTERNAL_ITER = None
def loop_workload(x):
for i in range(INTERNAL_ITER):
x = torch.mm(x, x)
return x
def parallel_workload(x):
def parallel_task(x):
Reported by Pylint.
Line: 17
Column: 13
def parallel_workload(x):
def parallel_task(x):
for i in range(int(INTERNAL_ITER / PARALLEL_TASKS_NUM)):
x = torch.mm(x, x)
return x
futs = []
for i in range(PARALLEL_TASKS_NUM):
futs.append(torch.jit._fork(parallel_task, x))
Reported by Pylint.
Line: 22
Column: 21
return x
futs = []
for i in range(PARALLEL_TASKS_NUM):
futs.append(torch.jit._fork(parallel_task, x))
for i in range(PARALLEL_TASKS_NUM):
torch.jit._wait(futs[i])
return x
Reported by Pylint.
Line: 24
Column: 9
for i in range(PARALLEL_TASKS_NUM):
futs.append(torch.jit._fork(parallel_task, x))
for i in range(PARALLEL_TASKS_NUM):
torch.jit._wait(futs[i])
return x
if __name__ == '__main__':
torch._C._set_graph_executor_optimize(False)
Reported by Pylint.
Line: 29
Column: 5
if __name__ == '__main__':
torch._C._set_graph_executor_optimize(False)
parser = argparse.ArgumentParser(
description='Profiler benchmark')
parser.add_argument('--with_cuda', action='store_true')
parser.add_argument('--with_stack', action='store_true')
Reported by Pylint.
Line: 29
Column: 5
if __name__ == '__main__':
torch._C._set_graph_executor_optimize(False)
parser = argparse.ArgumentParser(
description='Profiler benchmark')
parser.add_argument('--with_cuda', action='store_true')
parser.add_argument('--with_stack', action='store_true')
Reported by Pylint.
Line: 88
Column: 56
use_cuda=args.with_cuda,
with_stack=args.with_stack,
use_kineto=args.use_kineto,
use_cpu=not args.cuda_only) as prof:
x = workload(input_x)
return x
else:
def payload():
return workload(input_x)
Reported by Pylint.
Line: 89
Column: 34
with_stack=args.with_stack,
use_kineto=args.use_kineto,
use_cpu=not args.cuda_only) as prof:
x = workload(input_x)
return x
else:
def payload():
return workload(input_x)
Reported by Pylint.
benchmarks/operator_benchmark/pt/tensor_to_test.py
26 issues
Line: 2
Column: 1
import operator_benchmark as op_bench
import torch
tensor_conversion_short_configs = op_bench.cross_product_configs(
M=(8, 16, 32,),
N=(16, 64, 128,),
device=['cpu', 'cuda'],
tags=['short'],
)
Reported by Pylint.
Line: 4
Column: 35
import operator_benchmark as op_bench
import torch
tensor_conversion_short_configs = op_bench.cross_product_configs(
M=(8, 16, 32,),
N=(16, 64, 128,),
device=['cpu', 'cuda'],
tags=['short'],
)
Reported by Pylint.
Line: 11
Column: 34
tags=['short'],
)
tensor_conversion_long_configs = op_bench.cross_product_configs(
M=(64, 128, 256, 512,),
N=(256, 512, 1024, 2048,),
device=['cpu', 'cuda'],
tags=['long'],
)
Reported by Pylint.
Line: 18
Column: 44
tags=['long'],
)
class FloatToHalfTensorConversionBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, device):
self.inputs = {
"input": torch.rand(M, N, device=device, requires_grad=False, dtype=torch.float)
}
Reported by Pylint.
Line: 27
Column: 44
def forward(self, input):
return input.to(torch.half)
class HalfToFloatTensorConversionBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, device):
self.inputs = {
"input": torch.rand(M, N, device=device, requires_grad=False, dtype=torch.half)
}
Reported by Pylint.
Line: 37
Column: 1
return input.to(torch.float)
op_bench.generate_pt_test(tensor_conversion_short_configs, FloatToHalfTensorConversionBenchmark)
op_bench.generate_pt_test(tensor_conversion_long_configs, FloatToHalfTensorConversionBenchmark)
op_bench.generate_pt_test(tensor_conversion_short_configs, HalfToFloatTensorConversionBenchmark)
op_bench.generate_pt_test(tensor_conversion_long_configs, HalfToFloatTensorConversionBenchmark)
if __name__ == "__main__":
Reported by Pylint.
Line: 38
Column: 1
op_bench.generate_pt_test(tensor_conversion_short_configs, FloatToHalfTensorConversionBenchmark)
op_bench.generate_pt_test(tensor_conversion_long_configs, FloatToHalfTensorConversionBenchmark)
op_bench.generate_pt_test(tensor_conversion_short_configs, HalfToFloatTensorConversionBenchmark)
op_bench.generate_pt_test(tensor_conversion_long_configs, HalfToFloatTensorConversionBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 39
Column: 1
op_bench.generate_pt_test(tensor_conversion_short_configs, FloatToHalfTensorConversionBenchmark)
op_bench.generate_pt_test(tensor_conversion_long_configs, FloatToHalfTensorConversionBenchmark)
op_bench.generate_pt_test(tensor_conversion_short_configs, HalfToFloatTensorConversionBenchmark)
op_bench.generate_pt_test(tensor_conversion_long_configs, HalfToFloatTensorConversionBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 40
Column: 1
op_bench.generate_pt_test(tensor_conversion_short_configs, FloatToHalfTensorConversionBenchmark)
op_bench.generate_pt_test(tensor_conversion_long_configs, FloatToHalfTensorConversionBenchmark)
op_bench.generate_pt_test(tensor_conversion_short_configs, HalfToFloatTensorConversionBenchmark)
op_bench.generate_pt_test(tensor_conversion_long_configs, HalfToFloatTensorConversionBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 20
Column: 9
class FloatToHalfTensorConversionBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, device):
self.inputs = {
"input": torch.rand(M, N, device=device, requires_grad=False, dtype=torch.float)
}
def forward(self, input):
return input.to(torch.half)
Reported by Pylint.
benchmarks/operator_benchmark/pt/qbatchnorm_test.py
26 issues
Line: 3
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for quantized batchnorm operator."""
batchnorm_configs_short = op_bench.config_list(
attr_names=["M", "N", "K"],
Reported by Pylint.
Line: 8
Column: 27
"""Microbenchmarks for quantized batchnorm operator."""
batchnorm_configs_short = op_bench.config_list(
attr_names=["M", "N", "K"],
attrs=[
[1, 256, 3136],
],
cross_product_configs={
Reported by Pylint.
Line: 21
Column: 27
)
class QBatchNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device, dtype):
self._init(M, N, K, device)
x_scale = 0.1
x_zero_point = 0
self.inputs = {
Reported by Pylint.
Line: 89
Column: 1
Y_scale, Y_zero_point)
op_bench.generate_pt_test(batchnorm_configs_short, QBatchNorm1dBenchmark)
op_bench.generate_pt_test(batchnorm_configs_short, QBatchNorm2dBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 90
Column: 1
op_bench.generate_pt_test(batchnorm_configs_short, QBatchNorm1dBenchmark)
op_bench.generate_pt_test(batchnorm_configs_short, QBatchNorm2dBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 6
Column: 1
import torch
"""Microbenchmarks for quantized batchnorm operator."""
batchnorm_configs_short = op_bench.config_list(
attr_names=["M", "N", "K"],
attrs=[
[1, 256, 3136],
Reported by Pylint.
Line: 26
Column: 9
self._init(M, N, K, device)
x_scale = 0.1
x_zero_point = 0
self.inputs = {
"q_input_one": torch.quantize_per_tensor(
self.input_one, scale=x_scale, zero_point=x_zero_point, dtype=dtype),
"mean": torch.rand(N),
"var": torch.rand(N),
"weight": torch.rand(N),
Reported by Pylint.
Line: 48
Column: 9
class QBatchNorm1dBenchmark(QBatchNormBenchmark):
def _init(self, M, N, K, device):
self.set_module_name("QBatchNorm1d")
self.input_one = torch.rand(M, N, K, device=device, requires_grad=self.auto_set())
def forward(
self,
q_input_one,
weight,
Reported by Pylint.
Line: 50
Column: 5
self.set_module_name("QBatchNorm1d")
self.input_one = torch.rand(M, N, K, device=device, requires_grad=self.auto_set())
def forward(
self,
q_input_one,
weight,
bias,
mean,
Reported by Pylint.
Line: 71
Column: 9
self.set_module_name("QBatchNorm2d")
# Note: quantized implementation requires rank 4, which is why we
# add a 1 as the last dimension
self.input_one = torch.rand(M, N, K, 1, device=device, requires_grad=self.auto_set())
def forward(
self,
q_input_one,
weight,
Reported by Pylint.
tools/code_coverage/package/util/utils.py
26 issues
Line: 7
Column: 1
import time
from typing import Any, NoReturn, Optional
from .setting import (
LOG_DIR,
PROFILE_DIR,
CompilerType,
TestList,
TestPlatform,
Reported by Pylint.
Line: 96
Column: 9
cov_type = detect_compiler_type() # type: ignore[call-arg]
else:
from caffe2.fb.code_coverage.tool.package.fbcode.utils import ( # type: ignore[import]
detect_compiler_type,
)
cov_type = detect_compiler_type()
Reported by Pylint.
Line: 92
Column: 9
def detect_compiler_type(platform: TestPlatform) -> CompilerType:
if platform == TestPlatform.OSS:
from package.oss.utils import detect_compiler_type # type: ignore[misc]
cov_type = detect_compiler_type() # type: ignore[call-arg]
else:
from caffe2.fb.code_coverage.tool.package.fbcode.utils import ( # type: ignore[import]
detect_compiler_type,
Reported by Pylint.
Line: 1
Column: 1
import os
import shutil
import sys
import time
from typing import Any, NoReturn, Optional
from .setting import (
LOG_DIR,
PROFILE_DIR,
Reported by Pylint.
Line: 17
Column: 1
)
def convert_time(seconds: float) -> str:
seconds = int(round(seconds))
seconds = seconds % (24 * 3600)
hour = seconds // 3600
seconds %= 3600
minutes = seconds // 60
Reported by Pylint.
Line: 28
Column: 1
return "%d:%02d:%02d" % (hour, minutes, seconds)
def print_time(message: str, start_time: float, summary_time: bool = False) -> None:
with open(os.path.join(LOG_DIR, "log.txt"), "a+") as log_file:
end_time = time.time()
print(message, convert_time(end_time - start_time), file=log_file)
if summary_time:
print("\n", file=log_file)
Reported by Pylint.
Line: 36
Column: 1
print("\n", file=log_file)
def print_log(*args: Any) -> None:
with open(os.path.join(LOG_DIR, "log.txt"), "a+") as log_file:
print(f"[LOG] {' '.join(args)}", file=log_file)
def print_error(*args: Any) -> None:
Reported by Pylint.
Line: 41
Column: 1
print(f"[LOG] {' '.join(args)}", file=log_file)
def print_error(*args: Any) -> None:
with open(os.path.join(LOG_DIR, "log.txt"), "a+") as log_file:
print(f"[ERROR] {' '.join(args)}", file=log_file)
def remove_file(path: str) -> None:
Reported by Pylint.
Line: 46
Column: 1
print(f"[ERROR] {' '.join(args)}", file=log_file)
def remove_file(path: str) -> None:
if os.path.exists(path):
os.remove(path)
def remove_folder(path: str) -> None:
Reported by Pylint.
Line: 51
Column: 1
os.remove(path)
def remove_folder(path: str) -> None:
shutil.rmtree(path)
def create_folder(*paths: Any) -> None:
for path in paths:
Reported by Pylint.