The following issues were found:
benchmarks/operator_benchmark/pt/hardswish_test.py
19 issues
Line: 3
Column: 1
import operator_benchmark as op_bench
import torch
import torch.nn as nn
"""
Microbenchmarks for the hardswish operators.
"""
Reported by Pylint.
Line: 4
Column: 1
import operator_benchmark as op_bench
import torch
import torch.nn as nn
"""
Microbenchmarks for the hardswish operators.
"""
Reported by Pylint.
Line: 13
Column: 27
# Configs for hardswish ops
hardswish_configs_short = op_bench.config_list(
attr_names=[
'N', 'C', 'H', 'W'
],
attrs=[
[1, 3, 256, 256],
Reported by Pylint.
Line: 28
Column: 26
)
hardswish_configs_long = op_bench.cross_product_configs(
N=[8, 16],
C=[3],
H=[256, 512],
W=[256, 512],
device=['cpu'],
Reported by Pylint.
Line: 38
Column: 22
)
hardswish_ops_list = op_bench.op_list(
attr_names=['op_name', 'op_func'],
attrs=[
['Hardswish', nn.Hardswish],
],
)
Reported by Pylint.
Line: 46
Column: 26
)
class HardswishBenchmark(op_bench.TorchBenchmarkBase):
def init(self, N, C, H, W, device, op_func):
self.inputs = {
"input_one": torch.rand(N, C, H, W, device=device)
}
self.op_func = op_func()
Reported by Pylint.
Line: 57
Column: 1
return self.op_func(input_one)
op_bench.generate_pt_tests_from_op_list(hardswish_ops_list,
hardswish_configs_short + hardswish_configs_long,
HardswishBenchmark)
if __name__ == "__main__":
Reported by Pylint.
Line: 7
Column: 1
import torch.nn as nn
"""
Microbenchmarks for the hardswish operators.
"""
# Configs for hardswish ops
Reported by Pylint.
Line: 48
Column: 9
class HardswishBenchmark(op_bench.TorchBenchmarkBase):
def init(self, N, C, H, W, device, op_func):
self.inputs = {
"input_one": torch.rand(N, C, H, W, device=device)
}
self.op_func = op_func()
def forward(self, input_one):
Reported by Pylint.
Line: 51
Column: 9
self.inputs = {
"input_one": torch.rand(N, C, H, W, device=device)
}
self.op_func = op_func()
def forward(self, input_one):
return self.op_func(input_one)
Reported by Pylint.
.circleci/cimodel/data/windows_build_definitions.py
19 issues
Line: 58
Column: 13
prerequisite_jobs.append("_".join(base_name_parts + ["build"]))
if self.cuda_version:
self.cudnn_version = 8 if self.cuda_version.major == 11 else 7
arch_env_elements = (
["cuda" + str(self.cuda_version.major) + "." + str(self.cuda_version.minor)]
if self.cuda_version
else ["cpu"]
Reported by Pylint.
Line: 154
Column: 3
# VS2019 CUDA-10.1 force on cpu
WindowsJob(1, _VC2019, CudaVersion(10, 1), force_on_cpu=True, master_only=True),
# TODO: This test is disabled due to https://github.com/pytorch/pytorch/issues/59724
# WindowsJob('_azure_multi_gpu', _VC2019, CudaVersion(11, 1), multi_gpu=True, master_and_nightly=True),
]
def get_windows_workflows():
Reported by Pylint.
Line: 1
Column: 1
import cimodel.lib.miniutils as miniutils
from cimodel.data.simple.util.branch_filters import gen_filter_dict, RC_PATTERN, NON_PR_BRANCH_LIST
from cimodel.data.simple.util.versions import CudaVersion
class WindowsJob:
def __init__(
self,
test_index,
Reported by Pylint.
Line: 6
Column: 1
from cimodel.data.simple.util.versions import CudaVersion
class WindowsJob:
def __init__(
self,
test_index,
vscode_spec,
cuda_version,
Reported by Pylint.
Line: 6
Column: 1
from cimodel.data.simple.util.versions import CudaVersion
class WindowsJob:
def __init__(
self,
test_index,
vscode_spec,
cuda_version,
Reported by Pylint.
Line: 6
Column: 1
from cimodel.data.simple.util.versions import CudaVersion
class WindowsJob:
def __init__(
self,
test_index,
vscode_spec,
cuda_version,
Reported by Pylint.
Line: 7
Column: 5
class WindowsJob:
def __init__(
self,
test_index,
vscode_spec,
cuda_version,
force_on_cpu=False,
Reported by Pylint.
Line: 27
Column: 5
self.nightly_only = nightly_only
self.master_and_nightly = master_and_nightly
def gen_tree(self):
base_phase = "build" if self.test_index is None else "test"
numbered_phase = (
base_phase if self.test_index is None else base_phase + str(self.test_index)
)
Reported by Pylint.
Line: 27
Column: 5
self.nightly_only = nightly_only
self.master_and_nightly = master_and_nightly
def gen_tree(self):
base_phase = "build" if self.test_index is None else "test"
numbered_phase = (
base_phase if self.test_index is None else base_phase + str(self.test_index)
)
Reported by Pylint.
Line: 100
Column: 1
elif self.master_and_nightly:
props_dict[
"filters"
] = gen_filter_dict(branches_list=NON_PR_BRANCH_LIST + ["nightly"], tags_list=RC_PATTERN)
name_parts = base_name_parts + cpu_forcing_name_parts + [numbered_phase]
if not self.multi_gpu:
if base_phase == "test":
Reported by Pylint.
benchmarks/functional_autograd_benchmark/ppl_models.py
19 issues
Line: 1
Column: 1
import torch
from torch import Tensor
import torch.distributions as dist
from utils import GetterReturnType
def get_simple_regression(device: torch.device) -> GetterReturnType:
N = 10
K = 10
Reported by Pylint.
Line: 2
Column: 1
import torch
from torch import Tensor
import torch.distributions as dist
from utils import GetterReturnType
def get_simple_regression(device: torch.device) -> GetterReturnType:
N = 10
K = 10
Reported by Pylint.
Line: 3
Column: 1
import torch
from torch import Tensor
import torch.distributions as dist
from utils import GetterReturnType
def get_simple_regression(device: torch.device) -> GetterReturnType:
N = 10
K = 10
Reported by Pylint.
Line: 1
Column: 1
import torch
from torch import Tensor
import torch.distributions as dist
from utils import GetterReturnType
def get_simple_regression(device: torch.device) -> GetterReturnType:
N = 10
K = 10
Reported by Pylint.
Line: 7
Column: 1
from utils import GetterReturnType
def get_simple_regression(device: torch.device) -> GetterReturnType:
N = 10
K = 10
loc_beta = 0.
scale_beta = 1.
Reported by Pylint.
Line: 8
Column: 5
from utils import GetterReturnType
def get_simple_regression(device: torch.device) -> GetterReturnType:
N = 10
K = 10
loc_beta = 0.
scale_beta = 1.
Reported by Pylint.
Line: 9
Column: 5
def get_simple_regression(device: torch.device) -> GetterReturnType:
N = 10
K = 10
loc_beta = 0.
scale_beta = 1.
beta_prior = dist.Normal(loc_beta, scale_beta)
Reported by Pylint.
Line: 16
Column: 5
beta_prior = dist.Normal(loc_beta, scale_beta)
X = torch.rand(N, K + 1, device=device)
Y = torch.rand(N, 1, device=device)
# X.shape: (N, K + 1), Y.shape: (N, 1), beta_value.shape: (K + 1, 1)
beta_value = beta_prior.sample((K + 1, 1))
beta_value.requires_grad_(True)
Reported by Pylint.
Line: 17
Column: 5
beta_prior = dist.Normal(loc_beta, scale_beta)
X = torch.rand(N, K + 1, device=device)
Y = torch.rand(N, 1, device=device)
# X.shape: (N, K + 1), Y.shape: (N, 1), beta_value.shape: (K + 1, 1)
beta_value = beta_prior.sample((K + 1, 1))
beta_value.requires_grad_(True)
Reported by Pylint.
Line: 24
Column: 9
beta_value.requires_grad_(True)
def forward(beta_value: Tensor) -> Tensor:
mu = X.mm(beta_value)
# We need to compute the first and second gradient of this score with respect
# to beta_value. We disable Bernoulli validation because Y is a relaxed value.
score = (dist.Bernoulli(logits=mu, validate_args=False).log_prob(Y).sum() +
beta_prior.log_prob(beta_value).sum())
Reported by Pylint.
aten/src/ATen/test/basic.cpp
19 issues
Line: 97
Column: 9
CWE codes:
327
Suggestion:
Use a more secure technique for acquiring random values
.count()
<< " ms" << std::endl;
std::srand(std::time(nullptr));
ASSERT_EQ(norm(a).item<double>(), 0.0);
}
void TestLoadsOfAdds(DeprecatedTypeProperties& type) {
auto begin = std::chrono::high_resolution_clock::now();
Reported by FlawFinder.
Line: 154
Column: 19
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
Tensor a = rand({3, 4}, type);
Tensor b = rand({4}, type);
Tensor c = mv(a, b);
ASSERT_TRUE(c.equal(addmv(zeros({3}, type), a, b, 0, 1)));
}
}
void TestSqueeze(DeprecatedTypeProperties& type) {
Tensor a = rand({2, 1}, type);
Reported by FlawFinder.
Line: 165
Column: 20
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
a = rand({1}, type);
b = squeeze(a);
// TODO 0-dim squeeze
ASSERT_TRUE(a[0].equal(b));
}
void TestCopy(DeprecatedTypeProperties& type) {
Tensor a = zeros({4, 3}, type);
Tensor e = rand({4, 3}, type);
Reported by FlawFinder.
Line: 172
Column: 17
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
Tensor a = zeros({4, 3}, type);
Tensor e = rand({4, 3}, type);
a.copy_(e);
ASSERT_TRUE(a.equal(e));
}
void TestCopyBroadcasting(DeprecatedTypeProperties& type) {
Tensor a = zeros({4, 3}, type);
Tensor e = rand({3}, type);
Reported by FlawFinder.
Line: 180
Column: 22
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
Tensor e = rand({3}, type);
a.copy_(e);
for (int i = 0; i < 4; ++i) {
ASSERT_TRUE(a[i].equal(e));
}
}
void TestAbsValue(DeprecatedTypeProperties& type) {
Tensor r = at::abs(at::scalar_tensor(-3, type.options()));
ASSERT_EQ_RESOLVED(r.item<int32_t>(), 3);
Reported by FlawFinder.
Line: 202
Column: 40
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
void TestAddingAValueWithScalar(DeprecatedTypeProperties& type) {
Tensor a = rand({4, 3}, type);
ASSERT_TRUE((ones({4, 3}, type) + a).equal(add(a, 1)));
}
void TestSelect(DeprecatedTypeProperties& type) {
Tensor a = rand({3, 7}, type);
auto a_13 = select(a, 1, 3);
Reported by FlawFinder.
Line: 209
Column: 23
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
Tensor a = rand({3, 7}, type);
auto a_13 = select(a, 1, 3);
auto a_13_02 = select(select(a, 1, 3), 0, 2);
ASSERT_TRUE(a[0][3].equal(a_13[0]));
ASSERT_TRUE(a[2][3].equal(a_13_02));
}
void TestZeroDim(DeprecatedTypeProperties& type) {
Tensor a = at::scalar_tensor(4, type.options()); // rand(type, {1});
Reported by FlawFinder.
Line: 210
Column: 23
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
auto a_13 = select(a, 1, 3);
auto a_13_02 = select(select(a, 1, 3), 0, 2);
ASSERT_TRUE(a[0][3].equal(a_13[0]));
ASSERT_TRUE(a[2][3].equal(a_13_02));
}
void TestZeroDim(DeprecatedTypeProperties& type) {
Tensor a = at::scalar_tensor(4, type.options()); // rand(type, {1});
Reported by FlawFinder.
Line: 251
Column: 27
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
Tensor tensor = arange(0, 10, kInt);
Tensor one = ones({}, kInt);
for (int64_t i = 0; i < tensor.numel(); ++i) {
ASSERT_TRUE(tensor[i].equal(one * i));
}
for (size_t i = 0; i < static_cast<uint64_t>(tensor.numel()); ++i) {
ASSERT_TRUE(tensor[i].equal(one * static_cast<int64_t>(i)));
}
for (int i = 0; i < tensor.numel(); ++i) {
Reported by FlawFinder.
Line: 254
Column: 27
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
ASSERT_TRUE(tensor[i].equal(one * i));
}
for (size_t i = 0; i < static_cast<uint64_t>(tensor.numel()); ++i) {
ASSERT_TRUE(tensor[i].equal(one * static_cast<int64_t>(i)));
}
for (int i = 0; i < tensor.numel(); ++i) {
ASSERT_TRUE(tensor[i].equal(one * i));
}
// NOLINTNEXTLINE(bugprone-too-small-loop-variable)
Reported by FlawFinder.
benchmarks/operator_benchmark/pt/hardsigmoid_test.py
19 issues
Line: 3
Column: 1
import operator_benchmark as op_bench
import torch
import torch.nn as nn
"""
Microbenchmarks for the hardsigmoid operator.
"""
Reported by Pylint.
Line: 4
Column: 1
import operator_benchmark as op_bench
import torch
import torch.nn as nn
"""
Microbenchmarks for the hardsigmoid operator.
"""
Reported by Pylint.
Line: 13
Column: 29
# Configs for hardsigmoid ops
hardsigmoid_configs_short = op_bench.config_list(
attr_names=[
'N', 'C', 'H', 'W'
],
attrs=[
[1, 3, 256, 256],
Reported by Pylint.
Line: 28
Column: 28
)
hardsigmoid_configs_long = op_bench.cross_product_configs(
N=[8, 16],
C=[3],
H=[256, 512],
W=[256, 512],
device=['cpu'],
Reported by Pylint.
Line: 38
Column: 24
)
hardsigmoid_ops_list = op_bench.op_list(
attr_names=['op_name', 'op_func'],
attrs=[
['Hardsigmoid', nn.Hardsigmoid],
],
)
Reported by Pylint.
Line: 46
Column: 28
)
class HardsigmoidBenchmark(op_bench.TorchBenchmarkBase):
def init(self, N, C, H, W, device, op_func):
self.inputs = {
"input_one": torch.rand(N, C, H, W, device=device)
}
self.op_func = op_func()
Reported by Pylint.
Line: 57
Column: 1
return self.op_func(input_one)
op_bench.generate_pt_tests_from_op_list(hardsigmoid_ops_list,
hardsigmoid_configs_short + hardsigmoid_configs_long,
HardsigmoidBenchmark)
if __name__ == "__main__":
Reported by Pylint.
Line: 7
Column: 1
import torch.nn as nn
"""
Microbenchmarks for the hardsigmoid operator.
"""
# Configs for hardsigmoid ops
Reported by Pylint.
Line: 48
Column: 9
class HardsigmoidBenchmark(op_bench.TorchBenchmarkBase):
def init(self, N, C, H, W, device, op_func):
self.inputs = {
"input_one": torch.rand(N, C, H, W, device=device)
}
self.op_func = op_func()
def forward(self, input_one):
Reported by Pylint.
Line: 51
Column: 9
self.inputs = {
"input_one": torch.rand(N, C, H, W, device=device)
}
self.op_func = op_func()
def forward(self, input_one):
return self.op_func(input_one)
Reported by Pylint.
caffe2/python/muji_test.py
19 issues
Line: 12
Column: 13
def RunningAllreduceWithGPUs(self, gpu_ids, allreduce_function):
"""A base function to test different scenarios."""
net = core.Net("mujitest")
for id in gpu_ids:
net.ConstantFill(
[],
"testblob_gpu_" + str(id),
shape=[1, 2, 3, 4],
value=float(id + 1),
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
import unittest
from caffe2.python import core, workspace, muji, test_util
@unittest.skipIf(not workspace.has_gpu_support, "no gpu")
class TestMuji(test_util.TestCase):
def RunningAllreduceWithGPUs(self, gpu_ids, allreduce_function):
Reported by Pylint.
Line: 2
Column: 1
import numpy as np
import unittest
from caffe2.python import core, workspace, muji, test_util
@unittest.skipIf(not workspace.has_gpu_support, "no gpu")
class TestMuji(test_util.TestCase):
def RunningAllreduceWithGPUs(self, gpu_ids, allreduce_function):
Reported by Pylint.
Line: 8
Column: 1
@unittest.skipIf(not workspace.has_gpu_support, "no gpu")
class TestMuji(test_util.TestCase):
def RunningAllreduceWithGPUs(self, gpu_ids, allreduce_function):
"""A base function to test different scenarios."""
net = core.Net("mujitest")
for id in gpu_ids:
net.ConstantFill(
Reported by Pylint.
Line: 9
Column: 5
@unittest.skipIf(not workspace.has_gpu_support, "no gpu")
class TestMuji(test_util.TestCase):
def RunningAllreduceWithGPUs(self, gpu_ids, allreduce_function):
"""A base function to test different scenarios."""
net = core.Net("mujitest")
for id in gpu_ids:
net.ConstantFill(
[],
Reported by Pylint.
Line: 9
Column: 5
@unittest.skipIf(not workspace.has_gpu_support, "no gpu")
class TestMuji(test_util.TestCase):
def RunningAllreduceWithGPUs(self, gpu_ids, allreduce_function):
"""A base function to test different scenarios."""
net = core.Net("mujitest")
for id in gpu_ids:
net.ConstantFill(
[],
Reported by Pylint.
Line: 12
Column: 13
def RunningAllreduceWithGPUs(self, gpu_ids, allreduce_function):
"""A base function to test different scenarios."""
net = core.Net("mujitest")
for id in gpu_ids:
net.ConstantFill(
[],
"testblob_gpu_" + str(id),
shape=[1, 2, 3, 4],
value=float(id + 1),
Reported by Pylint.
Line: 39
Column: 5
err_msg="gpu id %d of %s" % (idx, str(gpu_ids))
)
def testAllreduceFallback(self):
self.RunningAllreduceWithGPUs(
list(range(workspace.NumGpuDevices())), muji.AllreduceFallback
)
def testAllreduceSingleGPU(self):
Reported by Pylint.
Line: 39
Column: 5
err_msg="gpu id %d of %s" % (idx, str(gpu_ids))
)
def testAllreduceFallback(self):
self.RunningAllreduceWithGPUs(
list(range(workspace.NumGpuDevices())), muji.AllreduceFallback
)
def testAllreduceSingleGPU(self):
Reported by Pylint.
Line: 44
Column: 5
list(range(workspace.NumGpuDevices())), muji.AllreduceFallback
)
def testAllreduceSingleGPU(self):
for i in range(workspace.NumGpuDevices()):
self.RunningAllreduceWithGPUs([i], muji.Allreduce)
def testAllreduceWithTwoGPUs(self):
pattern = workspace.GetGpuPeerAccessPattern()
Reported by Pylint.
benchmarks/upload_scribe.py
19 issues
Line: 26
Column: 25
assert 'time' in field_dict, "Missing required Scribe field 'time'"
message = defaultdict(dict)
for field, value in field_dict.items():
if field in self.schema['normal']:
message['normal'][field] = str(value)
elif field in self.schema['int']:
message['int'][field] = int(value)
elif field in self.schema['float']:
message['float'][field] = float(value)
Reported by Pylint.
Line: 28
Column: 27
for field, value in field_dict.items():
if field in self.schema['normal']:
message['normal'][field] = str(value)
elif field in self.schema['int']:
message['int'][field] = int(value)
elif field in self.schema['float']:
message['float'][field] = float(value)
else:
Reported by Pylint.
Line: 30
Column: 27
message['normal'][field] = str(value)
elif field in self.schema['int']:
message['int'][field] = int(value)
elif field in self.schema['float']:
message['float'][field] = float(value)
else:
raise ValueError("Field {} is not currently used, "
"be intentional about adding new fields".format(field))
Reported by Pylint.
Line: 42
Column: 13
for m in messages:
json_str = json.dumps(m)
cmd = ['scribe_cat', self.category, json_str]
subprocess.run(cmd)
def upload(self, messages):
if os.environ.get('SCRIBE_INTERN'):
return self._upload_intern(messages)
access_token = os.environ.get("SCRIBE_GRAPHQL_ACCESS_TOKEN")
Reported by Pylint.
Line: 14
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b404-import-subprocess
import json
import os
import requests
import subprocess
from collections import defaultdict
class ScribeUploader:
def __init__(self, category):
Reported by Bandit.
Line: 14
Column: 1
import json
import os
import requests
import subprocess
from collections import defaultdict
class ScribeUploader:
def __init__(self, category):
Reported by Pylint.
Line: 15
Column: 1
import os
import requests
import subprocess
from collections import defaultdict
class ScribeUploader:
def __init__(self, category):
self.category = category
Reported by Pylint.
Line: 18
Column: 1
from collections import defaultdict
class ScribeUploader:
def __init__(self, category):
self.category = category
def format_message(self, field_dict):
assert 'time' in field_dict, "Missing required Scribe field 'time'"
Reported by Pylint.
Line: 22
Column: 5
def __init__(self, category):
self.category = category
def format_message(self, field_dict):
assert 'time' in field_dict, "Missing required Scribe field 'time'"
message = defaultdict(dict)
for field, value in field_dict.items():
if field in self.schema['normal']:
message['normal'][field] = str(value)
Reported by Pylint.
Line: 23
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
self.category = category
def format_message(self, field_dict):
assert 'time' in field_dict, "Missing required Scribe field 'time'"
message = defaultdict(dict)
for field, value in field_dict.items():
if field in self.schema['normal']:
message['normal'][field] = str(value)
elif field in self.schema['int']:
Reported by Bandit.
benchmarks/fastrnns/test_bench.py
19 issues
Line: 1
Column: 1
import pytest
import torch
from .fuser import set_fuser
from .runner import get_nn_runners
@pytest.fixture(scope='class')
def modeldef(request, net_name, executor, fuser):
set_fuser(fuser, executor)
Reported by Pylint.
Line: 2
Column: 1
import pytest
import torch
from .fuser import set_fuser
from .runner import get_nn_runners
@pytest.fixture(scope='class')
def modeldef(request, net_name, executor, fuser):
set_fuser(fuser, executor)
Reported by Pylint.
Line: 3
Column: 1
import pytest
import torch
from .fuser import set_fuser
from .runner import get_nn_runners
@pytest.fixture(scope='class')
def modeldef(request, net_name, executor, fuser):
set_fuser(fuser, executor)
Reported by Pylint.
Line: 4
Column: 1
import pytest
import torch
from .fuser import set_fuser
from .runner import get_nn_runners
@pytest.fixture(scope='class')
def modeldef(request, net_name, executor, fuser):
set_fuser(fuser, executor)
Reported by Pylint.
Line: 7
Column: 14
from .runner import get_nn_runners
@pytest.fixture(scope='class')
def modeldef(request, net_name, executor, fuser):
set_fuser(fuser, executor)
# Given a 'net_name' provided by generate_tests, build the thing
name, rnn_creator, context = get_nn_runners(net_name)[0]
creator_args = creator_args = {
Reported by Pylint.
Line: 11
Column: 5
set_fuser(fuser, executor)
# Given a 'net_name' provided by generate_tests, build the thing
name, rnn_creator, context = get_nn_runners(net_name)[0]
creator_args = creator_args = {
'seqLength': 100, 'numLayers': 1,
'inputSize': 512, 'hiddenSize': 512,
'miniBatch': 64, 'device': 'cuda', 'seed': None
}
Reported by Pylint.
Line: 11
Column: 24
set_fuser(fuser, executor)
# Given a 'net_name' provided by generate_tests, build the thing
name, rnn_creator, context = get_nn_runners(net_name)[0]
creator_args = creator_args = {
'seqLength': 100, 'numLayers': 1,
'inputSize': 512, 'hiddenSize': 512,
'miniBatch': 64, 'device': 'cuda', 'seed': None
}
Reported by Pylint.
Line: 33
Column: 28
)
class TestBenchNetwork:
# See 'modeldef' fixture, which provides the things to benchmark
def test_forward(self, modeldef, benchmark):
forward_output = benchmark(cuda_sync, modeldef.forward, *modeldef.inputs)
def test_backward(self, modeldef, benchmark):
backward_input = modeldef.forward(*modeldef.inputs)
if modeldef.backward_setup is not None:
Reported by Pylint.
Line: 34
Column: 9
class TestBenchNetwork:
# See 'modeldef' fixture, which provides the things to benchmark
def test_forward(self, modeldef, benchmark):
forward_output = benchmark(cuda_sync, modeldef.forward, *modeldef.inputs)
def test_backward(self, modeldef, benchmark):
backward_input = modeldef.forward(*modeldef.inputs)
if modeldef.backward_setup is not None:
backward_input = modeldef.backward_setup(backward_input)
Reported by Pylint.
Line: 36
Column: 29
def test_forward(self, modeldef, benchmark):
forward_output = benchmark(cuda_sync, modeldef.forward, *modeldef.inputs)
def test_backward(self, modeldef, benchmark):
backward_input = modeldef.forward(*modeldef.inputs)
if modeldef.backward_setup is not None:
backward_input = modeldef.backward_setup(backward_input)
if modeldef.backward is not None:
Reported by Pylint.
benchmarks/operator_benchmark/pt/qembeddingbag_test.py
19 issues
Line: 3
Column: 1
import operator_benchmark as op_bench
import torch
import torch.nn.quantized as nnq
import numpy
from pt import configs
"""
Microbenchmarks for qEmbeddingBag operators.
Reported by Pylint.
Line: 4
Column: 1
import operator_benchmark as op_bench
import torch
import torch.nn.quantized as nnq
import numpy
from pt import configs
"""
Microbenchmarks for qEmbeddingBag operators.
Reported by Pylint.
Line: 6
Column: 1
import torch
import torch.nn.quantized as nnq
import numpy
from pt import configs
"""
Microbenchmarks for qEmbeddingBag operators.
"""
Reported by Pylint.
Line: 12
Column: 30
Microbenchmarks for qEmbeddingBag operators.
"""
class QEmbeddingBagBenchmark(op_bench.TorchBenchmarkBase):
def init(self, embeddingbags, dim, mode, input_size, offset, sparse, include_last_offset, device):
self.embedding = nnq.EmbeddingBag(
num_embeddings=embeddingbags,
embedding_dim=dim,
mode=mode,
Reported by Pylint.
Line: 33
Column: 1
return self.embedding(input, offset)
op_bench.generate_pt_test(configs.embeddingbag_short_configs, QEmbeddingBagBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 8
Column: 1
import numpy
from pt import configs
"""
Microbenchmarks for qEmbeddingBag operators.
"""
class QEmbeddingBagBenchmark(op_bench.TorchBenchmarkBase):
def init(self, embeddingbags, dim, mode, input_size, offset, sparse, include_last_offset, device):
Reported by Pylint.
Line: 13
Column: 66
"""
class QEmbeddingBagBenchmark(op_bench.TorchBenchmarkBase):
def init(self, embeddingbags, dim, mode, input_size, offset, sparse, include_last_offset, device):
self.embedding = nnq.EmbeddingBag(
num_embeddings=embeddingbags,
embedding_dim=dim,
mode=mode,
include_last_offset=include_last_offset).to(device=device)
Reported by Pylint.
Line: 14
Column: 9
class QEmbeddingBagBenchmark(op_bench.TorchBenchmarkBase):
def init(self, embeddingbags, dim, mode, input_size, offset, sparse, include_last_offset, device):
self.embedding = nnq.EmbeddingBag(
num_embeddings=embeddingbags,
embedding_dim=dim,
mode=mode,
include_last_offset=include_last_offset).to(device=device)
numpy.random.seed((1 << 32) - 1)
Reported by Pylint.
Line: 20
Column: 9
mode=mode,
include_last_offset=include_last_offset).to(device=device)
numpy.random.seed((1 << 32) - 1)
self.input = torch.tensor(numpy.random.randint(0, embeddingbags, input_size), device=device).long()
offset = torch.LongTensor([offset], device=device)
self.offset = torch.cat((offset, torch.tensor([self.input.size(0)], dtype=torch.long)), 0)
self.inputs = {
"input": self.input,
"offset": self.offset
Reported by Pylint.
Line: 22
Column: 9
numpy.random.seed((1 << 32) - 1)
self.input = torch.tensor(numpy.random.randint(0, embeddingbags, input_size), device=device).long()
offset = torch.LongTensor([offset], device=device)
self.offset = torch.cat((offset, torch.tensor([self.input.size(0)], dtype=torch.long)), 0)
self.inputs = {
"input": self.input,
"offset": self.offset
}
self.set_module_name('qEmbeddingBag')
Reported by Pylint.
caffe2/python/crf.py
19 issues
Line: 9
Column: 1
from caffe2.python import brew, core, model_helper, recurrent
"""
Due to a limitation in RecurrentNetworkOp, this layer only supports batch_size=1
In order to support batch_size > 1, we will have to implement the CRFUnit
and its gradient in C++ and handle the different batches there.
"""
Reported by Pylint.
Line: 84
Column: 56
)
return loss
def _path_binary_scores(self, labels, transitions, seq_lengths=None):
column_ids, _ = self.model.net.RemovePadding(
[labels], outputs=2, padding_width=1, end_padding_width=0
)
row_ids, _ = self.model.net.RemovePadding(
[labels], outputs=2, padding_width=0, end_padding_width=1
Reported by Pylint.
Line: 118
Column: 60
return final_sum
def _crf_forward(
self, input_blob, initial_state, transitions_copy, seq_lengths=None
):
# Build the RNN net and get the last timestep output
out_last = self.build_crf_net(input_blob, initial_state, transitions_copy)
out_last, _ = self.model.net.Reshape(
[out_last], outputs=2, shape=(self.num_classes_padded,)
Reported by Pylint.
Line: 174
Column: 9
# A hack to bypass model cloning for test
step_model.param_init_net.AddExternalOutput(zero_segment_id)
""" the CRF step """
# Do tile
prev_transpose = brew.transpose(
step_model, cell_t_prev, [s("prev_transpose")], axes=(0, 2, 1)
)
prev_tiled = step_model.net.Tile(
Reported by Pylint.
Line: 203
Column: 9
[all_with_transitions_reshaped, zero_segment_id], [s("cell_t")]
)
step_model.net.AddExternalOutputs(cell_t)
""" recurrent network """
cell_input_blob = initial_state
out_all, out_last = recurrent.recurrent_net(
net=self.model.net,
cell_net=step_model.net,
inputs=[(input_t, input_blob)],
Reported by Pylint.
Line: 205
Column: 9
step_model.net.AddExternalOutputs(cell_t)
""" recurrent network """
cell_input_blob = initial_state
out_all, out_last = recurrent.recurrent_net(
net=self.model.net,
cell_net=step_model.net,
inputs=[(input_t, input_blob)],
initial_cell_inputs=[(cell_t_prev, cell_input_blob)],
links={cell_t_prev: cell_t},
Reported by Pylint.
Line: 1
Column: 1
## @package crf
# Module caffe2.python.crf
import numpy as np
from caffe2.python import brew, core, model_helper, recurrent
"""
Reported by Pylint.
Line: 16
Column: 1
"""
class CRFWithLoss(object):
def __init__(self, model, num_classes, transitions_blob=None):
self.model = model
self.num_classes = num_classes
self.num_classes_padded = num_classes + 2 # After adding BOS and EOS
if not transitions_blob:
Reported by Pylint.
Line: 16
Column: 1
"""
class CRFWithLoss(object):
def __init__(self, model, num_classes, transitions_blob=None):
self.model = model
self.num_classes = num_classes
self.num_classes_padded = num_classes + 2 # After adding BOS and EOS
if not transitions_blob:
Reported by Pylint.
Line: 32
Column: 5
self.transitions = transitions_blob
self.model.params.append(self.transitions)
def crf_loss(self, predictions, labels, seq_lengths=None):
# Since the transitions matrix is a shared parameter, need to
# take a snapshot of it at the beginning since it can be updated
# in between the operators that uses it when doing parallel updates
transitions_snapshot = self.model.net.Copy(
self.transitions, core.ScopedBlobReference("transitions_snapshot")
Reported by Pylint.