The following issues were found:
caffe2/python/operator_test/alias_with_name_test.py
9 issues
Line: 4
Column: 1
#!/usr/bin/env python3
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, utils
from hypothesis import given
Reported by Pylint.
Line: 7
Column: 1
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, utils
from hypothesis import given
class TestAliasWithNameOp(hu.HypothesisTestCase):
@given(
shape=st.lists(st.integers(0, 5), min_size=1, max_size=3),
Reported by Pylint.
Line: 16
Column: 53
dtype=st.sampled_from([np.float32, np.int64]),
**hu.gcs
)
def test_alias_with_name_op(self, shape, dtype, dc, gc):
test_input = (100 * np.random.random(shape)).astype(dtype)
test_inputs = [test_input]
alias_op = core.CreateOperator(
"AliasWithName",
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/env python3
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, utils
from hypothesis import given
Reported by Pylint.
Line: 10
Column: 1
from hypothesis import given
class TestAliasWithNameOp(hu.HypothesisTestCase):
@given(
shape=st.lists(st.integers(0, 5), min_size=1, max_size=3),
dtype=st.sampled_from([np.float32, np.int64]),
**hu.gcs
)
Reported by Pylint.
Line: 15
Column: 5
shape=st.lists(st.integers(0, 5), min_size=1, max_size=3),
dtype=st.sampled_from([np.float32, np.int64]),
**hu.gcs
)
def test_alias_with_name_op(self, shape, dtype, dc, gc):
test_input = (100 * np.random.random(shape)).astype(dtype)
test_inputs = [test_input]
alias_op = core.CreateOperator(
Reported by Pylint.
Line: 15
Column: 5
shape=st.lists(st.integers(0, 5), min_size=1, max_size=3),
dtype=st.sampled_from([np.float32, np.int64]),
**hu.gcs
)
def test_alias_with_name_op(self, shape, dtype, dc, gc):
test_input = (100 * np.random.random(shape)).astype(dtype)
test_inputs = [test_input]
alias_op = core.CreateOperator(
Reported by Pylint.
Line: 15
Column: 5
shape=st.lists(st.integers(0, 5), min_size=1, max_size=3),
dtype=st.sampled_from([np.float32, np.int64]),
**hu.gcs
)
def test_alias_with_name_op(self, shape, dtype, dc, gc):
test_input = (100 * np.random.random(shape)).astype(dtype)
test_inputs = [test_input]
alias_op = core.CreateOperator(
Reported by Pylint.
Line: 28
Column: 9
)
alias_op.arg.add().CopyFrom(utils.MakeArgument("name", "whatever_name"))
def reference_func(x):
return (x,)
self.assertReferenceChecks(gc, alias_op, test_inputs, reference_func)
Reported by Pylint.
test/distributed/elastic/rendezvous/etcd_rendezvous_test.py
9 issues
Line: 10
Column: 1
import unittest
import uuid
from torch.distributed.elastic.rendezvous import RendezvousParameters
from torch.distributed.elastic.rendezvous.etcd_rendezvous import create_rdzv_handler
from torch.distributed.elastic.rendezvous.etcd_server import EtcdServer
if os.getenv("CIRCLECI"):
print("T85992919 temporarily disabling in circle ci", file=sys.stderr)
Reported by Pylint.
Line: 11
Column: 1
import uuid
from torch.distributed.elastic.rendezvous import RendezvousParameters
from torch.distributed.elastic.rendezvous.etcd_rendezvous import create_rdzv_handler
from torch.distributed.elastic.rendezvous.etcd_server import EtcdServer
if os.getenv("CIRCLECI"):
print("T85992919 temporarily disabling in circle ci", file=sys.stderr)
sys.exit(0)
Reported by Pylint.
Line: 12
Column: 1
from torch.distributed.elastic.rendezvous import RendezvousParameters
from torch.distributed.elastic.rendezvous.etcd_rendezvous import create_rdzv_handler
from torch.distributed.elastic.rendezvous.etcd_server import EtcdServer
if os.getenv("CIRCLECI"):
print("T85992919 temporarily disabling in circle ci", file=sys.stderr)
sys.exit(0)
Reported by Pylint.
Line: 15
Column: 64
from torch.distributed.elastic.rendezvous.etcd_server import EtcdServer
if os.getenv("CIRCLECI"):
print("T85992919 temporarily disabling in circle ci", file=sys.stderr)
sys.exit(0)
class EtcdRendezvousTest(unittest.TestCase):
@classmethod
Reported by Pylint.
Line: 16
Column: 5
if os.getenv("CIRCLECI"):
print("T85992919 temporarily disabling in circle ci", file=sys.stderr)
sys.exit(0)
class EtcdRendezvousTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
Reported by Pylint.
Line: 1
Column: 1
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
import uuid
Reported by Pylint.
Line: 19
Column: 1
sys.exit(0)
class EtcdRendezvousTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# start a standalone, single process etcd server to use for all tests
cls._etcd_server = EtcdServer()
cls._etcd_server.start()
Reported by Pylint.
Line: 46
Column: 5
etcd_rdzv = create_rdzv_handler(rdzv_params)
self.assertIsNotNone(etcd_rdzv)
def test_etcd_rdzv_additional_params(self):
run_id = str(uuid.uuid4())
rdzv_params = RendezvousParameters(
backend="etcd",
endpoint=f"{self._etcd_server.get_endpoint()}",
run_id=run_id,
Reported by Pylint.
Line: 64
Column: 5
self.assertIsNotNone(etcd_rdzv)
self.assertEqual(run_id, etcd_rdzv.get_run_id())
def test_get_backend(self):
run_id = str(uuid.uuid4())
rdzv_params = RendezvousParameters(
backend="etcd",
endpoint=f"{self._etcd_server.get_endpoint()}",
run_id=run_id,
Reported by Pylint.
caffe2/python/operator_test/prepend_dim_test.py
9 issues
Line: 1
Column: 1
import numpy as np
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
from caffe2.proto import caffe2_pb2
Reported by Pylint.
Line: 12
Column: 1
from caffe2.proto import caffe2_pb2
class TestPrependDim(TestCase):
def _test_fwd_bwd(self):
old_shape = (128, 2, 4)
new_shape = (8, 16, 2, 4)
X = np.random.rand(*old_shape).astype(np.float32)
Y = np.random.rand(*new_shape).astype(np.float32)
Reported by Pylint.
Line: 13
Column: 5
class TestPrependDim(TestCase):
def _test_fwd_bwd(self):
old_shape = (128, 2, 4)
new_shape = (8, 16, 2, 4)
X = np.random.rand(*old_shape).astype(np.float32)
Y = np.random.rand(*new_shape).astype(np.float32)
Reported by Pylint.
Line: 16
Column: 9
def _test_fwd_bwd(self):
old_shape = (128, 2, 4)
new_shape = (8, 16, 2, 4)
X = np.random.rand(*old_shape).astype(np.float32)
Y = np.random.rand(*new_shape).astype(np.float32)
net = core.Net('net')
net.GivenTensorFill([], 'X', shape=old_shape, values=X.flatten())
Reported by Pylint.
Line: 17
Column: 9
old_shape = (128, 2, 4)
new_shape = (8, 16, 2, 4)
X = np.random.rand(*old_shape).astype(np.float32)
Y = np.random.rand(*new_shape).astype(np.float32)
net = core.Net('net')
net.GivenTensorFill([], 'X', shape=old_shape, values=X.flatten())
net.GivenTensorFill([], 'Y', shape=new_shape, values=Y.flatten())
Reported by Pylint.
Line: 30
Column: 9
workspace.RunNetOnce(net)
X_out = workspace.FetchBlob('X_out')
X_grad = workspace.FetchBlob('X_grad')
Y_grad = workspace.FetchBlob('Y_grad')
# Check the shape of the gradient
np.testing.assert_array_equal(X_out.shape, Y.shape)
Reported by Pylint.
Line: 31
Column: 9
workspace.RunNetOnce(net)
X_out = workspace.FetchBlob('X_out')
X_grad = workspace.FetchBlob('X_grad')
Y_grad = workspace.FetchBlob('Y_grad')
# Check the shape of the gradient
np.testing.assert_array_equal(X_out.shape, Y.shape)
np.testing.assert_array_equal(X_grad.shape, X.shape)
Reported by Pylint.
Line: 32
Column: 9
X_out = workspace.FetchBlob('X_out')
X_grad = workspace.FetchBlob('X_grad')
Y_grad = workspace.FetchBlob('Y_grad')
# Check the shape of the gradient
np.testing.assert_array_equal(X_out.shape, Y.shape)
np.testing.assert_array_equal(X_grad.shape, X.shape)
np.testing.assert_array_equal(Y_grad.shape, Y.shape)
Reported by Pylint.
Line: 39
Column: 5
np.testing.assert_array_equal(X_grad.shape, X.shape)
np.testing.assert_array_equal(Y_grad.shape, Y.shape)
def test_prepend_dim(self):
devices = [core.DeviceOption(caffe2_pb2.CPU, 0)]
if workspace.NumGpuDevices() > 0:
devices.append(core.DeviceOption(workspace.GpuDeviceType, 0))
for device_opt in devices:
Reported by Pylint.
test/linear.py
9 issues
Line: 1
Column: 1
import torch
class LinearMod(torch.nn.Linear):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
return torch._C._nn.linear(input, self.weight, self.bias)
print(torch.jit.trace(LinearMod(20, 20), torch.rand([20, 20])).graph)
Reported by Pylint.
Line: 3
Column: 5
import torch
class LinearMod(torch.nn.Linear):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
return torch._C._nn.linear(input, self.weight, self.bias)
print(torch.jit.trace(LinearMod(20, 20), torch.rand([20, 20])).graph)
Reported by Pylint.
Line: 6
Column: 23
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
return torch._C._nn.linear(input, self.weight, self.bias)
print(torch.jit.trace(LinearMod(20, 20), torch.rand([20, 20])).graph)
Reported by Pylint.
Line: 7
Column: 16
super().__init__(*args, **kwargs)
def forward(self, input):
return torch._C._nn.linear(input, self.weight, self.bias)
print(torch.jit.trace(LinearMod(20, 20), torch.rand([20, 20])).graph)
Reported by Pylint.
Line: 7
Column: 16
super().__init__(*args, **kwargs)
def forward(self, input):
return torch._C._nn.linear(input, self.weight, self.bias)
print(torch.jit.trace(LinearMod(20, 20), torch.rand([20, 20])).graph)
Reported by Pylint.
Line: 1
Column: 1
import torch
class LinearMod(torch.nn.Linear):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
return torch._C._nn.linear(input, self.weight, self.bias)
print(torch.jit.trace(LinearMod(20, 20), torch.rand([20, 20])).graph)
Reported by Pylint.
Line: 2
Column: 1
import torch
class LinearMod(torch.nn.Linear):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
return torch._C._nn.linear(input, self.weight, self.bias)
print(torch.jit.trace(LinearMod(20, 20), torch.rand([20, 20])).graph)
Reported by Pylint.
Line: 2
Column: 1
import torch
class LinearMod(torch.nn.Linear):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
return torch._C._nn.linear(input, self.weight, self.bias)
print(torch.jit.trace(LinearMod(20, 20), torch.rand([20, 20])).graph)
Reported by Pylint.
Line: 6
Column: 5
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
return torch._C._nn.linear(input, self.weight, self.bias)
print(torch.jit.trace(LinearMod(20, 20), torch.rand([20, 20])).graph)
Reported by Pylint.
test/cpp/api/tensor.cpp
9 issues
Line: 573
Column: 24
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
auto expected = torch::empty(tensor.sizes(), torch::kBool);
expected[0][0] = true;
expected[0][1] = false;
ASSERT_TRUE(torch::equal(tensor, expected));
ASSERT_FALSE(tensor.requires_grad());
}
{
auto tensor = torch::tensor({{true}, {false}});
ASSERT_EQ(tensor.dtype(), torch::kBool);
Reported by FlawFinder.
Line: 583
Column: 24
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
auto expected = torch::empty(tensor.sizes(), torch::kBool);
expected[0][0] = true;
expected[1][0] = false;
ASSERT_TRUE(torch::equal(tensor, expected));
ASSERT_FALSE(tensor.requires_grad());
}
}
TEST(TensorTest, TorchTensorCtorMultiDimWithOptions) {
Reported by FlawFinder.
Line: 959
Column: 22
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
TEST(TensorTest, Data) {
const auto tensor = torch::rand({3, 3});
ASSERT_TRUE(torch::equal(tensor, tensor.data()));
}
TEST(TensorTest, BackwardAndGrad) {
auto x = torch::tensor({5}, torch::dtype(torch::kFloat).requires_grad(true));
auto y = x * x;
Reported by FlawFinder.
Line: 972
Column: 22
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
TEST(TensorTest, BackwardCreatesOnesGrad) {
const auto x = torch::tensor({5}, torch::dtype(torch::kFloat).requires_grad(true));
x.backward();
ASSERT_TRUE(torch::equal(x.grad(),
torch::ones_like(x)));
}
TEST(TensorTest, BackwardNonScalarOutputs) {
auto x = torch::randn({5, 5}, torch::requires_grad());
Reported by FlawFinder.
Line: 1028
Column: 23
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
TEST(TensorTest, SetData) {
auto x = torch::randn({5});
auto y = torch::randn({5});
ASSERT_FALSE(torch::equal(x, y));
ASSERT_NE(x.data_ptr<float>(), y.data_ptr<float>());
x.set_data(y);
ASSERT_TRUE(torch::equal(x, y));
ASSERT_EQ(x.data_ptr<float>(), y.data_ptr<float>());
Reported by FlawFinder.
Line: 1032
Column: 22
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
ASSERT_NE(x.data_ptr<float>(), y.data_ptr<float>());
x.set_data(y);
ASSERT_TRUE(torch::equal(x, y));
ASSERT_EQ(x.data_ptr<float>(), y.data_ptr<float>());
}
TEST(TensorTest, RequiresGradInplace) {
auto x = torch::tensor({5.0});
Reported by FlawFinder.
Line: 1079
Column: 22
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
// Tests the behavior of the _reshape_alias private operator so
// that it matches the behavior of as_strided and view.
auto x = torch::randn({3, 3});
ASSERT_TRUE(torch::equal(
torch::_reshape_alias(x, {2, 2}, {1, 2}),
torch::as_strided(x, {2, 2}, {1, 2})
));
ASSERT_TRUE(torch::equal(
torch::_reshape_alias(x, {9}, {1}),
Reported by FlawFinder.
Line: 1083
Column: 22
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
torch::_reshape_alias(x, {2, 2}, {1, 2}),
torch::as_strided(x, {2, 2}, {1, 2})
));
ASSERT_TRUE(torch::equal(
torch::_reshape_alias(x, {9}, {1}),
x.view({-1})
));
// Test that the backward works fine.
Reported by FlawFinder.
Line: 1093
Column: 22
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
auto z = torch::clone(y).detach().requires_grad_(true);
(y * y).view({-1}).mean().backward();
torch::_reshape_alias((z * z), {9}, {1}).mean().backward();
ASSERT_TRUE(torch::equal(
y.grad(),
z.grad()
));
}
Reported by FlawFinder.
test/cpp/api/init_baseline.py
9 issues
Line: 4
Column: 1
"""Script to generate baseline values from PyTorch initialization algorithms"""
import sys
import torch
HEADER = """
#include <torch/types.h>
#include <vector>
Reported by Pylint.
Line: 19
Column: 23
PARAMETERS = "inline std::vector<std::vector<torch::Tensor>> {}() {{"
INITIALIZERS = {
"Xavier_Uniform": lambda w: torch.nn.init.xavier_uniform(w),
"Xavier_Normal": lambda w: torch.nn.init.xavier_normal(w),
"Kaiming_Normal": lambda w: torch.nn.init.kaiming_normal(w),
"Kaiming_Uniform": lambda w: torch.nn.init.kaiming_uniform(w)
}
Reported by Pylint.
Line: 20
Column: 22
INITIALIZERS = {
"Xavier_Uniform": lambda w: torch.nn.init.xavier_uniform(w),
"Xavier_Normal": lambda w: torch.nn.init.xavier_normal(w),
"Kaiming_Normal": lambda w: torch.nn.init.kaiming_normal(w),
"Kaiming_Uniform": lambda w: torch.nn.init.kaiming_uniform(w)
}
Reported by Pylint.
Line: 21
Column: 23
INITIALIZERS = {
"Xavier_Uniform": lambda w: torch.nn.init.xavier_uniform(w),
"Xavier_Normal": lambda w: torch.nn.init.xavier_normal(w),
"Kaiming_Normal": lambda w: torch.nn.init.kaiming_normal(w),
"Kaiming_Uniform": lambda w: torch.nn.init.kaiming_uniform(w)
}
def emit(initializer_parameter_map):
Reported by Pylint.
Line: 22
Column: 24
"Xavier_Uniform": lambda w: torch.nn.init.xavier_uniform(w),
"Xavier_Normal": lambda w: torch.nn.init.xavier_normal(w),
"Kaiming_Normal": lambda w: torch.nn.init.kaiming_normal(w),
"Kaiming_Uniform": lambda w: torch.nn.init.kaiming_uniform(w)
}
def emit(initializer_parameter_map):
# Don't write generated with an @ in front, else this file is recognized as generated.
Reported by Pylint.
Line: 26
Column: 1
}
def emit(initializer_parameter_map):
# Don't write generated with an @ in front, else this file is recognized as generated.
print("// @{} from {}".format('generated', __file__))
print(HEADER)
for initializer_name, weights in initializer_parameter_map.items():
print(PARAMETERS.format(initializer_name))
Reported by Pylint.
Line: 44
Column: 1
print(FOOTER)
def run(initializer):
torch.manual_seed(0)
layer1 = torch.nn.Linear(7, 15)
INITIALIZERS[initializer](layer1.weight)
Reported by Pylint.
Line: 63
Column: 1
return [weight1, weight2, weight3]
def main():
initializer_parameter_map = {}
for initializer in INITIALIZERS.keys():
sys.stderr.write('Evaluating {} ...\n'.format(initializer))
initializer_parameter_map[initializer] = run(initializer)
Reported by Pylint.
Line: 65
Column: 24
def main():
initializer_parameter_map = {}
for initializer in INITIALIZERS.keys():
sys.stderr.write('Evaluating {} ...\n'.format(initializer))
initializer_parameter_map[initializer] = run(initializer)
emit(initializer_parameter_map)
Reported by Pylint.
caffe2/python/numa_benchmark.py
9 issues
Line: 1
Column: 1
from caffe2.python import core, workspace
from caffe2.proto import caffe2_pb2
import time
SHAPE_LEN = 4096
Reported by Pylint.
Line: 7
Column: 1
from caffe2.python import core, workspace
from caffe2.proto import caffe2_pb2
import time
SHAPE_LEN = 4096
NUM_ITER = 1000
GB = 1024 * 1024 * 1024
NUM_REPLICAS = 48
Reported by Pylint.
Line: 15
Column: 1
NUM_REPLICAS = 48
def build_net(net_name, cross_socket):
init_net = core.Net(net_name + "_init")
init_net.Proto().type = "async_scheduling"
numa_device_option = caffe2_pb2.DeviceOption()
numa_device_option.device_type = caffe2_pb2.CPU
numa_device_option.numa_node_id = 0
Reported by Pylint.
Line: 36
Column: 1
return init_net, net
def main():
assert workspace.IsNUMAEnabled() and workspace.GetNumNUMANodes() >= 2
single_init, single_net = build_net("single_net", False)
cross_init, cross_net = build_net("cross_net", True)
Reported by Pylint.
Line: 37
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
def main():
assert workspace.IsNUMAEnabled() and workspace.GetNumNUMANodes() >= 2
single_init, single_net = build_net("single_net", False)
cross_init, cross_net = build_net("cross_net", True)
workspace.CreateNet(single_init)
Reported by Bandit.
Line: 51
Column: 9
workspace.CreateNet(cross_net)
for _ in range(4):
t = time.time()
workspace.RunNet(single_net.Name(), NUM_ITER)
dt = time.time() - t
print("Single socket time:", dt)
single_bw = 4 * SHAPE_LEN * SHAPE_LEN * NUM_REPLICAS * NUM_ITER / dt / GB
print("Single socket BW: {} GB/s".format(single_bw))
Reported by Pylint.
Line: 53
Column: 9
for _ in range(4):
t = time.time()
workspace.RunNet(single_net.Name(), NUM_ITER)
dt = time.time() - t
print("Single socket time:", dt)
single_bw = 4 * SHAPE_LEN * SHAPE_LEN * NUM_REPLICAS * NUM_ITER / dt / GB
print("Single socket BW: {} GB/s".format(single_bw))
t = time.time()
Reported by Pylint.
Line: 58
Column: 9
single_bw = 4 * SHAPE_LEN * SHAPE_LEN * NUM_REPLICAS * NUM_ITER / dt / GB
print("Single socket BW: {} GB/s".format(single_bw))
t = time.time()
workspace.RunNet(cross_net.Name(), NUM_ITER)
dt = time.time() - t
print("Cross socket time:", dt)
cross_bw = 4 * SHAPE_LEN * SHAPE_LEN * NUM_REPLICAS * NUM_ITER / dt / GB
print("Cross socket BW: {} GB/s".format(cross_bw))
Reported by Pylint.
Line: 60
Column: 9
t = time.time()
workspace.RunNet(cross_net.Name(), NUM_ITER)
dt = time.time() - t
print("Cross socket time:", dt)
cross_bw = 4 * SHAPE_LEN * SHAPE_LEN * NUM_REPLICAS * NUM_ITER / dt / GB
print("Cross socket BW: {} GB/s".format(cross_bw))
print("Single BW / Cross BW: {}".format(single_bw / cross_bw))
Reported by Pylint.
caffe2/python/operator_test/softplus_op_test.py
9 issues
Line: 7
Column: 1
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import unittest
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 10
Column: 1
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import unittest
class TestSoftplus(hu.HypothesisTestCase):
@given(X=hu.tensor(),
Reported by Pylint.
Line: 13
Column: 1
import unittest
class TestSoftplus(hu.HypothesisTestCase):
@given(X=hu.tensor(),
**hu.gcs)
@settings(deadline=10000)
def test_softplus(self, X, gc, dc):
Reported by Pylint.
Line: 18
Column: 5
@given(X=hu.tensor(),
**hu.gcs)
@settings(deadline=10000)
def test_softplus(self, X, gc, dc):
op = core.CreateOperator("Softplus", ["X"], ["Y"])
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
Reported by Pylint.
Line: 18
Column: 5
@given(X=hu.tensor(),
**hu.gcs)
@settings(deadline=10000)
def test_softplus(self, X, gc, dc):
op = core.CreateOperator("Softplus", ["X"], ["Y"])
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
Reported by Pylint.
Line: 18
Column: 5
@given(X=hu.tensor(),
**hu.gcs)
@settings(deadline=10000)
def test_softplus(self, X, gc, dc):
op = core.CreateOperator("Softplus", ["X"], ["Y"])
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
Reported by Pylint.
Line: 18
Column: 5
@given(X=hu.tensor(),
**hu.gcs)
@settings(deadline=10000)
def test_softplus(self, X, gc, dc):
op = core.CreateOperator("Softplus", ["X"], ["Y"])
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
Reported by Pylint.
Line: 19
Column: 9
**hu.gcs)
@settings(deadline=10000)
def test_softplus(self, X, gc, dc):
op = core.CreateOperator("Softplus", ["X"], ["Y"])
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
if __name__ == "__main__":
Reported by Pylint.
caffe2/python/operator_test/index_ops_test.py
9 issues
Line: 59
Column: 9
['index'],
['index_size']))
size = workspace.FetchBlob('index_size')
self.assertEquals(size, 6)
workspace.RunOperatorOnce(core.CreateOperator(
'IndexStore',
['index'],
['stored_entries']))
Reported by Pylint.
Line: 92
Column: 9
['index2'],
['index2_size']))
index2_size = workspace.FetchBlob('index2_size')
self.assertEquals(index2_size, 5)
# test serde
with tempfile.NamedTemporaryFile() as tmp:
workspace.RunOperatorOnce(core.CreateOperator(
'Save',
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
import numpy as np
import tempfile
Reported by Pylint.
Line: 8
Column: 1
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
import numpy as np
import tempfile
class TestIndexOps(TestCase):
def _test_index_ops(self, entries, dtype, index_create_op):
workspace.RunOperatorOnce(core.CreateOperator(
Reported by Pylint.
Line: 11
Column: 1
import tempfile
class TestIndexOps(TestCase):
def _test_index_ops(self, entries, dtype, index_create_op):
workspace.RunOperatorOnce(core.CreateOperator(
index_create_op,
[],
['index'],
Reported by Pylint.
Line: 12
Column: 5
class TestIndexOps(TestCase):
def _test_index_ops(self, entries, dtype, index_create_op):
workspace.RunOperatorOnce(core.CreateOperator(
index_create_op,
[],
['index'],
max_elements=10))
Reported by Pylint.
Line: 123
Column: 5
result3 = workspace.FetchBlob('result3')
np.testing.assert_array_equal([1, 4, 1, 5, 5], result3)
def test_string_index_ops(self):
self._test_index_ops([
'entry1', 'entry2', 'entry3', 'new_entry1',
'new_entry2', 'miss1', 'miss2', 'miss3',
], str, 'StringIndexCreate')
Reported by Pylint.
Line: 129
Column: 5
'new_entry2', 'miss1', 'miss2', 'miss3',
], str, 'StringIndexCreate')
def test_int_index_ops(self):
self._test_index_ops(list(range(8)), np.int32, 'IntIndexCreate')
def test_long_index_ops(self):
self._test_index_ops(list(range(8)), np.int64, 'LongIndexCreate')
Reported by Pylint.
Line: 132
Column: 5
def test_int_index_ops(self):
self._test_index_ops(list(range(8)), np.int32, 'IntIndexCreate')
def test_long_index_ops(self):
self._test_index_ops(list(range(8)), np.int64, 'LongIndexCreate')
if __name__ == "__main__":
import unittest
unittest.main()
Reported by Pylint.
test/jit/test_unsupported_ops.py
9 issues
Line: 4
Column: 1
import os
import sys
import torch
import unittest
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
Reported by Pylint.
Line: 10
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
Reported by Pylint.
Line: 54
Column: 26
"torch.zeros"):
torch.jit.script(zeros)
@unittest.skipIf(not torch._C.has_lapack, "PyTorch compiled without Lapack")
def test_init_ops(self):
def calculate_gain():
return torch.nn.init.calculate_gain('leaky_relu', 0.2)
def eye_():
Reported by Pylint.
Line: 1
Column: 1
import os
import sys
import torch
import unittest
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
Reported by Pylint.
Line: 5
Column: 1
import sys
import torch
import unittest
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
Reported by Pylint.
Line: 10
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
Reported by Pylint.
Line: 23
Column: 1
# the corresponding section in documentation that states the unsupported behavior.
# see: `jit_unsupported.rst`
class TestUnsupportedOps(JitTestCase):
def test_factory_ops_requires_grad_fail(self):
# Keyword argument {name} unknown is a JIT-only error message,
# so these functions are succeeding in eager and failing in JIT
# Complete issue and set of ops is https://github.com/pytorch/pytorch/issues/30761
Reported by Pylint.
Line: 24
Column: 5
# see: `jit_unsupported.rst`
class TestUnsupportedOps(JitTestCase):
def test_factory_ops_requires_grad_fail(self):
# Keyword argument {name} unknown is a JIT-only error message,
# so these functions are succeeding in eager and failing in JIT
# Complete issue and set of ops is https://github.com/pytorch/pytorch/issues/30761
# only testing some because they should be fixed all at once
Reported by Pylint.
Line: 55
Column: 5
torch.jit.script(zeros)
@unittest.skipIf(not torch._C.has_lapack, "PyTorch compiled without Lapack")
def test_init_ops(self):
def calculate_gain():
return torch.nn.init.calculate_gain('leaky_relu', 0.2)
def eye_():
return torch.nn.init.eye_(torch.zeros([2, 2]))
Reported by Pylint.