The following issues were found:
torch/optim/_multi_tensor/rprop.py
25 issues
Line: 2
Column: 1
import torch
from ..optimizer import Optimizer
from collections import defaultdict
class Rprop(Optimizer):
"""Implements the resilient backpropagation algorithm.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
Reported by Pylint.
Line: 62
Column: 41
# State initialization
if len(state) == 0:
state['step'] = 0
state['prev'] = torch.zeros_like(p, memory_format=torch.preserve_format)
state['step_size'] = p.grad.new().resize_as_(p.grad).fill_(group['lr'])
state['step'] += 1
states.append(state)
Reported by Pylint.
Line: 62
Column: 75
# State initialization
if len(state) == 0:
state['step'] = 0
state['prev'] = torch.zeros_like(p, memory_format=torch.preserve_format)
state['step_size'] = p.grad.new().resize_as_(p.grad).fill_(group['lr'])
state['step'] += 1
states.append(state)
Reported by Pylint.
Line: 70
Column: 21
states.append(state)
step_sizes.append(state['step_size'])
signs = torch._foreach_mul(grads, [s['prev'] for s in states])
signs = [s.sign() for s in signs]
for sign in signs:
sign[sign.gt(0)] = etaplus
sign[sign.lt(0)] = etaminus
sign[sign.eq(0)] = 1
Reported by Pylint.
Line: 78
Column: 13
sign[sign.eq(0)] = 1
# update stepsizes with step size updates
torch._foreach_mul_(step_sizes, signs)
for step_size in step_sizes:
step_size.clamp_(step_size_min, step_size_max)
# for dir<0, dfdx=0
# for dir>=0 dfdx=dfdx
Reported by Pylint.
Line: 85
Column: 57
# for dir<0, dfdx=0
# for dir>=0 dfdx=dfdx
for i in range(len(grads)):
grads[i] = grads[i].clone(memory_format=torch.preserve_format)
grads[i][signs[i].eq(etaminus)] = 0
# update parameters
grad_signs = [grad.sign() for grad in grads]
torch._foreach_addcmul_(params_with_grad, grad_signs, step_sizes, value=-1)
Reported by Pylint.
Line: 90
Column: 13
# update parameters
grad_signs = [grad.sign() for grad in grads]
torch._foreach_addcmul_(params_with_grad, grad_signs, step_sizes, value=-1)
for i in range(len(states)):
states[i]['prev'].copy_(grads[i])
return loss
Reported by Pylint.
Line: 118
Column: 21
for _, per_dtype_grads in per_device_and_dtype_grads.items():
for grads in per_dtype_grads.values():
torch._foreach_zero_(grads)
Reported by Pylint.
Line: 70
Column: 21
states.append(state)
step_sizes.append(state['step_size'])
signs = torch._foreach_mul(grads, [s['prev'] for s in states])
signs = [s.sign() for s in signs]
for sign in signs:
sign[sign.gt(0)] = etaplus
sign[sign.lt(0)] = etaminus
sign[sign.eq(0)] = 1
Reported by Pylint.
Line: 78
Column: 13
sign[sign.eq(0)] = 1
# update stepsizes with step size updates
torch._foreach_mul_(step_sizes, signs)
for step_size in step_sizes:
step_size.clamp_(step_size_min, step_size_max)
# for dir<0, dfdx=0
# for dir>=0 dfdx=dfdx
Reported by Pylint.
torch/ao/sparsity/experimental/pruner/base_pruner.py
25 issues
Line: 11
Column: 1
from torch.nn.modules.container import ModuleDict, ModuleList
from .parametrization import PruningParametrization, LinearActivationReconstruction, Conv2dActivationReconstruction
SUPPORTED_MODULES = {
nn.Linear,
nn.Conv2d
}
Reported by Pylint.
Line: 135
Column: 48
module = config['module']
if getattr(module, 'mask', None) is None:
module.register_buffer('mask', torch.tensor(module.weight.shape[0]))
param = config.get('parametrization', PruningParametrization)
parametrize.register_parametrization(module, 'weight',
param(module.mask),
unsafe=True)
Reported by Pylint.
Line: 76
Column: 21
stack = [model]
while stack:
module = stack.pop()
for name, child in module.named_children():
if type(child) in SUPPORTED_MODULES:
self.config.append(child)
else:
stack.append(child)
Reported by Pylint.
Line: 117
Column: 33
format_string += ')'
return format_string
def bias_hook(self, module, input, output):
if getattr(module, '_bias', None) is not None:
idx = [1] * len(output.shape)
idx[1] = output.shape[1]
bias = module._bias.reshape(idx)
output += bias
Reported by Pylint.
Line: 117
Column: 33
format_string += ')'
return format_string
def bias_hook(self, module, input, output):
if getattr(module, '_bias', None) is not None:
idx = [1] * len(output.shape)
idx[1] = output.shape[1]
bias = module._bias.reshape(idx)
output += bias
Reported by Pylint.
Line: 121
Column: 20
if getattr(module, '_bias', None) is not None:
idx = [1] * len(output.shape)
idx[1] = output.shape[1]
bias = module._bias.reshape(idx)
output += bias
return output
def prepare(self, use_path=False, *args, **kwargs):
r"""Adds mask parametrization to the layer weight
Reported by Pylint.
Line: 125
Column: 5
output += bias
return output
def prepare(self, use_path=False, *args, **kwargs):
r"""Adds mask parametrization to the layer weight
"""
for config in self.module_groups:
if use_path:
module = _path_to_module(self.model, config['path'])
Reported by Pylint.
Line: 125
Column: 1
output += bias
return output
def prepare(self, use_path=False, *args, **kwargs):
r"""Adds mask parametrization to the layer weight
"""
for config in self.module_groups:
if use_path:
module = _path_to_module(self.model, config['path'])
Reported by Pylint.
Line: 125
Column: 1
output += bias
return output
def prepare(self, use_path=False, *args, **kwargs):
r"""Adds mask parametrization to the layer weight
"""
for config in self.module_groups:
if use_path:
module = _path_to_module(self.model, config['path'])
Reported by Pylint.
Line: 159
Column: 1
module.bias = None
self.bias_handles.append(module.register_forward_hook(self.bias_hook))
def convert(self, use_path=False, *args, **kwargs):
for config in self.module_groups:
if use_path:
module = _path_to_module(self.model, config['path'])
else:
module = config['module']
Reported by Pylint.
torch/nn/grad.py
25 issues
Line: 4
Column: 1
"""Gradient interface"""
import torch
from .modules.utils import _single, _pair, _triple
import warnings
def _grad_input_padding(grad_output, input_size, stride, padding, kernel_size, dilation=None):
if dilation is None:
Reported by Pylint.
Line: 76
Column: 12
grad_input_padding = _grad_input_padding(grad_output, input_size, stride,
padding, kernel_size, dilation)
return torch.conv_transpose1d(
grad_output, weight, None, stride, padding, grad_input_padding, groups,
dilation)
def conv1d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
Reported by Pylint.
Line: 118
Column: 19
input = input.contiguous().view(1, input.shape[0] * input.shape[1],
input.shape[2])
grad_weight = torch.conv1d(input, grad_output, None, dilation, padding,
stride, in_channels * min_batch)
grad_weight = grad_weight.contiguous().view(
min_batch, grad_weight.shape[1] // min_batch, grad_weight.shape[2])
Reported by Pylint.
Line: 165
Column: 12
grad_input_padding = _grad_input_padding(grad_output, input_size, stride,
padding, kernel_size, dilation)
return torch.conv_transpose2d(
grad_output, weight, None, stride, padding, grad_input_padding, groups,
dilation)
def conv2d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
Reported by Pylint.
Line: 209
Column: 19
input = input.contiguous().view(1, input.shape[0] * input.shape[1],
input.shape[2], input.shape[3])
grad_weight = torch.conv2d(input, grad_output, None, dilation, padding,
stride, in_channels * min_batch)
grad_weight = grad_weight.contiguous().view(
min_batch, grad_weight.shape[1] // min_batch, grad_weight.shape[2],
grad_weight.shape[3])
Reported by Pylint.
Line: 258
Column: 12
grad_input_padding = _grad_input_padding(grad_output, input_size, stride,
padding, kernel_size, dilation)
return torch.conv_transpose3d(
grad_output, weight, None, stride, padding, grad_input_padding, groups,
dilation)
def conv3d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
Reported by Pylint.
Line: 302
Column: 19
input.shape[2], input.shape[3],
input.shape[4])
grad_weight = torch.conv3d(input, grad_output, None, dilation, padding,
stride, in_channels * min_batch)
grad_weight = grad_weight.contiguous().view(
min_batch, grad_weight.shape[1] // min_batch, grad_weight.shape[2],
grad_weight.shape[3], grad_weight.shape[4])
Reported by Pylint.
Line: 81
Column: 19
dilation)
def conv1d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
r"""
Computes the gradient of conv1d with respect to the weight of the convolution.
Args:
input: input tensor of shape (minibatch x in_channels x iW)
Reported by Pylint.
Line: 170
Column: 19
dilation)
def conv2d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
r"""
Computes the gradient of conv2d with respect to the weight of the convolution.
Args:
input: input tensor of shape (minibatch x in_channels x iH x iW)
Reported by Pylint.
Line: 263
Column: 19
dilation)
def conv3d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
r"""
Computes the gradient of conv3d with respect to the weight of the convolution.
Args:
input: input tensor of shape (minibatch x in_channels x iT x iH x iW)
Reported by Pylint.
caffe2/python/operator_test/im2col_col2im_test.py
25 issues
Line: 7
Column: 1
from caffe2.python import core
from hypothesis import assume, given, settings
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
Reported by Pylint.
Line: 10
Column: 1
from hypothesis import assume, given, settings
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
class TestReduceFrontSum(hu.HypothesisTestCase):
@given(batch_size=st.integers(1, 3),
Reported by Pylint.
Line: 24
Column: 48
channels=st.integers(1, 8),
**hu.gcs)
def test_im2col_layout(self, batch_size, stride, pad, kernel, dilation,
size, channels, gc, dc):
dkernel = (dilation * (kernel - 1) + 1)
assume(size >= dkernel)
NCHW_TO_NHWC = (0, 2, 3, 1)
Reported by Pylint.
Line: 125
Column: 68
**hu.gcs)
@settings(deadline=10000)
def test_col2im_gradients(self, batch_size, stride, pad, kernel,
dilation, size, channels, order, gc, dc):
assume(size >= dilation * (kernel - 1) + 1)
op = core.CreateOperator(
"Im2Col",
["X"], ["Y"],
stride=stride,
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
from hypothesis import assume, given, settings
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 14
Column: 1
import numpy as np
class TestReduceFrontSum(hu.HypothesisTestCase):
@given(batch_size=st.integers(1, 3),
stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(1, 5),
dilation=st.integers(1, 3),
Reported by Pylint.
Line: 23
Column: 5
size=st.integers(7, 10),
channels=st.integers(1, 8),
**hu.gcs)
def test_im2col_layout(self, batch_size, stride, pad, kernel, dilation,
size, channels, gc, dc):
dkernel = (dilation * (kernel - 1) + 1)
assume(size >= dkernel)
Reported by Pylint.
Line: 23
Column: 5
size=st.integers(7, 10),
channels=st.integers(1, 8),
**hu.gcs)
def test_im2col_layout(self, batch_size, stride, pad, kernel, dilation,
size, channels, gc, dc):
dkernel = (dilation * (kernel - 1) + 1)
assume(size >= dkernel)
Reported by Pylint.
Line: 23
Column: 5
size=st.integers(7, 10),
channels=st.integers(1, 8),
**hu.gcs)
def test_im2col_layout(self, batch_size, stride, pad, kernel, dilation,
size, channels, gc, dc):
dkernel = (dilation * (kernel - 1) + 1)
assume(size >= dkernel)
Reported by Pylint.
Line: 23
Column: 5
size=st.integers(7, 10),
channels=st.integers(1, 8),
**hu.gcs)
def test_im2col_layout(self, batch_size, stride, pad, kernel, dilation,
size, channels, gc, dc):
dkernel = (dilation * (kernel - 1) + 1)
assume(size >= dkernel)
Reported by Pylint.
caffe2/python/embedding_generation_benchmark.py
25 issues
Line: 26
Column: 14
'''
Fill a queue with input data
'''
log.info("Generating T={} batches".format(T))
generate_input_init_net = core.Net('generate_input_init')
queue = generate_input_init_net.CreateBlobsQueue(
[], "inputqueue", num_blobs=1, capacity=T,
)
Reported by Pylint.
Line: 40
Column: 22
for t in range(T):
if (t % (max(10, T // 10)) == 0):
log.info("Generating data {}/{}".format(t, T))
X = np.tile(np.arange(max_seq_length), [batch_size, 1]).transpose()
workspace.FeedBlob("scratch", X)
workspace.RunNetOnce(generate_input_net.Proto())
log.info("Finished data generation")
Reported by Pylint.
Line: 50
Column: 14
def generate_embedding_table(vocab_size, embedding_size):
log.info("Generating embedding table with dimensions {}"
.format([vocab_size, embedding_size]))
generate_table_net = core.Net('generate_table')
table = generate_table_net.GaussianFill(
[],
Reported by Pylint.
Line: 64
Column: 18
return table
def create_model(args, queue, embedding_table, embedding_size):
model = model_helper.ModelHelper(name='embedding_generation_bench')
input_blob = model.net.DequeueBlobs(queue, 'input_data')
if args.implementation == 'sinusoid':
model.net.SinusoidPositionEncoding(
Reported by Pylint.
Line: 83
Column: 31
return model
def Caffe2EmbeddingGeneration(args):
T = args.data_size // args.batch_size
queue = generate_data(T, args.batch_size, args.seq_length)
embedding_table = None
Reported by Pylint.
Line: 118
Column: 13
new_time = time.time()
log.info(
"Iter: {} / {}. Embeddings Generated Per Second: {}k.".format(
iteration,
num_iters,
(iters_once * args.batch_size * args.seq_length) /
(new_time - last_time) // 100 / 10,
)
Reported by Pylint.
Line: 130
Column: 5
total_per_sec = (num_iters - 1) * args.batch_size * args.seq_length
total_per_sec = total_per_sec / (time.time() - start_time) // 100 / 10
log.info("Done. Total embeddings generated per second " +
"excluding 1st iteration: {}k".format(total_per_sec))
return time.time() - start_time
Reported by Pylint.
Line: 137
Column: 15
@utils.debug
def Benchmark(args):
return Caffe2EmbeddingGeneration(args)
def GetArgumentParser():
parser = argparse.ArgumentParser(
Reported by Pylint.
Line: 1
Column: 1
## @package embedding_generation_benchmark
# Module caffe2.python.embedding_generation_benchmark
from caffe2.proto import caffe2_pb2
from caffe2.python import workspace, core, utils, model_helper
Reported by Pylint.
Line: 11
Column: 1
from caffe2.proto import caffe2_pb2
from caffe2.python import workspace, core, utils, model_helper
import argparse
import numpy as np
import time
import logging
Reported by Pylint.
caffe2/python/operator_test/weighted_sum_test.py
25 issues
Line: 7
Column: 1
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
Reported by Pylint.
Line: 11
Column: 1
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
class TestWeightedSumOp(serial.SerializedTestCase):
Reported by Pylint.
Line: 66
Column: 49
seed=st.integers(min_value=0, max_value=65535), **hu.gcs_cpu_only)
@settings(deadline=10000)
def test_weighted_sum_grad(
self, n, m, d, grad_on_w, seed, gc, dc):
input_names = []
input_vars = []
np.random.seed(seed)
for i in range(m):
X_name = 'X' + str(i)
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 15
Column: 1
import numpy as np
class TestWeightedSumOp(serial.SerializedTestCase):
@given(
n=st.integers(1, 8), m=st.integers(1, 10), d=st.integers(1, 4),
in_place=st.booleans(), engine=st.sampled_from(["", "CUDNN"]),
seed=st.integers(min_value=0, max_value=65535),
Reported by Pylint.
Line: 23
Column: 5
seed=st.integers(min_value=0, max_value=65535),
**hu.gcs)
@settings(deadline=10000)
def test_weighted_sum(
self, n, m, d, in_place, engine, seed, gc, dc):
input_names = []
input_vars = []
np.random.seed(seed)
for i in range(m):
Reported by Pylint.
Line: 23
Column: 5
seed=st.integers(min_value=0, max_value=65535),
**hu.gcs)
@settings(deadline=10000)
def test_weighted_sum(
self, n, m, d, in_place, engine, seed, gc, dc):
input_names = []
input_vars = []
np.random.seed(seed)
for i in range(m):
Reported by Pylint.
Line: 23
Column: 5
seed=st.integers(min_value=0, max_value=65535),
**hu.gcs)
@settings(deadline=10000)
def test_weighted_sum(
self, n, m, d, in_place, engine, seed, gc, dc):
input_names = []
input_vars = []
np.random.seed(seed)
for i in range(m):
Reported by Pylint.
Line: 23
Column: 5
seed=st.integers(min_value=0, max_value=65535),
**hu.gcs)
@settings(deadline=10000)
def test_weighted_sum(
self, n, m, d, in_place, engine, seed, gc, dc):
input_names = []
input_vars = []
np.random.seed(seed)
for i in range(m):
Reported by Pylint.
Line: 23
Column: 5
seed=st.integers(min_value=0, max_value=65535),
**hu.gcs)
@settings(deadline=10000)
def test_weighted_sum(
self, n, m, d, in_place, engine, seed, gc, dc):
input_names = []
input_vars = []
np.random.seed(seed)
for i in range(m):
Reported by Pylint.
test/test_set_default_mobile_cpu_allocator.py
25 issues
Line: 1
Column: 1
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
class TestSetDefaultMobileCPUAllocator(TestCase):
def test_no_exception(self):
torch._C._set_default_mobile_cpu_allocator()
torch._C._unset_default_mobile_cpu_allocator()
def test_exception(self):
Reported by Pylint.
Line: 2
Column: 1
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
class TestSetDefaultMobileCPUAllocator(TestCase):
def test_no_exception(self):
torch._C._set_default_mobile_cpu_allocator()
torch._C._unset_default_mobile_cpu_allocator()
def test_exception(self):
Reported by Pylint.
Line: 6
Column: 9
class TestSetDefaultMobileCPUAllocator(TestCase):
def test_no_exception(self):
torch._C._set_default_mobile_cpu_allocator()
torch._C._unset_default_mobile_cpu_allocator()
def test_exception(self):
with self.assertRaises(Exception):
torch._C._unset_default_mobile_cpu_allocator()
Reported by Pylint.
Line: 6
Column: 9
class TestSetDefaultMobileCPUAllocator(TestCase):
def test_no_exception(self):
torch._C._set_default_mobile_cpu_allocator()
torch._C._unset_default_mobile_cpu_allocator()
def test_exception(self):
with self.assertRaises(Exception):
torch._C._unset_default_mobile_cpu_allocator()
Reported by Pylint.
Line: 7
Column: 9
class TestSetDefaultMobileCPUAllocator(TestCase):
def test_no_exception(self):
torch._C._set_default_mobile_cpu_allocator()
torch._C._unset_default_mobile_cpu_allocator()
def test_exception(self):
with self.assertRaises(Exception):
torch._C._unset_default_mobile_cpu_allocator()
Reported by Pylint.
Line: 7
Column: 9
class TestSetDefaultMobileCPUAllocator(TestCase):
def test_no_exception(self):
torch._C._set_default_mobile_cpu_allocator()
torch._C._unset_default_mobile_cpu_allocator()
def test_exception(self):
with self.assertRaises(Exception):
torch._C._unset_default_mobile_cpu_allocator()
Reported by Pylint.
Line: 11
Column: 13
def test_exception(self):
with self.assertRaises(Exception):
torch._C._unset_default_mobile_cpu_allocator()
with self.assertRaises(Exception):
torch._C._set_default_mobile_cpu_allocator()
torch._C._set_default_mobile_cpu_allocator()
Reported by Pylint.
Line: 11
Column: 13
def test_exception(self):
with self.assertRaises(Exception):
torch._C._unset_default_mobile_cpu_allocator()
with self.assertRaises(Exception):
torch._C._set_default_mobile_cpu_allocator()
torch._C._set_default_mobile_cpu_allocator()
Reported by Pylint.
Line: 14
Column: 13
torch._C._unset_default_mobile_cpu_allocator()
with self.assertRaises(Exception):
torch._C._set_default_mobile_cpu_allocator()
torch._C._set_default_mobile_cpu_allocator()
# Must reset to good state
# For next test.
torch._C._unset_default_mobile_cpu_allocator()
Reported by Pylint.
Line: 14
Column: 13
torch._C._unset_default_mobile_cpu_allocator()
with self.assertRaises(Exception):
torch._C._set_default_mobile_cpu_allocator()
torch._C._set_default_mobile_cpu_allocator()
# Must reset to good state
# For next test.
torch._C._unset_default_mobile_cpu_allocator()
Reported by Pylint.
test/distributed/pipeline/sync/test_inplace.py
25 issues
Line: 7
Column: 1
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch import nn
from torch.distributed.pipeline.sync import Pipe
Reported by Pylint.
Line: 8
Column: 1
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch import nn
from torch.distributed.pipeline.sync import Pipe
Reported by Pylint.
Line: 9
Column: 1
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch import nn
from torch.distributed.pipeline.sync import Pipe
def test_inplace_on_requires_grad(setup_rpc):
Reported by Pylint.
Line: 11
Column: 1
import torch
from torch import nn
from torch.distributed.pipeline.sync import Pipe
def test_inplace_on_requires_grad(setup_rpc):
model = nn.Sequential(nn.Linear(1, 1), nn.ReLU(inplace=True))
model = Pipe(model, checkpoint="always")
Reported by Pylint.
Line: 14
Column: 35
from torch.distributed.pipeline.sync import Pipe
def test_inplace_on_requires_grad(setup_rpc):
model = nn.Sequential(nn.Linear(1, 1), nn.ReLU(inplace=True))
model = Pipe(model, checkpoint="always")
x = torch.rand(1)
y = model(x).local_value()
Reported by Pylint.
Line: 27
Column: 39
@pytest.mark.xfail(strict=True)
def test_inplace_on_not_requires_grad(setup_rpc):
# In-place operation on a tensor not requiring grad doesn't cause a
# RuntimeError. Currently, we cannot detect this case.
model = nn.Sequential(nn.ReLU(inplace=True))
model = Pipe(model, [1], devices=["cpu"], checkpoint="always")
Reported by Pylint.
Line: 43
Column: 33
@pytest.mark.xfail(strict=True)
def test_inplace_incorrect_grad(setup_rpc):
class M(nn.Module):
def forward(self, foo_bar):
# 'foo' requires grad but 'bar' does not. In-place operation on
# 'bar' won't cause a RuntimeError.
foo, bar = foo_bar
Reported by Pylint.
Line: 1
Column: 1
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch import nn
Reported by Pylint.
Line: 14
Column: 1
from torch.distributed.pipeline.sync import Pipe
def test_inplace_on_requires_grad(setup_rpc):
model = nn.Sequential(nn.Linear(1, 1), nn.ReLU(inplace=True))
model = Pipe(model, checkpoint="always")
x = torch.rand(1)
y = model(x).local_value()
Reported by Pylint.
Line: 18
Column: 5
model = nn.Sequential(nn.Linear(1, 1), nn.ReLU(inplace=True))
model = Pipe(model, checkpoint="always")
x = torch.rand(1)
y = model(x).local_value()
message = r"a leaf Variable that requires grad .* used in an in-place operation."
with pytest.raises(RuntimeError, match=message):
y.backward()
Reported by Pylint.
torch/distributed/elastic/metrics/api.py
25 issues
Line: 69
Column: 9
# pyre-fixme[9]: group has type `str`; used as `None`.
def configure(handler: MetricHandler, group: str = None):
if group is None:
global _default_metrics_handler
# pyre-fixme[9]: _default_metrics_handler has type `NullMetricHandler`; used
# as `MetricHandler`.
_default_metrics_handler = handler
else:
_metrics_map[group] = handler
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import abc
Reported by Pylint.
Line: 19
Column: 1
MetricData = namedtuple("MetricData", ["timestamp", "group_name", "name", "value"])
class MetricsConfig:
__slots__ = ["params"]
def __init__(self, params: Optional[Dict[str, str]] = None):
self.params = params
if self.params is None:
Reported by Pylint.
Line: 19
Column: 1
MetricData = namedtuple("MetricData", ["timestamp", "group_name", "name", "value"])
class MetricsConfig:
__slots__ = ["params"]
def __init__(self, params: Optional[Dict[str, str]] = None):
self.params = params
if self.params is None:
Reported by Pylint.
Line: 28
Column: 1
self.params = {}
class MetricHandler(abc.ABC):
@abc.abstractmethod
def emit(self, metric_data: MetricData):
pass
Reported by Pylint.
Line: 28
Column: 1
self.params = {}
class MetricHandler(abc.ABC):
@abc.abstractmethod
def emit(self, metric_data: MetricData):
pass
Reported by Pylint.
Line: 30
Column: 5
class MetricHandler(abc.ABC):
@abc.abstractmethod
def emit(self, metric_data: MetricData):
pass
class ConsoleMetricHandler(MetricHandler):
def emit(self, metric_data: MetricData):
Reported by Pylint.
Line: 34
Column: 1
pass
class ConsoleMetricHandler(MetricHandler):
def emit(self, metric_data: MetricData):
print(
"[{}][{}]: {}={}".format(
metric_data.timestamp,
metric_data.group_name,
Reported by Pylint.
Line: 34
Column: 1
pass
class ConsoleMetricHandler(MetricHandler):
def emit(self, metric_data: MetricData):
print(
"[{}][{}]: {}={}".format(
metric_data.timestamp,
metric_data.group_name,
Reported by Pylint.
Line: 46
Column: 1
)
class NullMetricHandler(MetricHandler):
def emit(self, metric_data: MetricData):
pass
class MetricStream:
Reported by Pylint.
caffe2/python/operator_test/decay_adagrad_test.py
25 issues
Line: 3
Column: 1
import functools
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 4
Column: 1
import functools
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 15
Column: 78
@staticmethod
def ref_decay_adagrad(param, mom1, mom2, grad, LR, ITER,
beta1, beta2, epsilon, weight_decay, bias_correction_first, output_grad=False):
t = ITER + 1
mom1_out = (beta1 * mom1) + (1 - beta1) * grad
mom2_out = mom2 + np.square(grad)
if bias_correction_first:
c = 1 - np.power(beta1, t)
Reported by Pylint.
Line: 15
Column: 25
@staticmethod
def ref_decay_adagrad(param, mom1, mom2, grad, LR, ITER,
beta1, beta2, epsilon, weight_decay, bias_correction_first, output_grad=False):
t = ITER + 1
mom1_out = (beta1 * mom1) + (1 - beta1) * grad
mom2_out = mom2 + np.square(grad)
if bias_correction_first:
c = 1 - np.power(beta1, t)
Reported by Pylint.
Line: 41
Column: 93
weight_decay=st.floats(min_value=0.01, max_value=0.99,
allow_nan=False, allow_infinity=False),
**hu.gcs_cpu_only)
def test_decay_adagrad(self, inputs, ITER, LR, beta1, beta2, epsilon, weight_decay, gc, dc):
bias_correction_first = True
param, mom1, mom2, grad = inputs
mom2 = np.abs(mom2)
ITER = np.array([ITER], dtype=np.int64)
Reported by Pylint.
Line: 1
Column: 1
import functools
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 11
Column: 1
import caffe2.python.hypothesis_test_util as hu
class TestDecayAdagrad(hu.HypothesisTestCase):
@staticmethod
def ref_decay_adagrad(param, mom1, mom2, grad, LR, ITER,
beta1, beta2, epsilon, weight_decay, bias_correction_first, output_grad=False):
t = ITER + 1
Reported by Pylint.
Line: 14
Column: 5
class TestDecayAdagrad(hu.HypothesisTestCase):
@staticmethod
def ref_decay_adagrad(param, mom1, mom2, grad, LR, ITER,
beta1, beta2, epsilon, weight_decay, bias_correction_first, output_grad=False):
t = ITER + 1
mom1_out = (beta1 * mom1) + (1 - beta1) * grad
mom2_out = mom2 + np.square(grad)
if bias_correction_first:
Reported by Pylint.
Line: 14
Column: 5
class TestDecayAdagrad(hu.HypothesisTestCase):
@staticmethod
def ref_decay_adagrad(param, mom1, mom2, grad, LR, ITER,
beta1, beta2, epsilon, weight_decay, bias_correction_first, output_grad=False):
t = ITER + 1
mom1_out = (beta1 * mom1) + (1 - beta1) * grad
mom2_out = mom2 + np.square(grad)
if bias_correction_first:
Reported by Pylint.
Line: 14
Column: 5
class TestDecayAdagrad(hu.HypothesisTestCase):
@staticmethod
def ref_decay_adagrad(param, mom1, mom2, grad, LR, ITER,
beta1, beta2, epsilon, weight_decay, bias_correction_first, output_grad=False):
t = ITER + 1
mom1_out = (beta1 * mom1) + (1 - beta1) * grad
mom2_out = mom2 + np.square(grad)
if bias_correction_first:
Reported by Pylint.