The following issues were found:
torch/fx/passes/net_min_base.py
23 issues
Line: 8
Column: 1
import torch.fx
from torch.fx.node import map_arg
from .shape_prop import ShapeProp
from .split_utils import split_by_tags
from .tools_common import (
Tensors,
TensorOrTensors,
NodeList,
Reported by Pylint.
Line: 9
Column: 1
from torch.fx.node import map_arg
from .shape_prop import ShapeProp
from .split_utils import split_by_tags
from .tools_common import (
Tensors,
TensorOrTensors,
NodeList,
NodeSet,
Reported by Pylint.
Line: 10
Column: 1
from .shape_prop import ShapeProp
from .split_utils import split_by_tags
from .tools_common import (
Tensors,
TensorOrTensors,
NodeList,
NodeSet,
CALLABLE_NODE_OPS,
Reported by Pylint.
Line: 26
Column: 5
Raised if failed to split out a minimize module
"""
pass
class FxNetMinimizerRunFuncError(Exception):
"""
Raised if error occurs during run_a or run_b functions
Reported by Pylint.
Line: 34
Column: 5
Raised if error occurs during run_a or run_b functions
"""
pass
class FxNetMinimizerResultMismatchError(Exception):
"""
Raised if comparing function thinks the results are mismatching.
Reported by Pylint.
Line: 42
Column: 5
Raised if comparing function thinks the results are mismatching.
"""
pass
class _MinimizerSettingBase:
def __init__(self):
parser = argparse.ArgumentParser()
Reported by Pylint.
Line: 75
Column: 15
help="If true, when using `run_nodes()` function to run the model, intermediate results "
"of all the ops will be returned as output.",
)
args, unknown = parser.parse_known_args()
self.accumulate_error: bool = args.accumulate_error
self.traverse_method: str = args.traverse_method
self.find_all: bool = args.find_all
self.return_intermediate: bool = args.return_intermediate
Reported by Pylint.
Line: 226
Column: 28
if self.settings.accumulate_error:
print(f"Can't find previous stored outputs named {placeholders}!")
def get_inputs(self: torch.nn.Module, inputs: Any):
nonlocal a_input
a_input = inputs
# Use forward hook to get the inputs to the submodule
handle = submodule.register_forward_pre_hook(get_inputs)
Reported by Pylint.
Line: 385
Column: 17
culprits.update(self._binary_search_impl(nodes[mid:]))
if len(culprits) == 0:
raise FxNetMinimizerBadModuleError(
"Found an error in a group of nodes, but was not able to minimize",
nodes,
)
return culprits
else:
Reported by Pylint.
Line: 1
Column: 1
import argparse
from typing import Any, Callable, Tuple, Dict, Optional
import torch
import torch.fx
from torch.fx.node import map_arg
from .shape_prop import ShapeProp
from .split_utils import split_by_tags
Reported by Pylint.
caffe2/python/operator_test/basic_rnn_test.py
23 issues
Line: 11
Column: 1
from caffe2.python.rnn.rnn_cell_test_util import tanh
import caffe2.python.hypothesis_test_util as hu
from hypothesis import given
from hypothesis import settings as ht_settings
import hypothesis.strategies as st
import numpy as np
import unittest
Reported by Pylint.
Line: 12
Column: 1
import caffe2.python.hypothesis_test_util as hu
from hypothesis import given
from hypothesis import settings as ht_settings
import hypothesis.strategies as st
import numpy as np
import unittest
Reported by Pylint.
Line: 13
Column: 1
from hypothesis import given
from hypothesis import settings as ht_settings
import hypothesis.strategies as st
import numpy as np
import unittest
def basic_rnn_reference(input, hidden_initial,
Reported by Pylint.
Line: 18
Column: 25
import unittest
def basic_rnn_reference(input, hidden_initial,
i2h_w, i2h_b,
gate_w, gate_b,
seq_lengths,
drop_states,
use_sequence_lengths):
Reported by Pylint.
Line: 23
Column: 25
gate_w, gate_b,
seq_lengths,
drop_states,
use_sequence_lengths):
D = hidden_initial.shape[-1]
T = input.shape[0]
N = input.shape[1]
if seq_lengths is not None:
Reported by Pylint.
Line: 65
Column: 59
)
@ht_settings(max_examples=15)
def test_basic_rnn(self, seed, seq_length, batch_size, input_size, hidden_size,
drop_states, sequence_lengths, gc, dc):
np.random.seed(seed)
seq_lengths_data = np.random.randint(
1, seq_length + 1, size=(batch_size,)).astype(np.int32)
input_blob_data = np.random.randn(
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import workspace, core, rnn_cell
from caffe2.python.model_helper import ModelHelper
from caffe2.python.rnn.rnn_cell_test_util import tanh
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 15
Column: 1
from hypothesis import settings as ht_settings
import hypothesis.strategies as st
import numpy as np
import unittest
def basic_rnn_reference(input, hidden_initial,
i2h_w, i2h_b,
gate_w, gate_b,
Reported by Pylint.
Line: 18
Column: 1
import unittest
def basic_rnn_reference(input, hidden_initial,
i2h_w, i2h_b,
gate_w, gate_b,
seq_lengths,
drop_states,
use_sequence_lengths):
Reported by Pylint.
Line: 18
Column: 1
import unittest
def basic_rnn_reference(input, hidden_initial,
i2h_w, i2h_b,
gate_w, gate_b,
seq_lengths,
drop_states,
use_sequence_lengths):
Reported by Pylint.
caffe2/python/session.py
23 issues
Line: 187
Column: 26
"""
def __init__(self, ws=None):
Session.__init__(self)
self._ws = ws or workspace.C.Workspace.current
@classmethod
def _compile_task_group(cls, task_group, setup_net_list=None):
with Cluster():
task = task_group.to_task()
Reported by Pylint.
Line: 207
Column: 13
outputs.append(core.BlobReference(str(name)))
output_list.set_values(outputs, _fetch_func=self._fetch_output)
task_ws = (
workspace.C.Workspace(self._ws)
if workspace_type == WorkspaceType.PRIVATE else self._ws)
with workspace.WorkspaceGuard(task_ws):
task_ws.run(plan)
def _fetch_output(self, output):
Reported by Pylint.
Line: 103
Column: 21
"Require {} but already have {}".format(
workspace_type, runnable.workspace_type())
else:
runnable._workspace_type = workspace_type
tg = runnable
else:
if workspace_type is None:
workspace_type = WorkspaceType.GLOBAL
tg = TaskGroup(workspace_type=workspace_type)
Reported by Pylint.
Line: 161
Column: 46
raise NotImplementedError()
@classmethod
def _compile_task_group(cls, task_group, setup_net_list=None):
return task_group
def _do_close(self):
pass
Reported by Pylint.
Line: 176
Column: 1
self.close()
class LocalSession(Session):
"""
Session that runs in a single node.
Tasks are all remapped to run in parallel in the 'local' node.
Currently, LocalSession runs all parallel tasks in the same workspace,
Reported by Pylint.
Line: 197
Column: 5
plan.AddStep(task.get_step())
return (plan, task.output_list(), task.workspace_type())
def _run_compiled(self, compiled):
plan, output_list, workspace_type = compiled
# make sure the output blobs belong to the parent workspace
outputs = []
for name in output_list.names():
Reported by Pylint.
Line: 1
Column: 1
## @package session
# Module caffe2.python.session
from caffe2.python import core, workspace
Reported by Pylint.
Line: 13
Column: 1
from caffe2.python.task import Cluster, Task, TaskGroup, WorkspaceType
class CompiledRunnable(object):
""" Wrapper for compiled runnable returned from session.compile() """
def __init__(self, obj, session_class):
self.obj = obj
self.session_class = session_class
Reported by Pylint.
Line: 13
Column: 1
from caffe2.python.task import Cluster, Task, TaskGroup, WorkspaceType
class CompiledRunnable(object):
""" Wrapper for compiled runnable returned from session.compile() """
def __init__(self, obj, session_class):
self.obj = obj
self.session_class = session_class
Reported by Pylint.
Line: 20
Column: 1
self.session_class = session_class
class Session(object):
"""
Allows to run Nets, ExecutionSteps, Plans, Tasks and TaskGroups.
A session can potentially run in multiple nodes concurrently.
Reported by Pylint.
caffe2/quantization/server/lstm_unit_dnnlowp_op_test.py
23 issues
Line: 6
Column: 1
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from hypothesis import given, settings
Reported by Pylint.
Line: 9
Column: 1
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from hypothesis import given, settings
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
Reported by Pylint.
Line: 24
Column: 61
**hu.gcs_cpu_only
)
@settings(max_examples=10, deadline=None)
def test_dnnlowp_lstm_unit(self, N, D, forget_bias, gc, dc):
# X has scale 1, so exactly represented after quantization
H_in = np.clip(np.random.randn(1, N, D), -1, 1).astype(np.float32)
C_in = np.clip(np.random.randn(1, N, D), -1, 1).astype(np.float32)
G = np.clip(np.random.randn(1, N, 4 * D), -1, 1).astype(np.float32)
Reported by Pylint.
Line: 24
Column: 44
**hu.gcs_cpu_only
)
@settings(max_examples=10, deadline=None)
def test_dnnlowp_lstm_unit(self, N, D, forget_bias, gc, dc):
# X has scale 1, so exactly represented after quantization
H_in = np.clip(np.random.randn(1, N, D), -1, 1).astype(np.float32)
C_in = np.clip(np.random.randn(1, N, D), -1, 1).astype(np.float32)
G = np.clip(np.random.randn(1, N, 4 * D), -1, 1).astype(np.float32)
Reported by Pylint.
Line: 1
Column: 1
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from hypothesis import given, settings
Reported by Pylint.
Line: 16
Column: 1
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPLSTMUnitOpTest(hu.HypothesisTestCase):
@given(
N=st.integers(0, 64),
D=st.integers(4, 64),
forget_bias=st.integers(0, 4),
**hu.gcs_cpu_only
Reported by Pylint.
Line: 23
Column: 5
forget_bias=st.integers(0, 4),
**hu.gcs_cpu_only
)
@settings(max_examples=10, deadline=None)
def test_dnnlowp_lstm_unit(self, N, D, forget_bias, gc, dc):
# X has scale 1, so exactly represented after quantization
H_in = np.clip(np.random.randn(1, N, D), -1, 1).astype(np.float32)
C_in = np.clip(np.random.randn(1, N, D), -1, 1).astype(np.float32)
Reported by Pylint.
Line: 23
Column: 5
forget_bias=st.integers(0, 4),
**hu.gcs_cpu_only
)
@settings(max_examples=10, deadline=None)
def test_dnnlowp_lstm_unit(self, N, D, forget_bias, gc, dc):
# X has scale 1, so exactly represented after quantization
H_in = np.clip(np.random.randn(1, N, D), -1, 1).astype(np.float32)
C_in = np.clip(np.random.randn(1, N, D), -1, 1).astype(np.float32)
Reported by Pylint.
Line: 23
Column: 5
forget_bias=st.integers(0, 4),
**hu.gcs_cpu_only
)
@settings(max_examples=10, deadline=None)
def test_dnnlowp_lstm_unit(self, N, D, forget_bias, gc, dc):
# X has scale 1, so exactly represented after quantization
H_in = np.clip(np.random.randn(1, N, D), -1, 1).astype(np.float32)
C_in = np.clip(np.random.randn(1, N, D), -1, 1).astype(np.float32)
Reported by Pylint.
Line: 23
Column: 5
forget_bias=st.integers(0, 4),
**hu.gcs_cpu_only
)
@settings(max_examples=10, deadline=None)
def test_dnnlowp_lstm_unit(self, N, D, forget_bias, gc, dc):
# X has scale 1, so exactly represented after quantization
H_in = np.clip(np.random.randn(1, N, D), -1, 1).astype(np.float32)
C_in = np.clip(np.random.randn(1, N, D), -1, 1).astype(np.float32)
Reported by Pylint.
test/custom_operator/test_custom_ops.py
23 issues
Line: 4
Column: 1
import os.path
import tempfile
import torch
from torch import ops
from model import Model, get_custom_op_library_path
from torch.testing._internal.common_utils import TestCase, run_tests
Reported by Pylint.
Line: 5
Column: 1
import tempfile
import torch
from torch import ops
from model import Model, get_custom_op_library_path
from torch.testing._internal.common_utils import TestCase, run_tests
Reported by Pylint.
Line: 8
Column: 1
from torch import ops
from model import Model, get_custom_op_library_path
from torch.testing._internal.common_utils import TestCase, run_tests
class TestCustomOperators(TestCase):
def setUp(self):
self.library_path = get_custom_op_library_path()
Reported by Pylint.
Line: 1
Column: 1
import os.path
import tempfile
import torch
from torch import ops
from model import Model, get_custom_op_library_path
from torch.testing._internal.common_utils import TestCase, run_tests
Reported by Pylint.
Line: 8
Column: 1
from torch import ops
from model import Model, get_custom_op_library_path
from torch.testing._internal.common_utils import TestCase, run_tests
class TestCustomOperators(TestCase):
def setUp(self):
self.library_path = get_custom_op_library_path()
Reported by Pylint.
Line: 8
Column: 1
from torch import ops
from model import Model, get_custom_op_library_path
from torch.testing._internal.common_utils import TestCase, run_tests
class TestCustomOperators(TestCase):
def setUp(self):
self.library_path = get_custom_op_library_path()
Reported by Pylint.
Line: 11
Column: 1
from torch.testing._internal.common_utils import TestCase, run_tests
class TestCustomOperators(TestCase):
def setUp(self):
self.library_path = get_custom_op_library_path()
ops.load_library(self.library_path)
def test_custom_library_is_loaded(self):
Reported by Pylint.
Line: 12
Column: 5
class TestCustomOperators(TestCase):
def setUp(self):
self.library_path = get_custom_op_library_path()
ops.load_library(self.library_path)
def test_custom_library_is_loaded(self):
self.assertIn(self.library_path, ops.loaded_libraries)
Reported by Pylint.
Line: 12
Column: 5
class TestCustomOperators(TestCase):
def setUp(self):
self.library_path = get_custom_op_library_path()
ops.load_library(self.library_path)
def test_custom_library_is_loaded(self):
self.assertIn(self.library_path, ops.loaded_libraries)
Reported by Pylint.
Line: 16
Column: 5
self.library_path = get_custom_op_library_path()
ops.load_library(self.library_path)
def test_custom_library_is_loaded(self):
self.assertIn(self.library_path, ops.loaded_libraries)
def test_calling_custom_op_string(self):
output = ops.custom.op2("abc", "def")
self.assertLess(output, 0)
Reported by Pylint.
test/onnx/model_defs/lstm_flattening_result.py
23 issues
Line: 1
Column: 1
from torch import nn
from torch.nn.utils.rnn import PackedSequence
class LstmFlatteningResult(nn.LSTM):
def forward(self, input, *fargs, **fkwargs):
output, (hidden, cell) = nn.LSTM.forward(self, input, *fargs, **fkwargs)
return output, hidden, cell
Reported by Pylint.
Line: 2
Column: 1
from torch import nn
from torch.nn.utils.rnn import PackedSequence
class LstmFlatteningResult(nn.LSTM):
def forward(self, input, *fargs, **fkwargs):
output, (hidden, cell) = nn.LSTM.forward(self, input, *fargs, **fkwargs)
return output, hidden, cell
Reported by Pylint.
Line: 6
Column: 23
class LstmFlatteningResult(nn.LSTM):
def forward(self, input, *fargs, **fkwargs):
output, (hidden, cell) = nn.LSTM.forward(self, input, *fargs, **fkwargs)
return output, hidden, cell
class LstmFlatteningResultWithSeqLength(nn.Module):
def __init__(self, input_size, hidden_size, layers, bidirect, dropout, batch_first):
Reported by Pylint.
Line: 19
Column: 23
bidirectional=bidirect, dropout=dropout,
batch_first=batch_first)
def forward(self, input: PackedSequence, hx=None):
output, (hidden, cell) = self.inner_model.forward(input, hx)
return output, hidden, cell
class LstmFlatteningResultWithoutSeqLength(nn.Module):
def __init__(self, input_size, hidden_size, layers, bidirect, dropout, batch_first):
Reported by Pylint.
Line: 32
Column: 23
bidirectional=bidirect, dropout=dropout,
batch_first=batch_first)
def forward(self, input, hx=None):
output, (hidden, cell) = self.inner_model.forward(input, hx)
return output, hidden, cell
Reported by Pylint.
Line: 1
Column: 1
from torch import nn
from torch.nn.utils.rnn import PackedSequence
class LstmFlatteningResult(nn.LSTM):
def forward(self, input, *fargs, **fkwargs):
output, (hidden, cell) = nn.LSTM.forward(self, input, *fargs, **fkwargs)
return output, hidden, cell
Reported by Pylint.
Line: 5
Column: 1
from torch.nn.utils.rnn import PackedSequence
class LstmFlatteningResult(nn.LSTM):
def forward(self, input, *fargs, **fkwargs):
output, (hidden, cell) = nn.LSTM.forward(self, input, *fargs, **fkwargs)
return output, hidden, cell
class LstmFlatteningResultWithSeqLength(nn.Module):
Reported by Pylint.
Line: 5
Column: 1
from torch.nn.utils.rnn import PackedSequence
class LstmFlatteningResult(nn.LSTM):
def forward(self, input, *fargs, **fkwargs):
output, (hidden, cell) = nn.LSTM.forward(self, input, *fargs, **fkwargs)
return output, hidden, cell
class LstmFlatteningResultWithSeqLength(nn.Module):
Reported by Pylint.
Line: 6
Column: 5
class LstmFlatteningResult(nn.LSTM):
def forward(self, input, *fargs, **fkwargs):
output, (hidden, cell) = nn.LSTM.forward(self, input, *fargs, **fkwargs)
return output, hidden, cell
class LstmFlatteningResultWithSeqLength(nn.Module):
def __init__(self, input_size, hidden_size, layers, bidirect, dropout, batch_first):
Reported by Pylint.
Line: 10
Column: 1
output, (hidden, cell) = nn.LSTM.forward(self, input, *fargs, **fkwargs)
return output, hidden, cell
class LstmFlatteningResultWithSeqLength(nn.Module):
def __init__(self, input_size, hidden_size, layers, bidirect, dropout, batch_first):
super(LstmFlatteningResultWithSeqLength, self).__init__()
self.batch_first = batch_first
self.inner_model = nn.LSTM(input_size=input_size, hidden_size=hidden_size, num_layers=layers,
Reported by Pylint.
caffe2/quantization/server/fully_connected_rowwise_dnnlowp_op_test.py
23 issues
Line: 6
Column: 1
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server import utils as dnnlowp_utils
from caffe2.quantization.server.dnnlowp_test_utils import (
avoid_vpmaddubsw_overflow_fc,
Reported by Pylint.
Line: 15
Column: 1
check_quantized_results_close,
run_conv_or_fc,
)
from hypothesis import given
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
Reported by Pylint.
Line: 42
Column: 9
out_quantized,
prepack_weight,
gc,
dc,
):
# X has scale 1, so exactly represented after quantization
X_min = -77
X_max = X_min + 255
X = np.round(
Reported by Pylint.
Line: 87
Column: 9
b = np.random.randn(output_channels).astype(np.float32)
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
outputs = []
op_engine_list = [
("FC", ""),
("FC", "DNNLOWP_ROWWISE"),
Reported by Pylint.
Line: 1
Column: 1
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server import utils as dnnlowp_utils
Reported by Pylint.
Line: 22
Column: 1
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class RowWiseDNNLowPFullyConnectedOpTest(hu.HypothesisTestCase):
# correctness test with no quantization error in inputs
@given(
input_channels=st.sampled_from([3, 4, 5, 8, 16, 32]),
output_channels=st.integers(2, 16),
batch_size=st.integers(0, 16),
Reported by Pylint.
Line: 32
Column: 5
out_quantized=st.booleans(),
prepack_weight=st.booleans(),
**hu.gcs_cpu_only
)
def test_rowwise_dnnlowp_fully_connected_int(
self,
input_channels,
output_channels,
batch_size,
Reported by Pylint.
Line: 32
Column: 5
out_quantized=st.booleans(),
prepack_weight=st.booleans(),
**hu.gcs_cpu_only
)
def test_rowwise_dnnlowp_fully_connected_int(
self,
input_channels,
output_channels,
batch_size,
Reported by Pylint.
Line: 32
Column: 5
out_quantized=st.booleans(),
prepack_weight=st.booleans(),
**hu.gcs_cpu_only
)
def test_rowwise_dnnlowp_fully_connected_int(
self,
input_channels,
output_channels,
batch_size,
Reported by Pylint.
Line: 32
Column: 5
out_quantized=st.booleans(),
prepack_weight=st.booleans(),
**hu.gcs_cpu_only
)
def test_rowwise_dnnlowp_fully_connected_int(
self,
input_channels,
output_channels,
batch_size,
Reported by Pylint.
caffe2/python/operator_test/expand_op_test.py
23 issues
Line: 7
Column: 1
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
Reported by Pylint.
Line: 11
Column: 1
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
class TestExpandOp(serial.SerializedTestCase):
def _rand_shape(self, X_shape, max_length):
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 15
Column: 1
import numpy as np
class TestExpandOp(serial.SerializedTestCase):
def _rand_shape(self, X_shape, max_length):
length = np.random.randint(max_length)
shape = np.ones(length, dtype=np.int64)
i = len(X_shape) - 1
for j in reversed(range(length)):
Reported by Pylint.
Line: 16
Column: 5
class TestExpandOp(serial.SerializedTestCase):
def _rand_shape(self, X_shape, max_length):
length = np.random.randint(max_length)
shape = np.ones(length, dtype=np.int64)
i = len(X_shape) - 1
for j in reversed(range(length)):
if i >= 0:
Reported by Pylint.
Line: 16
Column: 5
class TestExpandOp(serial.SerializedTestCase):
def _rand_shape(self, X_shape, max_length):
length = np.random.randint(max_length)
shape = np.ones(length, dtype=np.int64)
i = len(X_shape) - 1
for j in reversed(range(length)):
if i >= 0:
Reported by Pylint.
Line: 29
Column: 5
shape[j] = k
return shape
def _run_expand_op_test(self, X, shape, gc, dc):
shape = np.array(shape)
op = core.CreateOperator(
'Expand',
["X", "shape"],
["Y"],
Reported by Pylint.
Line: 29
Column: 5
shape[j] = k
return shape
def _run_expand_op_test(self, X, shape, gc, dc):
shape = np.array(shape)
op = core.CreateOperator(
'Expand',
["X", "shape"],
["Y"],
Reported by Pylint.
Line: 29
Column: 5
shape[j] = k
return shape
def _run_expand_op_test(self, X, shape, gc, dc):
shape = np.array(shape)
op = core.CreateOperator(
'Expand',
["X", "shape"],
["Y"],
Reported by Pylint.
Line: 31
Column: 9
def _run_expand_op_test(self, X, shape, gc, dc):
shape = np.array(shape)
op = core.CreateOperator(
'Expand',
["X", "shape"],
["Y"],
)
def ref(X, shape):
Reported by Pylint.
test/distributed/pipeline/sync/test_copy.py
23 issues
Line: 7
Column: 1
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch.distributed.pipeline.sync.copy import Copy, Wait
from torch.distributed.pipeline.sync.stream import CPUStream, current_stream, get_device, is_cuda, new_stream, use_stream
Reported by Pylint.
Line: 8
Column: 1
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch.distributed.pipeline.sync.copy import Copy, Wait
from torch.distributed.pipeline.sync.stream import CPUStream, current_stream, get_device, is_cuda, new_stream, use_stream
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
Reported by Pylint.
Line: 10
Column: 1
import pytest
import torch
from torch.distributed.pipeline.sync.copy import Copy, Wait
from torch.distributed.pipeline.sync.stream import CPUStream, current_stream, get_device, is_cuda, new_stream, use_stream
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
Reported by Pylint.
Line: 11
Column: 1
import torch
from torch.distributed.pipeline.sync.copy import Copy, Wait
from torch.distributed.pipeline.sync.stream import CPUStream, current_stream, get_device, is_cuda, new_stream, use_stream
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def _test_copy_wait(prev_stream, next_stream, cuda_sleep=None):
Reported by Pylint.
Line: 68
Column: 35
a, b = Wait.apply(CPUStream, CPUStream, a, b)
assert a.grad_fn is b.grad_fn
assert a.grad_fn.__class__ is Wait._backward_cls
Reported by Pylint.
Line: 1
Column: 1
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
Reported by Pylint.
Line: 11
Column: 1
import torch
from torch.distributed.pipeline.sync.copy import Copy, Wait
from torch.distributed.pipeline.sync.stream import CPUStream, current_stream, get_device, is_cuda, new_stream, use_stream
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def _test_copy_wait(prev_stream, next_stream, cuda_sleep=None):
Reported by Pylint.
Line: 22
Column: 9
with use_stream(prev_stream):
if is_cuda(prev_stream):
cuda_sleep(0.5)
x = torch.ones(100, device=device, requires_grad=True)
(y,) = Copy.apply(prev_stream, next_stream, x)
(y,) = Wait.apply(prev_stream, next_stream, x)
with use_stream(next_stream):
Reported by Pylint.
Line: 24
Column: 6
cuda_sleep(0.5)
x = torch.ones(100, device=device, requires_grad=True)
(y,) = Copy.apply(prev_stream, next_stream, x)
(y,) = Wait.apply(prev_stream, next_stream, x)
with use_stream(next_stream):
assert torch.allclose(y.sum(), torch.tensor(100.0, device=device))
y.norm().backward()
Reported by Pylint.
Line: 25
Column: 6
x = torch.ones(100, device=device, requires_grad=True)
(y,) = Copy.apply(prev_stream, next_stream, x)
(y,) = Wait.apply(prev_stream, next_stream, x)
with use_stream(next_stream):
assert torch.allclose(y.sum(), torch.tensor(100.0, device=device))
y.norm().backward()
with use_stream(prev_stream):
Reported by Pylint.
torch/distributions/fishersnedecor.py
23 issues
Line: 34
Column: 27
self._gamma2 = Gamma(self.df2 * 0.5, self.df2)
if isinstance(df1, Number) and isinstance(df2, Number):
batch_shape = torch.Size()
else:
batch_shape = self.df1.size()
super(FisherSnedecor, self).__init__(batch_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
Reported by Pylint.
Line: 41
Column: 23
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(FisherSnedecor, _instance)
batch_shape = torch.Size(batch_shape)
new.df1 = self.df1.expand(batch_shape)
new.df2 = self.df2.expand(batch_shape)
new._gamma1 = self._gamma1.expand(batch_shape)
new._gamma2 = self._gamma2.expand(batch_shape)
super(FisherSnedecor, new).__init__(batch_shape, validate_args=False)
Reported by Pylint.
Line: 52
Column: 44
@property
def mean(self):
df2 = self.df2.clone(memory_format=torch.contiguous_format)
df2[df2 <= 2] = nan
return df2 / (df2 - 2)
@property
def variance(self):
Reported by Pylint.
Line: 58
Column: 44
@property
def variance(self):
df2 = self.df2.clone(memory_format=torch.contiguous_format)
df2[df2 <= 4] = nan
return 2 * df2.pow(2) * (self.df1 + df2 - 2) / (self.df1 * (df2 - 2).pow(2) * (df2 - 4))
def rsample(self, sample_shape=torch.Size(())):
shape = self._extended_shape(sample_shape)
Reported by Pylint.
Line: 62
Column: 36
df2[df2 <= 4] = nan
return 2 * df2.pow(2) * (self.df1 + df2 - 2) / (self.df1 * (df2 - 2).pow(2) * (df2 - 4))
def rsample(self, sample_shape=torch.Size(())):
shape = self._extended_shape(sample_shape)
# X1 ~ Gamma(df1 / 2, 1 / df1), X2 ~ Gamma(df2 / 2, 1 / df2)
# Y = df2 * df1 * X1 / (df1 * df2 * X2) = X1 / X2 ~ F(df1, df2)
X1 = self._gamma1.rsample(sample_shape).view(shape)
X2 = self._gamma2.rsample(sample_shape).view(shape)
Reported by Pylint.
Line: 68
Column: 16
# Y = df2 * df1 * X1 / (df1 * df2 * X2) = X1 / X2 ~ F(df1, df2)
X1 = self._gamma1.rsample(sample_shape).view(shape)
X2 = self._gamma2.rsample(sample_shape).view(shape)
tiny = torch.finfo(X2.dtype).tiny
X2.clamp_(min=tiny)
Y = X1 / X2
Y.clamp_(min=tiny)
return Y
Reported by Pylint.
Line: 81
Column: 44
ct2 = self.df2 * 0.5
ct3 = self.df1 / self.df2
t1 = (ct1 + ct2).lgamma() - ct1.lgamma() - ct2.lgamma()
t2 = ct1 * ct3.log() + (ct1 - 1) * torch.log(value)
t3 = (ct1 + ct2) * torch.log1p(ct3 * value)
return t1 + t2 - t3
Reported by Pylint.
Line: 82
Column: 28
ct3 = self.df1 / self.df2
t1 = (ct1 + ct2).lgamma() - ct1.lgamma() - ct2.lgamma()
t2 = ct1 * ct3.log() + (ct1 - 1) * torch.log(value)
t3 = (ct1 + ct2) * torch.log1p(ct3 * value)
return t1 + t2 - t3
Reported by Pylint.
Line: 10
Column: 1
from torch.distributions.utils import broadcast_all
class FisherSnedecor(Distribution):
r"""
Creates a Fisher-Snedecor distribution parameterized by :attr:`df1` and :attr:`df2`.
Example::
Reported by Pylint.
Line: 10
Column: 1
from torch.distributions.utils import broadcast_all
class FisherSnedecor(Distribution):
r"""
Creates a Fisher-Snedecor distribution parameterized by :attr:`df1` and :attr:`df2`.
Example::
Reported by Pylint.