The following issues were found (static-analysis report generated by Pylint):
caffe2/python/operator_test/segment_ops_test.py
243 issues
Line: 7
Column: 1
from functools import partial
from hypothesis import given, settings
import numpy as np
import unittest
import hypothesis.strategies as st
Reported by Pylint.
Line: 11
Column: 1
import numpy as np
import unittest
import hypothesis.strategies as st
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
Reported by Pylint.
Line: 42
Column: 20
class TesterBase:
def segment_reduce_op(self, data, segment_ids, reducer, indices=None):
segments = self.split(data, segment_ids, indices)
output = np.zeros((len(segments), ) + data.shape[1:])
for i, segment in enumerate(segments):
if len(segment) > 0:
output[i] = reducer(segment)
else:
Reported by Pylint.
Line: 60
Column: 20
output,
indices=None
):
segments = self.split(data, segment_ids, indices)
segment_grads = [
reducer_grad(grad_out[i], [output[i]], [segment])
for i, segment in enumerate(segments)
]
return self.unsplit(data.shape[1:], segment_grads, segment_ids)
Reported by Pylint.
Line: 65
Column: 16
reducer_grad(grad_out[i], [output[i]], [segment])
for i, segment in enumerate(segments)
]
return self.unsplit(data.shape[1:], segment_grads, segment_ids)
def _test(self, prefix, input_strategy, refs, gpu=False, **kwargs):
tester = self
operator_args = kwargs.pop('operator_args', {})
threshold = kwargs.pop('threshold', 1e-4)
Reported by Pylint.
Line: 310
Column: 21
GI[line] = W[line] * GO[g]
if GW is not None:
if len(GO.shape) > 1:
GW[line] = np.dot(GO[g].flatten(), D[I[line], :].flatten())
else:
GW[line] = np.dot(GO[g].flatten(), D[I[line]].flatten())
line += 1
print(GW)
return [(GI, I), GW, None, None]
Reported by Pylint.
Line: 312
Column: 21
if len(GO.shape) > 1:
GW[line] = np.dot(GO[g].flatten(), D[I[line], :].flatten())
else:
GW[line] = np.dot(GO[g].flatten(), D[I[line]].flatten())
line += 1
print(GW)
return [(GI, I), GW, None, None]
Reported by Pylint.
Line: 320
Column: 9
class TestSegmentOps(hu.HypothesisTestCase):
def test_sorted_segment_ops(self):
SegmentsTester()._test(
'SortedSegment',
hu.segmented_tensor(
dtype=np.float32,
is_sorted=True,
allow_empty=True
Reported by Pylint.
Line: 320
Column: 9
class TestSegmentOps(hu.HypothesisTestCase):
def test_sorted_segment_ops(self):
SegmentsTester()._test(
'SortedSegment',
hu.segmented_tensor(
dtype=np.float32,
is_sorted=True,
allow_empty=True
Reported by Pylint.
Line: 320
Column: 9
class TestSegmentOps(hu.HypothesisTestCase):
def test_sorted_segment_ops(self):
SegmentsTester()._test(
'SortedSegment',
hu.segmented_tensor(
dtype=np.float32,
is_sorted=True,
allow_empty=True
Reported by Pylint.
test/test_type_promotion.py
235 issues
Line: 5
Column: 1
import itertools
import unittest
import torch
from torch.testing._internal.common_utils import (TestCase, run_tests, load_tests,
TEST_NUMPY, torch_to_numpy_dtype_dict)
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, onlyOnCPUAndCUDA,
dtypes, dtypesIfCUDA, onlyCPU, expectedFailureMeta)
Reported by Pylint.
Line: 7
Column: 1
import torch
from torch.testing._internal.common_utils import (TestCase, run_tests, load_tests,
TEST_NUMPY, torch_to_numpy_dtype_dict)
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, onlyOnCPUAndCUDA,
dtypes, dtypesIfCUDA, onlyCPU, expectedFailureMeta)
if TEST_NUMPY:
Reported by Pylint.
Line: 9
Column: 1
from torch.testing._internal.common_utils import (TestCase, run_tests, load_tests,
TEST_NUMPY, torch_to_numpy_dtype_dict)
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, onlyOnCPUAndCUDA,
dtypes, dtypesIfCUDA, onlyCPU, expectedFailureMeta)
if TEST_NUMPY:
import numpy as np
Reported by Pylint.
Line: 17
Column: 1
# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
# Not thread-safe decorator that runs the decorated test once with
# the default dtype being torch.float and again with the default dtype
# being torch.double.
def float_double_default_dtype(fn):
Reported by Pylint.
Line: 208
Column: 3
d = torch.tensor([1, 1, 1, 1], dtype=torch.double, device=device)
torch.add(f, f, out=d)
self.assertEqual(d.dtype, torch.double)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(f + f, d)
@float_double_default_dtype
def test_mixed_type_backward(self, device):
f = torch.ones([3, 3], dtype=torch.float, requires_grad=True, device=device)
Reported by Pylint.
Line: 218
Column: 3
tens = f * ten
s = (tens + 2).sum()
s.backward()
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(f.grad, tens)
# If we don't convert the returned grad_input to the actual input type
# we get an error like:
# RuntimeError: Function SubBackward0 returned an invalid gradient at index 0 - expected type \
Reported by Pylint.
Line: 323
Column: 3
self.assertEqual(torch.arange(False, True, device=device), expected)
self.assertEqual(torch.arange(True, device=device), expected)
expected = torch.tensor([0, 0.5], dtype=torch.get_default_dtype(), device=device)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(torch.arange(False, True, 0.5, device=device), expected)
expected = torch.ones(0, dtype=torch.int64, device=device)
self.assertEqual(torch.arange(False, False, device=device), expected)
self.assertEqual(torch.linspace(False, True, device=device), torch.linspace(0, 1, device=device))
Reported by Pylint.
Line: 335
Column: 40
self.assertEqual(torch.scalar_tensor(False, device=device), torch.tensor(0., device=device))
@dtypes(*itertools.product(torch.testing.get_all_dtypes(), torch.testing.get_all_dtypes()))
def test_result_type(self, device, dtypes):
"Test result_type for tensor vs tensor and scalar vs scalar."
def _get_dtype(x):
"Get the dtype of x if x is a tensor. If x is a scalar, get its corresponding dtype if it were a tensor."
if torch.is_tensor(x):
Reported by Pylint.
Line: 406
Column: 29
_test_spot(torch.tensor([1, 1], dtype=torch.bool, device=device), 1., torch.get_default_dtype())
@float_double_default_dtype
def test_can_cast(self, device):
self.assertTrue(torch.can_cast(torch.double, torch.float))
self.assertFalse(torch.can_cast(torch.float, torch.int))
@float_double_default_dtype
def test_comparison_ops_with_type_promotion(self, device):
Reported by Pylint.
Line: 428
Column: 24
dict(
name="lt",
out_op=lambda x, y, d: torch.lt(x, y, out=torch.empty(0, dtype=torch.bool, device=d)),
ret_op=lambda x, y: torch.lt(x, y),
compare_op=lambda x, y: x < y,
),
dict(
name="le",
out_op=lambda x, y, d: torch.le(x, y, out=torch.empty(0, dtype=torch.bool, device=d)),
Reported by Pylint.
test/fx/test_subgraph_rewriter.py
234 issues
Line: 4
Column: 1
import os
import sys
import torch
from torch.fx import symbolic_trace, subgraph_rewriter
from torch.fx.annotate import annotate
# Make the helper files in test/ importable
from torch.fx.experimental.rewriter import RewritingTracer
Reported by Pylint.
Line: 5
Column: 1
import sys
import torch
from torch.fx import symbolic_trace, subgraph_rewriter
from torch.fx.annotate import annotate
# Make the helper files in test/ importable
from torch.fx.experimental.rewriter import RewritingTracer
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
Reported by Pylint.
Line: 6
Column: 1
import torch
from torch.fx import symbolic_trace, subgraph_rewriter
from torch.fx.annotate import annotate
# Make the helper files in test/ importable
from torch.fx.experimental.rewriter import RewritingTracer
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
Reported by Pylint.
Line: 8
Column: 1
from torch.fx import symbolic_trace, subgraph_rewriter
from torch.fx.annotate import annotate
# Make the helper files in test/ importable
from torch.fx.experimental.rewriter import RewritingTracer
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
Reported by Pylint.
Line: 12
Column: 1
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_fx.py TESTNAME\n\n"
"instead.")
Reported by Pylint.
Line: 287
Column: 17
def test_subgraph_rewriter_internal_pattern_nodes_cannot_have_users_that_are_not_matched(self):
class M(torch.nn.Module):
def forward(self, x, w1, w2, b1, b2):
m0 = torch.cat([w1, w2])
m1 = torch.cat([w1, w2])
m2 = torch.cat([x, b2])
t0 = torch.addmm(b1, m1, m2.t())
t1 = torch.sum(w1, 1)
t2 = torch.addmm(b1, m1, m2.t())
Reported by Pylint.
Line: 290
Column: 17
m0 = torch.cat([w1, w2])
m1 = torch.cat([w1, w2])
m2 = torch.cat([x, b2])
t0 = torch.addmm(b1, m1, m2.t())
t1 = torch.sum(w1, 1)
t2 = torch.addmm(b1, m1, m2.t())
return torch.sum(t1), torch.sum(t2)
def pattern(x, w1, w2, b1, b2):
Reported by Pylint.
Line: 300
Column: 40
m2 = torch.cat([x, b2])
return torch.addmm(b1, m1, m2.t())
def replacement(x, w1, w2, b1, b2):
return torch.cat([x, w1, w2])
traced = symbolic_trace(M())
# Result should be [] since no matches can be found
Reported by Pylint.
Line: 300
Column: 36
m2 = torch.cat([x, b2])
return torch.addmm(b1, m1, m2.t())
def replacement(x, w1, w2, b1, b2):
return torch.cat([x, w1, w2])
traced = symbolic_trace(M())
# Result should be [] since no matches can be found
Reported by Pylint.
Line: 1
Column: 1
import os
import sys
import torch
from torch.fx import symbolic_trace, subgraph_rewriter
from torch.fx.annotate import annotate
# Make the helper files in test/ importable
from torch.fx.experimental.rewriter import RewritingTracer
Reported by Pylint.
torch/onnx/symbolic_helper.py
233 issues
Line: 297
Column: 42
index_dim = _get_tensor_rank(index)
if not _is_value(index_const):
# Index is a constant scalar. Make it a size 1 constant tensor.
index = g.op("Constant", value_t=torch.LongTensor([index_const]))
elif index_dim is not None and apply_reshape:
if index_dim == 0:
# Index is a scalar. Reshape it to a size 1 tensor.
index = g.op("Reshape", index, g.op("Constant", value_t=torch.LongTensor([1])))
Reported by Pylint.
Line: 301
Column: 69
elif index_dim is not None and apply_reshape:
if index_dim == 0:
# Index is a scalar. Reshape it to a size 1 tensor.
index = g.op("Reshape", index, g.op("Constant", value_t=torch.LongTensor([1])))
index_scalar_type = index.type().scalarType()
if index_scalar_type is None or index_scalar_type not in ["Long", "Int"]:
index = g.op("Cast", index, to_i=cast_pytorch_to_onnx["Long"])
return g.op("Gather", self, index, axis_i=dim)
Reported by Pylint.
Line: 331
Column: 64
def _dtype_is_fp(type_value):
if type_value:
return (type_value == torch.float16) or (type_value == torch.float32) or (type_value == torch.float64)
return False
def _generate_wrapped_number(g, scalar):
"""
Create a wrapped number based on https://github.com/pytorch/pytorch/issues/9515
Reported by Pylint.
Line: 331
Column: 31
def _dtype_is_fp(type_value):
if type_value:
return (type_value == torch.float16) or (type_value == torch.float32) or (type_value == torch.float64)
return False
def _generate_wrapped_number(g, scalar):
"""
Create a wrapped number based on https://github.com/pytorch/pytorch/issues/9515
Reported by Pylint.
Line: 331
Column: 97
def _dtype_is_fp(type_value):
if type_value:
return (type_value == torch.float16) or (type_value == torch.float32) or (type_value == torch.float64)
return False
def _generate_wrapped_number(g, scalar):
"""
Create a wrapped number based on https://github.com/pytorch/pytorch/issues/9515
Reported by Pylint.
Line: 348
Column: 41
"""
assert not isinstance(scalar, torch.Tensor)
if isinstance(scalar, float):
return g.op("Constant", value_t=torch.tensor(scalar, dtype=torch.double))
return g.op("Constant", value_t=torch.tensor(scalar))
def _sort_helper(g, input, dim, decending=True, out=None):
if out is not None:
_unimplemented("Sort", "Out parameter is not supported")
Reported by Pylint.
Line: 348
Column: 68
"""
assert not isinstance(scalar, torch.Tensor)
if isinstance(scalar, float):
return g.op("Constant", value_t=torch.tensor(scalar, dtype=torch.double))
return g.op("Constant", value_t=torch.tensor(scalar))
def _sort_helper(g, input, dim, decending=True, out=None):
if out is not None:
_unimplemented("Sort", "Out parameter is not supported")
Reported by Pylint.
Line: 349
Column: 37
assert not isinstance(scalar, torch.Tensor)
if isinstance(scalar, float):
return g.op("Constant", value_t=torch.tensor(scalar, dtype=torch.double))
return g.op("Constant", value_t=torch.tensor(scalar))
def _sort_helper(g, input, dim, decending=True, out=None):
if out is not None:
_unimplemented("Sort", "Out parameter is not supported")
shape_ = g.op("Shape", input)
Reported by Pylint.
Line: 355
Column: 65
if out is not None:
_unimplemented("Sort", "Out parameter is not supported")
shape_ = g.op("Shape", input)
dim_size_ = g.op("Gather", shape_, g.op("Constant", value_t=torch.tensor([dim], dtype=torch.int64)))
if _export_onnx_opset_version <= 10:
if not decending:
_unimplemented("Sort", "Ascending is not supported")
return g.op("TopK", input, dim_size_, axis_i=dim, outputs=2)
else:
Reported by Pylint.
Line: 355
Column: 91
if out is not None:
_unimplemented("Sort", "Out parameter is not supported")
shape_ = g.op("Shape", input)
dim_size_ = g.op("Gather", shape_, g.op("Constant", value_t=torch.tensor([dim], dtype=torch.int64)))
if _export_onnx_opset_version <= 10:
if not decending:
_unimplemented("Sort", "Ascending is not supported")
return g.op("TopK", input, dim_size_, axis_i=dim, outputs=2)
else:
Reported by Pylint.
test/jit/test_pdt.py
233 issues
Line: 3
Column: 1
import os
import sys
import torch
from torch.testing._internal.jit_utils import JitTestCase, make_global
from torch.jit._monkeytype_config import _IS_MONKEYTYPE_INSTALLED
from typing import List, Dict, Tuple, Any, Optional, NamedTuple # noqa: F401
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
Reported by Pylint.
Line: 4
Column: 1
import os
import sys
import torch
from torch.testing._internal.jit_utils import JitTestCase, make_global
from torch.jit._monkeytype_config import _IS_MONKEYTYPE_INSTALLED
from typing import List, Dict, Tuple, Any, Optional, NamedTuple # noqa: F401
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
Reported by Pylint.
Line: 5
Column: 1
import sys
import torch
from torch.testing._internal.jit_utils import JitTestCase, make_global
from torch.jit._monkeytype_config import _IS_MONKEYTYPE_INSTALLED
from typing import List, Dict, Tuple, Any, Optional, NamedTuple # noqa: F401
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
Reported by Pylint.
Line: 6
Column: 1
import torch
from torch.testing._internal.jit_utils import JitTestCase, make_global
from torch.jit._monkeytype_config import _IS_MONKEYTYPE_INSTALLED
from typing import List, Dict, Tuple, Any, Optional, NamedTuple # noqa: F401
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
Reported by Pylint.
Line: 29
Column: 13
"""
def test_nn_module(self):
class TestPDTModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x) -> Any:
if isinstance(x, int):
return x + 1
Reported by Pylint.
Line: 43
Column: 30
make_global(TestPDTModel)
pdt_model = TestPDTModel()
inp: List[Tuple[Any, ...]] = [(20, ), (2.7, ), (False, ), ]
scripted_pdt_model = torch.jit._script_pdt(pdt_model, example_inputs={pdt_model: inp})
self.assertEqual(scripted_pdt_model(50), pdt_model(50))
self.assertEqual(scripted_pdt_model(1.8), pdt_model(1.8))
self.assertTrue(scripted_pdt_model(True), pdt_model(True))
def test_nested_nn_module_class(self):
Reported by Pylint.
Line: 50
Column: 13
def test_nested_nn_module_class(self):
class NestedPDTInner(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
if isinstance(x, int):
return x * 10
Reported by Pylint.
Line: 70
Column: 30
inner_pdt_model = NestedPDTInner()
wrapped_pdt_model = NestedModulePDTWrapper(inner_pdt_model)
inp: List[Tuple[Any, ...]] = [(20, ), (False, )]
scripted_pdt_model = torch.jit._script_pdt(wrapped_pdt_model, example_inputs={wrapped_pdt_model: inp})
self.assertEqual(scripted_pdt_model(30), wrapped_pdt_model(30))
self.assertEqual(scripted_pdt_model(1.9), wrapped_pdt_model(1.9))
self.assertTrue(scripted_pdt_model(True), wrapped_pdt_model(True))
def test_nested_nn_module_class_with_args(self):
Reported by Pylint.
Line: 77
Column: 13
def test_nested_nn_module_class_with_args(self):
class NestedModulePDTInner(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
if isinstance(x, int):
return x * 10 + y
Reported by Pylint.
Line: 98
Column: 30
outer_pdt_model = NestedModulePDTOuter(inner_pdt_model)
inner_input: List[Tuple[Any, ...]] = [(10, 10), (1.9, 20), ]
outer_input: List[Tuple[Any, ...]] = [(20, ), (False, )]
scripted_pdt_model = torch.jit._script_pdt(outer_pdt_model, example_inputs={inner_pdt_model: inner_input,
outer_pdt_model: outer_input, })
self.assertEqual(scripted_pdt_model(30), outer_pdt_model(30))
self.assertEqual(scripted_pdt_model(1.9), outer_pdt_model(1.9))
self.assertTrue(scripted_pdt_model(True), outer_pdt_model(True))
Reported by Pylint.
test/test_sort_and_select.py
232 issues
Line: 1
Column: 1
import torch
import numpy as np
import random
from torch._six import nan
from itertools import permutations, product
from torch.testing import all_types, all_types_and
from torch.testing._internal.common_utils import \
Reported by Pylint.
Line: 5
Column: 1
import numpy as np
import random
from torch._six import nan
from itertools import permutations, product
from torch.testing import all_types, all_types_and
from torch.testing._internal.common_utils import \
(TEST_WITH_ROCM, TestCase, run_tests, make_tensor, slowTest)
Reported by Pylint.
Line: 8
Column: 1
from torch._six import nan
from itertools import permutations, product
from torch.testing import all_types, all_types_and
from torch.testing._internal.common_utils import \
(TEST_WITH_ROCM, TestCase, run_tests, make_tensor, slowTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, onlyOnCPUAndCUDA,
skipCUDAIfRocm, onlyCUDA, dtypesIfCUDA, dtypesIfCPU, onlyCPU, largeTensorTest)
Reported by Pylint.
Line: 9
Column: 1
from itertools import permutations, product
from torch.testing import all_types, all_types_and
from torch.testing._internal.common_utils import \
(TEST_WITH_ROCM, TestCase, run_tests, make_tensor, slowTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, onlyOnCPUAndCUDA,
skipCUDAIfRocm, onlyCUDA, dtypesIfCUDA, dtypesIfCPU, onlyCPU, largeTensorTest)
Reported by Pylint.
Line: 11
Column: 1
from torch.testing import all_types, all_types_and
from torch.testing._internal.common_utils import \
(TEST_WITH_ROCM, TestCase, run_tests, make_tensor, slowTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, onlyOnCPUAndCUDA,
skipCUDAIfRocm, onlyCUDA, dtypesIfCUDA, dtypesIfCPU, onlyCPU, largeTensorTest)
# TODO: remove this
SIZE = 100
Reported by Pylint.
Line: 33
Column: 13
# see above
return ((b != b) | (a <= b)).all().item()
else:
error('unknown order "{}", must be "ascending" or "descending"'.format(order))
are_ordered = True
for k in range(1, SIZE):
self.assertTrue(check_order(mxx[:, k - 1], mxx[:, k]),
'torch.sort ({}) values unordered for {}'.format(order, task))
Reported by Pylint.
Line: 15
Column: 3
(instantiate_device_type_tests, dtypes, onlyOnCPUAndCUDA,
skipCUDAIfRocm, onlyCUDA, dtypesIfCUDA, dtypesIfCPU, onlyCPU, largeTensorTest)
# TODO: remove this
SIZE = 100
class TestSortAndSelect(TestCase):
def assertIsOrdered(self, order, x, mxx, ixx, task):
Reported by Pylint.
Line: 21
Column: 9
class TestSortAndSelect(TestCase):
def assertIsOrdered(self, order, x, mxx, ixx, task):
SIZE = x.size(1)
if order == 'descending':
def check_order(a, b):
# `a != a` because we put NaNs
# at the end of ascending sorted lists,
# and the beginning of descending ones.
Reported by Pylint.
Line: 35
Column: 9
else:
error('unknown order "{}", must be "ascending" or "descending"'.format(order))
are_ordered = True
for k in range(1, SIZE):
self.assertTrue(check_order(mxx[:, k - 1], mxx[:, k]),
'torch.sort ({}) values unordered for {}'.format(order, task))
seen = set()
Reported by Pylint.
Line: 41
Column: 9
'torch.sort ({}) values unordered for {}'.format(order, task))
seen = set()
indicesCorrect = True
size0 = x.size(0)
size = x.size(x.dim() - 1)
x = x.tolist()
mxx = mxx.tolist()
ixx = ixx.tolist()
Reported by Pylint.
torch/jit/_script.py
232 issues
Line: 39
Column: 1
from torch.overrides import (
has_torch_function, has_torch_function_unary, has_torch_function_variadic)
from torch.package import PackageExporter, PackageImporter
from ._serialization import validate_map_location
from torch.jit._monkeytype_config import (
monkeytype_trace,
JitTypeTraceConfig ,
JitTypeTraceStore
Reported by Pylint.
Line: 238
Column: 5
# parameters are initialized _before_ the script compiler resolve references to
# `self.param` or `self.module`.
class ScriptMeta(type):
def __init__(cls, name, bases, attrs): # noqa: B902
# Aggregate all the ScriptMethods and constants from superclasses
cls._methods: Dict[str, Any] = {}
cls._constants_set = set(getattr(cls, "__constants__", ()))
for base in reversed(bases):
for k, v in getattr(base, "_methods", {}).items():
Reported by Pylint.
Line: 298
Column: 16
class _CachedForward(object):
def __get__(self, obj, cls):
return self.__getattr__("forward") # type: ignore[attr-defined]
class ScriptWarning(Warning):
pass
Reported by Pylint.
Line: 410
Column: 24
def __getattr__(self, attr):
if "_initializing" in self.__dict__ and self.__dict__["_initializing"]:
return super(RecursiveScriptClass, self).__getattr__(attr) # type: ignore[misc]
if attr in self._props:
return self._props[attr].fget()
return getattr(self._c, attr)
Reported by Pylint.
Line: 50
Column: 1
type_trace_db = JitTypeTraceStore() # DB to hold all call traces from MonkeyType
torch._C.ScriptMethod.graph_for = _graph_for # type: ignore[attr-defined]
torch._C.ScriptFunction.graph_for = _graph_for # type: ignore[attr-defined]
ScriptFunction = torch._C.ScriptFunction
ScriptFunction.__doc__ = """
Functionally equivalent to a :class:`ScriptModule`, but represents a single
function and does not have any attributes or Parameters.
Reported by Pylint.
Line: 51
Column: 1
type_trace_db = JitTypeTraceStore() # DB to hold all call traces from MonkeyType
torch._C.ScriptMethod.graph_for = _graph_for # type: ignore[attr-defined]
torch._C.ScriptFunction.graph_for = _graph_for # type: ignore[attr-defined]
ScriptFunction = torch._C.ScriptFunction
ScriptFunction.__doc__ = """
Functionally equivalent to a :class:`ScriptModule`, but represents a single
function and does not have any attributes or Parameters.
"""
Reported by Pylint.
Line: 52
Column: 18
torch._C.ScriptMethod.graph_for = _graph_for # type: ignore[attr-defined]
torch._C.ScriptFunction.graph_for = _graph_for # type: ignore[attr-defined]
ScriptFunction = torch._C.ScriptFunction
ScriptFunction.__doc__ = """
Functionally equivalent to a :class:`ScriptModule`, but represents a single
function and does not have any attributes or Parameters.
"""
set_module(ScriptFunction, "torch.jit")
Reported by Pylint.
Line: 64
Column: 26
Attribute = collections.namedtuple("Attribute", ["value", "type"])
else:
def Attribute(value, type): # type: ignore[no-redef]
return value
Attribute.__doc__ = """
This method is a pass-through function that returns `value`, mostly
used to indicate to the TorchScript compiler that the left-hand side
Reported by Pylint.
Line: 64
Column: 26
Attribute = collections.namedtuple("Attribute", ["value", "type"])
else:
def Attribute(value, type): # type: ignore[no-redef]
return value
Attribute.__doc__ = """
This method is a pass-through function that returns `value`, mostly
used to indicate to the TorchScript compiler that the left-hand side
Reported by Pylint.
Line: 126
Column: 34
return type_trace_db
# Gets a function from the name of a method on a type
def _get_function_from_type(cls, name):
return getattr(cls, name, None)
# ScriptClasses must be new-style classes because we construct them using their
# __new__ method.
Reported by Pylint.
test/test_serialization.py
229 issues
Line: 1
Column: 1
import torch
import unittest
import io
import tempfile
import os
import sys
import zipfile
import warnings
import gzip
Reported by Pylint.
Line: 15
Column: 1
import shutil
import pathlib
from torch._utils_internal import get_file_path_2
from torch._utils import _rebuild_tensor
from torch.serialization import check_module_version_greater_or_equal
from torch.testing._internal.common_utils import TestCase, IS_WINDOWS, \
TEST_DILL, run_tests, download_file, BytesIOContext, TemporaryFileName
Reported by Pylint.
Line: 16
Column: 1
import pathlib
from torch._utils_internal import get_file_path_2
from torch._utils import _rebuild_tensor
from torch.serialization import check_module_version_greater_or_equal
from torch.testing._internal.common_utils import TestCase, IS_WINDOWS, \
TEST_DILL, run_tests, download_file, BytesIOContext, TemporaryFileName
from torch.testing._internal.common_device_type import instantiate_device_type_tests
Reported by Pylint.
Line: 17
Column: 1
from torch._utils_internal import get_file_path_2
from torch._utils import _rebuild_tensor
from torch.serialization import check_module_version_greater_or_equal
from torch.testing._internal.common_utils import TestCase, IS_WINDOWS, \
TEST_DILL, run_tests, download_file, BytesIOContext, TemporaryFileName
from torch.testing._internal.common_device_type import instantiate_device_type_tests
Reported by Pylint.
Line: 19
Column: 1
from torch._utils import _rebuild_tensor
from torch.serialization import check_module_version_greater_or_equal
from torch.testing._internal.common_utils import TestCase, IS_WINDOWS, \
TEST_DILL, run_tests, download_file, BytesIOContext, TemporaryFileName
from torch.testing._internal.common_device_type import instantiate_device_type_tests
# These tests were all copied from `test/test_torch.py` at some point, so see
# the actual blame, see this revision
Reported by Pylint.
Line: 21
Column: 1
from torch.testing._internal.common_utils import TestCase, IS_WINDOWS, \
TEST_DILL, run_tests, download_file, BytesIOContext, TemporaryFileName
from torch.testing._internal.common_device_type import instantiate_device_type_tests
# These tests were all copied from `test/test_torch.py` at some point, so see
# the actual blame, see this revision
# https://github.com/pytorch/pytorch/blame/9a2691f2fc948b9792686085b493c61793c2de30/test/test_torch.py
Reported by Pylint.
Line: 28
Column: 5
# https://github.com/pytorch/pytorch/blame/9a2691f2fc948b9792686085b493c61793c2de30/test/test_torch.py
if TEST_DILL:
import dill
HAS_DILL_AT_LEAST_0_3_1 = check_module_version_greater_or_equal(dill, (0, 3, 1))
else:
HAS_DILL_AT_LEAST_0_3_1 = False
can_retrieve_source = True
Reported by Pylint.
Line: 457
Column: 9
self.assertTrue(expected_superset.issuperset(filemock.calls))
# Reset between save and load
filemock.seek(0)
filemock.calls.clear()
_ = torch.load(filemock)
expected_superset = {'read', 'readline', 'seek', 'tell'}
self.assertTrue(expected_superset.issuperset(filemock.calls))
Reported by Pylint.
Line: 109
Column: 43
# check that serializing the same storage view object unpickles
# it as one object not two (and vice versa)
views = c[7]
self.assertEqual(views[0]._cdata, views[1]._cdata)
self.assertEqual(views[0], views[2])
self.assertNotEqual(views[0]._cdata, views[2]._cdata)
rootview = c[8]
self.assertEqual(rootview.data_ptr(), c[0].data_ptr())
Reported by Pylint.
Line: 109
Column: 26
# check that serializing the same storage view object unpickles
# it as one object not two (and vice versa)
views = c[7]
self.assertEqual(views[0]._cdata, views[1]._cdata)
self.assertEqual(views[0], views[2])
self.assertNotEqual(views[0]._cdata, views[2]._cdata)
rootview = c[8]
self.assertEqual(rootview.data_ptr(), c[0].data_ptr())
Reported by Pylint.
caffe2/python/nomnigraph_test.py
227 issues
Line: 10
Column: 1
from caffe2.proto import caffe2_pb2
import caffe2.python.nomnigraph as ng
from hypothesis import given
import hypothesis.strategies as st
import random
class TestBindings(test_util.TestCase):
Reported by Pylint.
Line: 11
Column: 1
import caffe2.python.nomnigraph as ng
from hypothesis import given
import hypothesis.strategies as st
import random
class TestBindings(test_util.TestCase):
def test_simple(self):
Reported by Pylint.
Line: 255
Column: 9
g = ng.Graph()
n1 = g.createNode("hello1")
n2 = g.createNode("hello2")
e = g.createEdge(n1, n2)
ng.render(g)
def test_createUniqueDataNode(self):
net = core.Net("name")
nn = ng.NNModule(net)
Reported by Pylint.
Line: 440
Column: 9
net = core.Net("name")
net.FC(["X", "W"], ["Y"])
d = caffe2_pb2.DeviceOption()
nn = ng.NNModule(net, {"X": d, "W": d})
with self.assertRaises(Exception):
nn = ng.NNModule(net, {"X": d, "Fake": d})
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core, test_util
from caffe2.proto import caffe2_pb2
import caffe2.python.nomnigraph as ng
Reported by Pylint.
Line: 12
Column: 1
from hypothesis import given
import hypothesis.strategies as st
import random
class TestBindings(test_util.TestCase):
def test_simple(self):
nn = ng.NNModule()
Reported by Pylint.
Line: 15
Column: 1
import random
class TestBindings(test_util.TestCase):
def test_simple(self):
nn = ng.NNModule()
dfg = nn.dataFlow
dfg.createNode(ng.NeuralNetData("X"))
dfg.createNode(ng.NeuralNetOperator("FC"))
Reported by Pylint.
Line: 15
Column: 1
import random
class TestBindings(test_util.TestCase):
def test_simple(self):
nn = ng.NNModule()
dfg = nn.dataFlow
dfg.createNode(ng.NeuralNetData("X"))
dfg.createNode(ng.NeuralNetOperator("FC"))
Reported by Pylint.
Line: 16
Column: 5
class TestBindings(test_util.TestCase):
def test_simple(self):
nn = ng.NNModule()
dfg = nn.dataFlow
dfg.createNode(ng.NeuralNetData("X"))
dfg.createNode(ng.NeuralNetOperator("FC"))
assert len(nn.dataFlow.getMutableNodes()) == 2
Reported by Pylint.
Line: 16
Column: 5
class TestBindings(test_util.TestCase):
def test_simple(self):
nn = ng.NNModule()
dfg = nn.dataFlow
dfg.createNode(ng.NeuralNetData("X"))
dfg.createNode(ng.NeuralNetOperator("FC"))
assert len(nn.dataFlow.getMutableNodes()) == 2
Reported by Pylint.
caffe2/python/optimizer.py
226 issues
Line: 43
Column: 5
self._local_lr_multiplier = None
self._local_lr_multiplier_on_gpu = False
"""
Adds optimization operators to the net for given parameter and its gradient
Parameter is specified by either 'param' being a ParameterInfo object.
In this case param.grad has to be set
Or by 'param' being a BlobReference and 'grad' being a BlobReference for its
Reported by Pylint.
Line: 222
Column: 3
"""
return self._aux_params
# TODO(xlwang): In transfer learning, parameter initialized from pretrained
# model might require a different learning rate than otherwise initialized.
# To this end, here we implement a python solution where
# `base_learning_rate` is scaled by `scale`, by calling
# `scale_learning_rate`; Alternatively, we can achieve same effect by
# rewriting the LearningRate operator in C++
Reported by Pylint.
Line: 275
Column: 3
self._clear_local_lr_multiplier()
# TODO(zqq): support LARS for sparse parameters
if self.lars is not None and not isinstance(grad, core.GradientSlice):
assert self.lars >= 0, "Lars offset must be nonnegative, got {}".format(
self.lars
)
wd, trust, lr_max = self.create_lars_inputs(
Reported by Pylint.
Line: 356
Column: 5
net.WeightedSum([param, ONE, grad, coeff], param)
def scale_learning_rate(self, scale):
self.base_learning_rate *= scale
return
class MultiPrecisionSgdOptimizer(SgdOptimizer):
Reported by Pylint.
Line: 452
Column: 5
)
self.weight_decay = weight_decay
def _run(self, net, param_init_net, param_info, fp32_update=False):
fp32_update_flag = 0
param_name = str(param_info.blob)
# should only be triggered in FP16 training by SpatialBN, which
Reported by Pylint.
Line: 539
Column: 1
)
class WeightDecayBuilder(Optimizer):
def __init__(self, weight_decay):
self.weight_decay = weight_decay
def _run(self, net, param_init_net, param_info):
dev = scope.CurrentDeviceScope()
Reported by Pylint.
Line: 540
Column: 5
class WeightDecayBuilder(Optimizer):
def __init__(self, weight_decay):
self.weight_decay = weight_decay
def _run(self, net, param_init_net, param_info):
dev = scope.CurrentDeviceScope()
if dev is None:
Reported by Pylint.
Line: 588
Column: 25
**kwargs
):
for k, v in locals().items():
logger.info("AdagradOptimizer: input arguments: {}: {}".format(k, v))
super(AdagradOptimizer, self).__init__()
self.alpha = alpha
self.epsilon = epsilon
self.decay = decay
Reported by Pylint.
Line: 739
Column: 17
if self.rowWise:
logger.info(
"Using engine {} for rowWise Adagrad to train param {}".format(
self.engine, param
)
)
shapes, types = workspace.InferShapesAndTypes([param_init_net])
Reported by Pylint.
Line: 744
Column: 21
)
)
shapes, types = workspace.InferShapesAndTypes([param_init_net])
if str(param) not in shapes:
# Type/shape inference is not available for this param, fallback
# on Shape/Slice logic
shape = param_init_net.Shape(param, str(param) + "_shape")
num_rows = param_init_net.Slice(
Reported by Pylint.