The following issues were found:
torch/_jit_internal.py
185 issues
Line: 34
Column: 5
if sys.version_info[:2] > (3, 7):
from typing import Final
else:
from typing_extensions import Final
LockType: Type
try:
import _thread
LockType = _thread.LockType
Reported by Pylint.
Line: 927
Column: 5
if torch.distributed.rpc.is_available():
from torch.distributed.rpc import RRef
from torch._C._distributed_rpc import PyRRef
def is_rref(ann) -> bool:
if ann is RRef:
raise RuntimeError(
"Attempted to use RRef without a "
Reported by Pylint.
Line: 29
Column: 1
from torch._sources import get_source_lines_and_file, parse_def, fake_range
from torch.futures import Future
import torch.package._mangling as package_mangling
from typing import Any, Callable, Dict, Generic, List, Optional, Tuple, Type, TypeVar, Union # noqa: F401
if sys.version_info[:2] > (3, 7):
from typing import Final
else:
from typing_extensions import Final
Reported by Pylint.
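No message text survives in this report, but line 29, column 1 sits in the import block, and `import torch.package._mangling as package_mangling` is the style Pylint's consider-using-from-import (R0402) flags; treat the sketch below as an assumption about the message, not a confirmed fix.

from torch.package import _mangling as package_mangling  # equivalent form that satisfies R0402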
Line: 69
Column: 9
return getattr(module, qualified_name)
def parseNestedExpr(expr, module) -> Tuple[Any, int]:
i = 0
while i < len(expr) and expr[i] not in (',', '[', ']'):
i += 1
# Special case logic for the empty Tuple as a subscript (used
# in the type annotation `Tuple[()]`)
Reported by Pylint.
Line: 101
Column: 16
value, len_parsed = parseNestedExpr(expr, module)
assert len_parsed == len(expr), "whole expression was not parsed, falling back to c++ parser"
return value
except Exception:
"""
The python resolver fails in several cases in known unit tests, and is intended
to fall back gracefully to the c++ resolver in general. For example, python 2 style
annotations which are frequent in our unit tests often fail with types e.g. int not
resolvable from the calling frame.
Reported by Pylint.
Line: 102
Column: 13
assert len_parsed == len(expr), "whole expression was not parsed, falling back to c++ parser"
return value
except Exception:
"""
The python resolver fails in several cases in known unit tests, and is intended
to fall back gracefully to the c++ resolver in general. For example, python 2 style
annotations which are frequent in our unit tests often fail with types e.g. int not
resolvable from the calling frame.
"""
Reported by Pylint.
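The finding at line 102, column 13 points at the triple-quoted block inside `except Exception:`, which is an executable string expression; Pylint reports these as pointless-string-statement (W0105). A minimal sketch of the conventional fix, with the hypothetical helper `parse_expr` standing in for the surrounding code:

def resolve(expr, module):
    try:
        return parse_expr(expr, module)  # hypothetical stand-in for the real parsing call
    except Exception:
        # The python resolver fails in several known cases and is intended to
        # fall back gracefully to the c++ resolver, so swallow and signal None.
        return None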
Line: 141
Column: 5
baz()
"""
frame = inspect.currentframe()
i = 0
while i < frames_up + 1:
assert frame is not None
frame = frame.f_back
i += 1
Reported by Pylint.
Line: 276
Column: 12
# inspect.signature may fail, give up in that case.
try:
callable_signature = inspect.signature(fn)
except Exception:
return []
argument_names = []
for name, param in callable_signature.parameters.items():
# All four other types of arguments do not map to individual values
Reported by Pylint.
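Here `except Exception` around `inspect.signature` is the broad-except (W0703) pattern Pylint flags. `inspect.signature` documents raising `ValueError` when no signature can be provided and `TypeError` for unsupported objects, so narrowing the clause is a plausible fix:

import inspect

def argument_names(fn):
    # Catch only what inspect.signature is documented to raise.
    try:
        callable_signature = inspect.signature(fn)
    except (ValueError, TypeError):
        return []
    return list(callable_signature.parameters)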
Line: 507
Column: 5
# any compiled methods and wasn't decorated with `@torch.jit.export`
m = torch.jit.script(MyModule())
"""
fn._torchscript_modifier = FunctionModifiers.EXPORT
return fn
def unused(fn):
"""
Reported by Pylint.
caffe2/python/operator_test/reduce_ops_test.py
182 issues
Line: 7
Column: 1
from caffe2.python import core, workspace
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
Reported by Pylint.
Line: 11
Column: 1
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
import itertools as it
class TestReduceOps(serial.SerializedTestCase):
Reported by Pylint.
Line: 228
Column: 57
@given(num_reduce_dim=st.integers(0, 4), **hu.gcs)
@settings(deadline=10000)
def test_reduce_front_sum(self, num_reduce_dim, gc, dc):
X = np.random.rand(7, 4, 3, 5).astype(np.float32)
def ref_sum(X):
return [np.sum(X, axis=(tuple(range(num_reduce_dim))))]
Reported by Pylint.
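Column 57 lands on the `dc` parameter injected by `**hu.gcs`, so an unused-argument (W0613) message is the most plausible reading. The signature is fixed by the `@given` decorator, so the usual remedy is an inline disable (or an underscore rename) rather than removing the parameter; a sketch under that assumption:

@given(num_reduce_dim=st.integers(0, 4), **hu.gcs)
@settings(deadline=10000)
def test_reduce_front_sum(self, num_reduce_dim, gc, dc):  # pylint: disable=unused-argument
    X = np.random.rand(7, 4, 3, 5).astype(np.float32)
    ...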
Line: 240
Column: 75
"ReduceFrontSumGradient", X, ref_sum, num_reduce_dim)
@given(num_reduce_dim=st.integers(0, 4), seed=st.integers(0, 4), **hu.gcs)
def test_reduce_front_sum_empty_batch(self, num_reduce_dim, seed, gc, dc):
np.random.seed(seed)
X = np.random.rand(0, 4, 3, 5).astype(np.float32)
def ref_sum(X):
return [np.sum(X, axis=(tuple(range(num_reduce_dim))))]
Reported by Pylint.
Line: 275
Column: 49
@given(**hu.gcs)
@settings(deadline=None)
def test_reduce_front_sum_with_length(self, dc, gc):
num_reduce_dim = 1
X = np.random.rand(2, 3, 4, 5).astype(np.float32)
batch_size = int(np.prod([2, 3, 4, 5][num_reduce_dim:]))
d = 120 // batch_size
lengths = np.random.randint(1, d, size=batch_size).astype(np.int32)
Reported by Pylint.
Line: 295
Column: 58
@given(num_reduce_dim=st.integers(0, 4), **hu.gcs)
@settings(deadline=10000)
def test_reduce_front_mean(self, num_reduce_dim, gc, dc):
X = np.random.rand(6, 7, 8, 2).astype(np.float32)
def ref_mean(X):
return [np.mean(X, axis=(tuple(range(num_reduce_dim))))]
Reported by Pylint.
Line: 308
Column: 50
@given(**hu.gcs)
@settings(deadline=10000)
def test_reduce_front_mean_with_length(self, dc, gc):
num_reduce_dim = 1
X = np.random.rand(2, 3, 4, 5).astype(np.float32)
batch_size = int(np.prod([2, 3, 4, 5][num_reduce_dim:]))
d = 120 // batch_size
lengths = np.random.randint(1, d, size=batch_size).astype(np.int32)
Reported by Pylint.
Line: 386
Column: 36
@given(**hu.gcs)
@settings(deadline=10000)
def test_reduce_back_sum(self, dc, gc):
num_reduce_dim = 1
X = np.random.rand(6, 7, 8, 2).astype(np.float32)
def ref_sum(X):
return [np.sum(X, axis=(0, 1, 2, 3)[4 - num_reduce_dim:])]
Reported by Pylint.
Line: 400
Column: 48
@given(**hu.gcs)
@settings(deadline=10000)
def test_reduce_back_sum_with_length(self, dc, gc):
num_reduce_dim = 1
X = np.random.rand(2, 3, 4, 5).astype(np.float32)
batch_size = int(np.prod([2, 3, 4, 5][:4 - num_reduce_dim]))
d = 120 // batch_size
lengths = np.random.randint(1, d, size=batch_size).astype(np.int32)
Reported by Pylint.
Line: 420
Column: 53
@given(num_reduce_dim=st.integers(0, 4), **hu.gcs)
@settings(deadline=10000)
def test_reduce_back_mean(self, num_reduce_dim, dc, gc):
X = np.random.rand(6, 7, 8, 2).astype(np.float32)
def ref_mean(X):
return [np.mean(X, axis=(0, 1, 2, 3)[4 - num_reduce_dim:])]
Reported by Pylint.
tools/codegen/gen.py
181 issues
Line: 3
Column: 1
import os
from typing import List, Dict, Optional, Tuple, Set, Callable, Any, Union, Sequence, TypeVar
from typing_extensions import Literal
import yaml
from collections import OrderedDict, defaultdict, namedtuple
import argparse
import pathlib
import functools
import json
Reported by Pylint.
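The column-1 finding in this import block is consistent with wrong-import-order (C0411): standard-library and third-party imports are interleaved. A sketch of the PEP 8 grouping, assuming that is the message:

import argparse
import functools
import json
import os
import pathlib
from collections import OrderedDict, defaultdict, namedtuple
from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple, TypeVar, Union

import yaml
from typing_extensions import Literal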
Line: 82
Column: 5
# Parse native_functions.yaml into a sequence of NativeFunctions and Backend Indices.
ParsedYaml = namedtuple('ParsedYaml', ['native_functions', 'backend_indices'])
def parse_native_yaml(path: str) -> ParsedYaml:
global _GLOBAL_PARSE_NATIVE_YAML_CACHE
if path not in _GLOBAL_PARSE_NATIVE_YAML_CACHE:
with open(path, 'r') as f:
es = yaml.load(f, Loader=LineLoader)
assert isinstance(es, list)
rs: List[NativeFunction] = []
Reported by Pylint.
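`global _GLOBAL_PARSE_NATIVE_YAML_CACHE` matches Pylint's global-statement (W0603). If the function only mutates the cache dict and never rebinds the name, the `global` declaration is unnecessary; a hedged sketch with a hypothetical `_parse_uncached` helper:

_GLOBAL_PARSE_NATIVE_YAML_CACHE: Dict[str, ParsedYaml] = {}

def parse_native_yaml(path: str) -> ParsedYaml:
    # Mutating a module-level dict needs no `global` statement.
    if path not in _GLOBAL_PARSE_NATIVE_YAML_CACHE:
        _GLOBAL_PARSE_NATIVE_YAML_CACHE[path] = _parse_uncached(path)  # hypothetical helper
    return _GLOBAL_PARSE_NATIVE_YAML_CACHE[path]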
Line: 85
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b506_yaml_load.html
global _GLOBAL_PARSE_NATIVE_YAML_CACHE
if path not in _GLOBAL_PARSE_NATIVE_YAML_CACHE:
with open(path, 'r') as f:
es = yaml.load(f, Loader=LineLoader)
assert isinstance(es, list)
rs: List[NativeFunction] = []
bs: Dict[DispatchKey, Dict[OperatorName, BackendMetadata]] = defaultdict(dict)
for e in es:
assert isinstance(e.get('__line__'), int), e
Reported by Bandit.
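Bandit B506 fires on every `yaml.load`, even with an explicit loader argument. If `LineLoader` can be based on `yaml.SafeLoader`, the call is safe in practice; a self-contained sketch of that approach (the `construct_mapping` override is an assumption about what `LineLoader` does):

import yaml

class LineLoader(yaml.SafeLoader):
    # Assumed behavior: record the 1-based source line of each mapping.
    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        mapping['__line__'] = node.start_mark.line + 1
        return mapping

with open('native_functions.yaml', 'r') as f:
    es = yaml.load(f, Loader=LineLoader)  # nosec B506 - SafeLoader-based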
Line: 93
Column: 50
assert isinstance(e.get('__line__'), int), e
loc = Location(path, e['__line__'])
funcs = e.get('func')
with context(lambda: f'in {loc}:\n {funcs}'):
func, m = NativeFunction.from_yaml(e, loc)
rs.append(func)
BackendIndex.grow_index(bs, m)
error_check_native_functions(rs)
# Default dict is to prevent the codegen from barfing when we have a dispatch key that has no kernels yet.
Reported by Pylint.
Line: 93
Column: 40
assert isinstance(e.get('__line__'), int), e
loc = Location(path, e['__line__'])
funcs = e.get('func')
with context(lambda: f'in {loc}:\n {funcs}'):
func, m = NativeFunction.from_yaml(e, loc)
rs.append(func)
BackendIndex.grow_index(bs, m)
error_check_native_functions(rs)
# Default dict is to prevent the codegen from barfing when we have a dispatch key that has no kernels yet.
Reported by Pylint.
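Both line-93 findings point into the `lambda` that closes over the loop variables `loc` and `funcs`, the signature of cell-var-from-loop (W0640). Binding the current values as default arguments is the standard fix; a runnable demonstration, followed by the form the flagged line would take:

callbacks = []
for i in range(3):
    callbacks.append(lambda i=i: f'item {i}')  # default arg captures the value now
print([cb() for cb in callbacks])  # ['item 0', 'item 1', 'item 2']

# Applied to the flagged line:
# with context(lambda loc=loc, funcs=funcs: f'in {loc}:\n  {funcs}'):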
Line: 177
Column: 3
exprs_str = ', '.join(a.expr for a in exprs)
if f.structured_delegate is not None:
# TODO: for ops with structured_delegate it should check the dispatch table of
# the out variant instead. For now, these structured ops all have CPU/CUDA kernels
# so we always dispatch to the `backend`, but this could be wrong when we
# migrate math/default_backend ops to use structured delegate.
return f'return at::{backend_index.dispatch_key.lower()}::{name}({exprs_str});'
Reported by Pylint.
Line: 441
Column: 3
# Generates ATenOpList.cpp, a runtime accessible list of all aten
# operators.
# TODO: This was historically used to help some JIT interop code
# figure out whether or not to treat aten namespace'd operators
# one way or another, we should reevaluate if this is actually needed.
@with_native_function
def compute_aten_op(f: NativeFunction) -> str:
return f'{{"aten::{f.func.name.name}", "{f.func.name.overload_name}"}},'
Reported by Pylint.
Line: 578
Column: 3
# the C++ API argument type, except that Tensor and Tensor?
# arguments simply present as Tensor.
#
# TODO: Get rid of dynamic_type, after getting tools/autograd
# to use the new codegen framework
def dynamic_type(t: Type) -> str:
if isinstance(t, OptionalType):
return dynamic_type(t.elem)
# Note we don't use t.is_tensor_like() here because it would
Reported by Pylint.
Line: 683
Column: 43
cpp_a.argument, schema_order=schema_order,
kwarg_only_set=kwarg_only_set, out_arg_set=out_arg_set, name_to_field_name=name_to_field_name)
def compute_argument_yaml(a: Argument, *, schema_order: bool, kwarg_only_set: Set[str],
out_arg_set: Set[str], name_to_field_name: Dict[str, str]) -> object:
arg: Dict[str, object] = {
'annotation': str(a.annotation) if a.annotation else None,
'dynamic_type': dynamic_type(a.type),
'is_nullable': a.type.is_nullable(),
Reported by Pylint.
Line: 785
Column: 3
args_str = ', '.join(a.no_default().decl_registration_declarations() for a in args)
comment_data : Dict[str, str] = {
'schema': f'aten::{f.func}',
# TODO: What exactly is the semantics of the 'dispatch' field?
'dispatch': str({k for k, v in backend_indices.items() if v.has_kernel(f)} != {DispatchKey.CompositeImplicitAutograd}),
'default': str(f.has_composite_kernel or has_autogenerated_composite_kernel(f))
}
return f"""{returns_type} {name}({args_str}); // {json.dumps(comment_data)}
"""
Reported by Pylint.
torch/jit/quantized.py
181 issues
Line: 18
Column: 70
self.in_features = other.in_features
self.out_features = other.out_features
# Quantize weight and discard the original
self.weight, self.col_offsets, self.scale, self.zero_point = torch.fbgemm_linear_quantize_weight(
other.weight.clone(memory_format=torch.contiguous_format).float())
self.weight = torch.nn.Parameter(self.weight, requires_grad=False)
self.col_offsets = torch.nn.Parameter(self.col_offsets, requires_grad=False)
assert other.bias is not None, 'QuantizedLinear requires a bias'
self.bias = torch.nn.Parameter(other.bias.clone(memory_format=torch.contiguous_format).float(), requires_grad=False)
Reported by Pylint.
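Column 70 lands on `torch.fbgemm_linear_quantize_weight`, a C-extension binding Pylint cannot introspect, so no-member (E1101) is the likely message throughout this file. The usual remedy is configuration (`generated-members=torch.*` under `[TYPECHECK]` in .pylintrc) or an inline disable; a sketch of the latter on the flagged line:

self.weight, self.col_offsets, self.scale, self.zero_point = \
    torch.fbgemm_linear_quantize_weight(  # pylint: disable=no-member
        other.weight.clone(memory_format=torch.contiguous_format).float())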
Line: 19
Column: 46
self.out_features = other.out_features
# Quantize weight and discard the original
self.weight, self.col_offsets, self.scale, self.zero_point = torch.fbgemm_linear_quantize_weight(
other.weight.clone(memory_format=torch.contiguous_format).float())
self.weight = torch.nn.Parameter(self.weight, requires_grad=False)
self.col_offsets = torch.nn.Parameter(self.col_offsets, requires_grad=False)
assert other.bias is not None, 'QuantizedLinear requires a bias'
self.bias = torch.nn.Parameter(other.bias.clone(memory_format=torch.contiguous_format).float(), requires_grad=False)
Reported by Pylint.
Line: 23
Column: 71
self.weight = torch.nn.Parameter(self.weight, requires_grad=False)
self.col_offsets = torch.nn.Parameter(self.col_offsets, requires_grad=False)
assert other.bias is not None, 'QuantizedLinear requires a bias'
self.bias = torch.nn.Parameter(other.bias.clone(memory_format=torch.contiguous_format).float(), requires_grad=False)
self.register_buffer(
'packed_tensor_ptr',
torch.fbgemm_pack_quantized_matrix(self.weight.clone(memory_format=torch.contiguous_format)))
Reported by Pylint.
Line: 27
Column: 13
self.register_buffer(
'packed_tensor_ptr',
torch.fbgemm_pack_quantized_matrix(self.weight.clone(memory_format=torch.contiguous_format)))
@torch.jit.script_method
def _unpack(self):
self.packed_tensor_ptr.set_(
torch.fbgemm_pack_quantized_matrix(self.weight))
Reported by Pylint.
Line: 27
Column: 80
self.register_buffer(
'packed_tensor_ptr',
torch.fbgemm_pack_quantized_matrix(self.weight.clone(memory_format=torch.contiguous_format)))
@torch.jit.script_method
def _unpack(self):
self.packed_tensor_ptr.set_(
torch.fbgemm_pack_quantized_matrix(self.weight))
Reported by Pylint.
Line: 32
Column: 13
@torch.jit.script_method
def _unpack(self):
self.packed_tensor_ptr.set_(
torch.fbgemm_pack_quantized_matrix(self.weight))
@torch.jit.script_method
def _pack(self):
self.packed_tensor_ptr.set_(
torch.zeros(torch.jit.annotate(List[int], []), dtype=torch.uint8).detach())
Reported by Pylint.
Line: 37
Column: 13
@torch.jit.script_method
def _pack(self):
self.packed_tensor_ptr.set_(
torch.zeros(torch.jit.annotate(List[int], []), dtype=torch.uint8).detach())
@torch.jit.script_method
def forward(self, input):
out = torch.fbgemm_linear_int8_weight_fp32_activation(
input.float(), self.weight, self.packed_tensor_ptr, self.col_offsets,
Reported by Pylint.
Line: 37
Column: 66
@torch.jit.script_method
def _pack(self):
self.packed_tensor_ptr.set_(
torch.zeros(torch.jit.annotate(List[int], []), dtype=torch.uint8).detach())
@torch.jit.script_method
def forward(self, input):
out = torch.fbgemm_linear_int8_weight_fp32_activation(
input.float(), self.weight, self.packed_tensor_ptr, self.col_offsets,
Reported by Pylint.
Line: 41
Column: 15
@torch.jit.script_method
def forward(self, input):
out = torch.fbgemm_linear_int8_weight_fp32_activation(
input.float(), self.weight, self.packed_tensor_ptr, self.col_offsets,
self.scale, self.zero_point, self.bias)
return out.to(input.dtype)
def extra_repr(self):
Reported by Pylint.
Line: 59
Column: 23
self.in_features = other.in_features
self.out_features = other.out_features
self.original_weight = other.weight
self.weight = torch.fbgemm_pack_gemm_matrix_fp16(
other.weight.clone(memory_format=torch.contiguous_format).float())
assert other.bias is not None, 'QuantizedLinearFP16 requires a bias'
self.bias = torch.nn.Parameter(other.bias.clone(memory_format=torch.contiguous_format).float(), requires_grad=False)
self.register_buffer('packed_weight', self.weight)
Reported by Pylint.
caffe2/python/schema_test.py
179 issues
Line: 233
Column: 13
def testAssignToField(self):
with self.assertRaises(TypeError):
s = schema.Struct(('a', schema.Scalar()))
s.a = schema.Scalar()
def testPreservesEmptyFields(self):
s = schema.Struct(
('a', schema.Scalar(np.float32)),
('b', schema.Struct()),
Reported by Pylint.
Line: 16
Column: 31
class TestField(unittest.TestCase):
def testInitShouldSetEmptyParent(self):
f = schema.Field([])
self.assertTupleEqual(f._parent, (None, 0))
def testInitShouldSetFieldOffsets(self):
f = schema.Field([
schema.Scalar(dtype=np.int32),
schema.Struct(
Reported by Pylint.
Line: 32
Column: 30
),
schema.Scalar(dtype=np.int32),
])
self.assertListEqual(f._field_offsets, [0, 1, 4, 5, 8, 9])
def testInitShouldSetFieldOffsetsIfNoChildren(self):
f = schema.Field([])
self.assertListEqual(f._field_offsets, [0])
Reported by Pylint.
Line: 36
Column: 30
def testInitShouldSetFieldOffsetsIfNoChildren(self):
f = schema.Field([])
self.assertListEqual(f._field_offsets, [0])
class TestDB(unittest.TestCase):
def testPicklable(self):
s = schema.Struct(
Reported by Pylint.
Line: 45
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b301-pickle
('field1', schema.Scalar(dtype=np.int32)),
('field2', schema.List(schema.Scalar(dtype=str)))
)
s2 = pickle.loads(pickle.dumps(s))
for r in (s, s2):
self.assertTrue(isinstance(r.field1, schema.Scalar))
self.assertTrue(isinstance(r.field2, schema.List))
self.assertTrue(getattr(r, 'non_existent', None) is None)
Reported by Bandit.
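Bandit B301 warns because `pickle.loads` can execute arbitrary code when fed untrusted bytes. For a test that round-trips its own object a `# nosec` comment is typical; where input is not trusted, the standard mitigation is a restricted `Unpickler`. A self-contained sketch of the latter:

import io
import pickle

class RestrictedUnpickler(pickle.Unpickler):
    # Refuse to resolve any global, so only plain containers/primitives load.
    def find_class(self, module, name):
        raise pickle.UnpicklingError(f"global '{module}.{name}' is forbidden")

def restricted_loads(data: bytes):
    return RestrictedUnpickler(io.BytesIO(data)).load()

print(restricted_loads(pickle.dumps({'a': [1, 2, 3]})))  # {'a': [1, 2, 3]}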
Line: 85
Column: 9
def testNormalizeField(self):
s = schema.Struct(('field1', np.int32), ('field2', str))
self.assertEquals(
s,
schema.Struct(
('field1', schema.Scalar(dtype=np.int32)),
('field2', schema.Scalar(dtype=str))
)
Reported by Pylint.
Line: 100
Column: 9
('field_1', schema.Scalar(dtype=np.str)),
('field_2', schema.Scalar(dtype=np.float32))
)
self.assertEquals(s, s2)
self.assertEquals(s[0], schema.Scalar(dtype=np.int32))
self.assertEquals(s[1], schema.Scalar(dtype=np.str))
self.assertEquals(s[2], schema.Scalar(dtype=np.float32))
self.assertEquals(
s[2, 0],
Reported by Pylint.
Line: 101
Column: 9
('field_2', schema.Scalar(dtype=np.float32))
)
self.assertEquals(s, s2)
self.assertEquals(s[0], schema.Scalar(dtype=np.int32))
self.assertEquals(s[1], schema.Scalar(dtype=np.str))
self.assertEquals(s[2], schema.Scalar(dtype=np.float32))
self.assertEquals(
s[2, 0],
schema.Struct(
Reported by Pylint.
Line: 102
Column: 9
)
self.assertEquals(s, s2)
self.assertEquals(s[0], schema.Scalar(dtype=np.int32))
self.assertEquals(s[1], schema.Scalar(dtype=np.str))
self.assertEquals(s[2], schema.Scalar(dtype=np.float32))
self.assertEquals(
s[2, 0],
schema.Struct(
('field_2', schema.Scalar(dtype=np.float32)),
Reported by Pylint.
Line: 103
Column: 9
self.assertEquals(s, s2)
self.assertEquals(s[0], schema.Scalar(dtype=np.int32))
self.assertEquals(s[1], schema.Scalar(dtype=np.str))
self.assertEquals(s[2], schema.Scalar(dtype=np.float32))
self.assertEquals(
s[2, 0],
schema.Struct(
('field_2', schema.Scalar(dtype=np.float32)),
('field_0', schema.Scalar(dtype=np.int32)),
Reported by Pylint.
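Each of these findings points at `self.assertEquals`, a long-deprecated alias of `assertEqual` (removed in Python 3.12), which is almost certainly the message here; `np.str` is likewise a deprecated alias of plain `str` in modern NumPy. The modernized assertions would read:

self.assertEqual(s, s2)
self.assertEqual(s[0], schema.Scalar(dtype=np.int32))
self.assertEqual(s[1], schema.Scalar(dtype=str))  # np.str is deprecated; use str
self.assertEqual(s[2], schema.Scalar(dtype=np.float32))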
test/distributed/test_c10d_common.py
178 issues
Line: 11
Column: 1
from itertools import product
from sys import platform
import torch
import torch.distributed as c10d
if not c10d.is_available():
print("c10d not available, skipping tests", file=sys.stderr)
sys.exit(0)
Reported by Pylint.
Line: 12
Column: 1
from sys import platform
import torch
import torch.distributed as c10d
if not c10d.is_available():
print("c10d not available, skipping tests", file=sys.stderr)
sys.exit(0)
Reported by Pylint.
Line: 18
Column: 1
print("c10d not available, skipping tests", file=sys.stderr)
sys.exit(0)
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
Reported by Pylint.
Line: 19
Column: 1
sys.exit(0)
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
Reported by Pylint.
Line: 20
Column: 1
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
Reported by Pylint.
Line: 21
Column: 1
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
)
Reported by Pylint.
Line: 22
Column: 1
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
)
from torch.testing._internal.common_utils import (
Reported by Pylint.
Line: 23
Column: 1
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
)
from torch.testing._internal.common_utils import (
TestCase,
Reported by Pylint.
Line: 24
Column: 1
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
Reported by Pylint.
Line: 27
Column: 1
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_TSAN,
)
Reported by Pylint.
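Every finding in this file sits on an import that follows the `c10d.is_available()` guard, matching wrong-import-position (C0413). The early `sys.exit(0)` is intentional, so the conventional resolution is an inline disable rather than reordering; a sketch:

import sys

import torch.distributed as c10d

if not c10d.is_available():
    print("c10d not available, skipping tests", file=sys.stderr)
    sys.exit(0)

# These must stay below the availability guard, so silence the checkers.
import torch.distributed as dist  # noqa: E402  # pylint: disable=wrong-import-position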
test/quantization/core/test_workflow_module.py
177 issues
Line: 2
Column: 1
# Torch
import torch
from torch.quantization import (
MinMaxObserver,
PerChannelMinMaxObserver,
MovingAverageMinMaxObserver,
MovingAveragePerChannelMinMaxObserver,
HistogramObserver,
RecordingObserver,
Reported by Pylint.
Line: 3
Column: 1
# Torch
import torch
from torch.quantization import (
MinMaxObserver,
PerChannelMinMaxObserver,
MovingAverageMinMaxObserver,
MovingAveragePerChannelMinMaxObserver,
HistogramObserver,
RecordingObserver,
Reported by Pylint.
Line: 24
Column: 1
FusedMovingAvgObsFakeQuantize,
)
import torch.nn as nn
# Standard library
import copy
import io
import itertools
Reported by Pylint.
Line: 35
Column: 1
import numpy as np
# Testing utils
from hypothesis import given, settings
from hypothesis import strategies as st
import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from torch.testing._internal.common_utils import TestCase
Reported by Pylint.
Line: 36
Column: 1
# Testing utils
from hypothesis import given, settings
from hypothesis import strategies as st
import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from torch.testing._internal.common_utils import TestCase
from torch.testing._internal.common_quantization import (
Reported by Pylint.
Line: 37
Column: 1
# Testing utils
from hypothesis import given, settings
from hypothesis import strategies as st
import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from torch.testing._internal.common_utils import TestCase
from torch.testing._internal.common_quantization import (
QuantizationTestCase,
Reported by Pylint.
Line: 39
Column: 1
from hypothesis import strategies as st
import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from torch.testing._internal.common_utils import TestCase
from torch.testing._internal.common_quantization import (
QuantizationTestCase,
AnnotatedSingleLayerLinearModel,
test_only_eval_fn,
Reported by Pylint.
Line: 40
Column: 1
import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from torch.testing._internal.common_utils import TestCase
from torch.testing._internal.common_quantization import (
QuantizationTestCase,
AnnotatedSingleLayerLinearModel,
test_only_eval_fn,
SingleLayerLinearModel,
Reported by Pylint.
Line: 41
Column: 1
hu.assert_deadline_disabled()
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from torch.testing._internal.common_utils import TestCase
from torch.testing._internal.common_quantization import (
QuantizationTestCase,
AnnotatedSingleLayerLinearModel,
test_only_eval_fn,
SingleLayerLinearModel,
)
Reported by Pylint.
Line: 48
Column: 1
SingleLayerLinearModel,
)
from torch.testing._internal.common_quantized import (
override_quantized_engine,
supported_qengines,
override_qengines,
_fake_quantize_per_channel_affine_reference,
_fake_quantize_per_channel_affine_grad_reference,
Reported by Pylint.
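Same pattern as test/distributed/test_c10d_common.py above: these imports follow the `hu.assert_deadline_disabled()` call, so wrong-import-position (C0413) is the plausible message, and the inline-disable sketch shown there applies unchanged.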
test/test_tensorexpr_pybind.py
176 issues
Line: 1
Column: 1
import torch
import numpy as np
import torch._C._te as te
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.jit_utils import JitTestCase
import unittest
LLVM_ENABLED = torch._C._llvm_enabled()
Reported by Pylint.
Line: 3
Column: 1
import torch
import numpy as np
import torch._C._te as te
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.jit_utils import JitTestCase
import unittest
LLVM_ENABLED = torch._C._llvm_enabled()
Reported by Pylint.
Line: 5
Column: 1
import numpy as np
import torch._C._te as te
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.jit_utils import JitTestCase
import unittest
LLVM_ENABLED = torch._C._llvm_enabled()
Reported by Pylint.
Line: 6
Column: 1
import torch._C._te as te
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.jit_utils import JitTestCase
import unittest
LLVM_ENABLED = torch._C._llvm_enabled()
Reported by Pylint.
Line: 9
Column: 16
from torch.testing._internal.jit_utils import JitTestCase
import unittest
LLVM_ENABLED = torch._C._llvm_enabled()
class kernel_arena_scope(object):
def __enter__(self):
self.scope = torch._C._te.KernelScope()
Reported by Pylint.
Line: 14
Column: 9
class kernel_arena_scope(object):
def __enter__(self):
self.scope = torch._C._te.KernelScope()
def __exit__(self, typ, val, traceback):
self.scope = None
Reported by Pylint.
Line: 17
Column: 9
self.scope = torch._C._te.KernelScope()
def __exit__(self, typ, val, traceback):
self.scope = None
def construct_adder(n: int, dtype=te.Dtype.Float):
dN = te.ExprHandle.int(n)
A = te.Placeholder('A', dtype, [dN])
Reported by Pylint.
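The column-9 findings at lines 14 and 17 land on `self.scope`, which is assigned only in `__enter__`/`__exit__`; that matches attribute-defined-outside-init (W0201), and the lowercase class name would additionally trip invalid-name (C0103). A hedged rewrite addressing both:

class KernelArenaScope:
    """Context manager owning a TensorExpr kernel arena for its lifetime."""
    def __init__(self):
        self.scope = None  # declared in __init__, satisfying W0201

    def __enter__(self):
        self.scope = torch._C._te.KernelScope()
        return self

    def __exit__(self, typ, val, traceback):
        self.scope = None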
Line: 138
Column: 17
%3 : Float(4, 4, strides=[4, 1], requires_grad=0, device=cpu) = aten::add(%7, %c.1, %6)
return (%3)
"""
graph = torch._C.parse_ir(graph_str)
kernel = torch._C._te.TensorExprKernel(graph)
res1 = kernel.run((x, y, z))
res2 = kernel.fallback((x, y, z))
correct = f(x, y, z)
Reported by Pylint.
Line: 140
Column: 18
"""
graph = torch._C.parse_ir(graph_str)
kernel = torch._C._te.TensorExprKernel(graph)
res1 = kernel.run((x, y, z))
res2 = kernel.fallback((x, y, z))
correct = f(x, y, z)
np.testing.assert_allclose(res1.numpy(), correct.numpy(), atol=2e-3)
np.testing.assert_allclose(res2.numpy(), correct.numpy(), atol=2e-3)
Reported by Pylint.
caffe2/python/operator_test/softmax_ops_test.py
175 issues
Line: 7
Column: 1
from caffe2.python import core, workspace
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
Reported by Pylint.
Line: 10
Column: 1
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
import unittest
Reported by Pylint.
Line: 22
Column: 46
D=st.sampled_from([0, 4, 8, 64, 79, 256, 333]),
engine=st.sampled_from([None, 'CUDNN']),
**hu.gcs)
def test_softmax(self, n, D, engine, gc, dc):
# n = number of examples, D = |labels|
# Initialize X and add 1e-2 for numerical stability
X = np.random.rand(n, D).astype(np.float32)
X = X + 1e-2
Reported by Pylint.
Line: 65
Column: 51
engine=st.sampled_from([None, 'CUDNN']),
**hu.gcs)
@settings(deadline=10000)
def test_softmax_grad(self, n, D, engine, gc, dc):
# n = number of examples, D = |labels|
# Initialize X and add 1e-2 for numerical stability
Y = np.random.rand(n, D).astype(np.float32)
dY = np.random.rand(n, D).astype(np.float32)
Y = Y + 1e-2
Reported by Pylint.
Line: 73
Column: 32
Y = Y + 1e-2
# Reference implementation of cross entropy with soft labels
def label_softmax_grad(X, dY):
dX = Y * 0.0
for i in range(n):
d = np.dot(Y[i, :], dY[i, :])
dX[i, :] = Y[i, :] * (dY[i, :] - d)
return [dX]
Reported by Pylint.
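Column 32 points at the `X` parameter of the nested reference function, which shadows the enclosing test's `X` array; redefined-outer-name (W0621) is the plausible message. Renaming the inner parameters is the cheap fix; a sketch:

def label_softmax_grad(x, d_y):  # renamed to avoid shadowing the outer X/dY
    # x is unused in the body, but presumably required by the checker's
    # callback signature, so it is kept.
    dX = Y * 0.0
    for i in range(n):
        d = np.dot(Y[i, :], d_y[i, :])
        dX[i, :] = Y[i, :] * (d_y[i, :] - d)
    return [dX]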
Line: 97
Column: 51
@given(axis=st.integers(min_value=1, max_value=4),
engine=st.sampled_from([None, 'CUDNN']),
**hu.gcs)
def test_softmax_axis(self, axis, engine, gc, dc):
np.random.seed(1)
X = np.random.randn(1, 2, 3, 2, 1).astype(np.float32)
X = X + 1e-2
def prod(xs):
Reported by Pylint.
Line: 147
Column: 59
@given(n=st.integers(2, 10), D=st.integers(4, 16),
only_loss=st.booleans(), **hu.gcs)
@settings(deadline=10000)
def test_softmax_with_loss(self, n, D, gc, only_loss, dc):
# n = number of examples, D = |labels|
# Initialize X and add 1e-2 for numerical stability
np.random.seed(2603)
X = np.random.rand(n, D).astype(np.float32)
X = X + 1e-2
Reported by Pylint.
Line: 201
Column: 13
@settings(deadline=10000)
def test_softmax_with_loss_axis_2(
self, n, D, only_loss, label_prob,
gc, dc
):
np.random.seed(2603)
X = np.random.rand(n, n, D).astype(np.float32)
X = X + 1e-2
Reported by Pylint.
Line: 261
Column: 48
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
@given(**hu.gcs_gpu_only)
def test_softmax_with_loss_large(self, gc, dc):
np.random.seed(2603)
for n in [32]:
for D in [1000, 2000, 20000]:
# n = number of examples, D = |labels|
# Initialize X and add 1e-2 for numerical stability
Reported by Pylint.
Line: 275
Column: 39
# Reference implementation of cross entropy with soft labels
def label_softmax_crossent(X, label):
probs = np.zeros((n, D))
rowmax = np.zeros(n)
for i in range(n):
rowmax[i] = max(X[i, ])
# We need to subtract the max to avoid numerical issues
probs[i] = X[i] - rowmax[i]
Reported by Pylint.
torch/nn/modules/activation.py
172 issues
Line: 6
Column: 1
import torch
from torch import Tensor
from .linear import NonDynamicallyQuantizableLinear
from torch.nn.init import constant_, xavier_normal_, xavier_uniform_
from torch.nn.parameter import Parameter
from .module import Module
from .. import functional as F
Reported by Pylint.
Line: 9
Column: 1
from .linear import NonDynamicallyQuantizableLinear
from torch.nn.init import constant_, xavier_normal_, xavier_uniform_
from torch.nn.parameter import Parameter
from .module import Module
from .. import functional as F
class Threshold(Module):
r"""Thresholds each element of the input Tensor.
Reported by Pylint.
Line: 10
Column: 1
from torch.nn.init import constant_, xavier_normal_, xavier_uniform_
from torch.nn.parameter import Parameter
from .module import Module
from .. import functional as F
class Threshold(Module):
r"""Thresholds each element of the input Tensor.
Reported by Pylint.
Line: 291
Column: 16
"""
def forward(self, input: Tensor) -> Tensor:
return torch.sigmoid(input)
class Hardsigmoid(Module):
r"""Applies the element-wise function:
Reported by Pylint.
Line: 349
Column: 16
"""
def forward(self, input: Tensor) -> Tensor:
return torch.tanh(input)
class SiLU(Module):
r"""Applies the Sigmoid Linear Unit (SiLU) function, element-wise.
The SiLU function is also known as the swish function.
Reported by Pylint.
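In these `forward(self, input)` definitions the parameter shadows the `input` builtin, Pylint's redefined-builtin (W0622). The name is part of the established nn.Module interface, so a targeted disable is the idiomatic resolution; a sketch under that assumption:

def forward(self, input: Tensor) -> Tensor:  # pylint: disable=redefined-builtin
    return torch.sigmoid(input)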
Line: 901
Column: 44
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
if self._qkv_same_embed_dim is False:
self.q_proj_weight = Parameter(torch.empty((embed_dim, embed_dim), **factory_kwargs))
self.k_proj_weight = Parameter(torch.empty((embed_dim, self.kdim), **factory_kwargs))
self.v_proj_weight = Parameter(torch.empty((embed_dim, self.vdim), **factory_kwargs))
self.register_parameter('in_proj_weight', None)
else:
self.in_proj_weight = Parameter(torch.empty((3 * embed_dim, embed_dim), **factory_kwargs))
Reported by Pylint.
Line: 902
Column: 44
if self._qkv_same_embed_dim is False:
self.q_proj_weight = Parameter(torch.empty((embed_dim, embed_dim), **factory_kwargs))
self.k_proj_weight = Parameter(torch.empty((embed_dim, self.kdim), **factory_kwargs))
self.v_proj_weight = Parameter(torch.empty((embed_dim, self.vdim), **factory_kwargs))
self.register_parameter('in_proj_weight', None)
else:
self.in_proj_weight = Parameter(torch.empty((3 * embed_dim, embed_dim), **factory_kwargs))
self.register_parameter('q_proj_weight', None)
Reported by Pylint.
Line: 903
Column: 44
if self._qkv_same_embed_dim is False:
self.q_proj_weight = Parameter(torch.empty((embed_dim, embed_dim), **factory_kwargs))
self.k_proj_weight = Parameter(torch.empty((embed_dim, self.kdim), **factory_kwargs))
self.v_proj_weight = Parameter(torch.empty((embed_dim, self.vdim), **factory_kwargs))
self.register_parameter('in_proj_weight', None)
else:
self.in_proj_weight = Parameter(torch.empty((3 * embed_dim, embed_dim), **factory_kwargs))
self.register_parameter('q_proj_weight', None)
self.register_parameter('k_proj_weight', None)
Reported by Pylint.
Line: 906
Column: 45
self.v_proj_weight = Parameter(torch.empty((embed_dim, self.vdim), **factory_kwargs))
self.register_parameter('in_proj_weight', None)
else:
self.in_proj_weight = Parameter(torch.empty((3 * embed_dim, embed_dim), **factory_kwargs))
self.register_parameter('q_proj_weight', None)
self.register_parameter('k_proj_weight', None)
self.register_parameter('v_proj_weight', None)
if bias:
Reported by Pylint.
Line: 912
Column: 43
self.register_parameter('v_proj_weight', None)
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim, **factory_kwargs))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = NonDynamicallyQuantizableLinear(embed_dim, embed_dim, bias=bias, **factory_kwargs)
if add_bias_kv:
Reported by Pylint.