The following issues were found:
benchmarks/distributed/rpc/rl/agent.py
29 issues
Line: 5
Column: 1
import time
import threading
import torch
from torch.distributions import Categorical
import torch.distributed.rpc as rpc
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
Reported by Pylint.
Line: 6
Column: 1
import threading
import torch
from torch.distributions import Categorical
import torch.distributed.rpc as rpc
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
Reported by Pylint.
Line: 7
Column: 1
import torch
from torch.distributions import Categorical
import torch.distributed.rpc as rpc
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
Reported by Pylint.
Line: 8
Column: 1
import torch
from torch.distributions import Categorical
import torch.distributed.rpc as rpc
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
OBSERVER_NAME = "observer{}"
Reported by Pylint.
Line: 9
Column: 1
from torch.distributions import Categorical
import torch.distributed.rpc as rpc
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
OBSERVER_NAME = "observer{}"
Reported by Pylint.
Line: 10
Column: 1
import torch.distributed.rpc as rpc
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
OBSERVER_NAME = "observer{}"
Reported by Pylint.
Line: 77
Column: 9
out_features (int): Number of out features in the model
batch (bool): Whether to process and respond to observer requests as a batch or 1 at a time
"""
self.batch = batch
self.policy = Policy(reduce((lambda x, y: x * y), state_size), nlayers, out_features)
self.optimizer = optim.Adam(self.policy.parameters(), lr=1e-2)
self.batch_size = batch_size
for rank in range(batch_size):
Reported by Pylint.
Line: 78
Column: 9
batch (bool): Whether to process and respond to observer requests as a batch or 1 at a time
"""
self.batch = batch
self.policy = Policy(reduce((lambda x, y: x * y), state_size), nlayers, out_features)
self.optimizer = optim.Adam(self.policy.parameters(), lr=1e-2)
self.batch_size = batch_size
for rank in range(batch_size):
ob_info = rpc.get_worker_info(OBSERVER_NAME.format(rank + 2))
Reported by Pylint.
Line: 79
Column: 9
"""
self.batch = batch
self.policy = Policy(reduce((lambda x, y: x * y), state_size), nlayers, out_features)
self.optimizer = optim.Adam(self.policy.parameters(), lr=1e-2)
self.batch_size = batch_size
for rank in range(batch_size):
ob_info = rpc.get_worker_info(OBSERVER_NAME.format(rank + 2))
Reported by Pylint.
Line: 81
Column: 9
self.policy = Policy(reduce((lambda x, y: x * y), state_size), nlayers, out_features)
self.optimizer = optim.Adam(self.policy.parameters(), lr=1e-2)
self.batch_size = batch_size
for rank in range(batch_size):
ob_info = rpc.get_worker_info(OBSERVER_NAME.format(rank + 2))
self.rewards[ob_info.id] = []
Reported by Pylint.
torch/jit/__init__.py
29 issues
Line: 67
Column: 37
have a LiteScriptModule and want to get the currently present
list of ops call _export_operator_list instead.
"""
return torch._C._export_opnames(m._c)
# torch.jit.Error
Error = torch._C.JITException
set_module(Error, "torch.jit")
Reported by Pylint.
Line: 67
Column: 12
have a LiteScriptModule and want to get the currently present
list of ops call _export_operator_list instead.
"""
return torch._C._export_opnames(m._c)
# torch.jit.Error
Error = torch._C.JITException
set_module(Error, "torch.jit")
Reported by Pylint.
Line: 67
Column: 12
have a LiteScriptModule and want to get the currently present
list of ops call _export_operator_list instead.
"""
return torch._C._export_opnames(m._c)
# torch.jit.Error
Error = torch._C.JITException
set_module(Error, "torch.jit")
Reported by Pylint.
Line: 71
Column: 9
# torch.jit.Error
Error = torch._C.JITException
set_module(Error, "torch.jit")
# This is not perfect but works in common cases
Error.__name__ = "Error"
Error.__qualname__ = "Error"
Reported by Pylint.
Line: 78
Column: 14
Error.__qualname__ = "Error"
# for use in python if using annotate
def annotate(the_type, the_value):
"""
This method is a pass-through function that returns `the_value`, used to hint TorchScript
compiler the type of `the_value`. It is a no-op when running outside of TorchScript.
Though TorchScript can infer correct type for most Python expressions, there are some cases where
Reported by Pylint.
Line: 147
Column: 1
# for torch.jit.isinstance
def isinstance(obj, target_type):
"""
This function provides for conatiner type refinement in TorchScript. It can refine
parameterized containers of the List, Dict, Tuple, and Optional types. E.g. ``List[str]``,
``Dict[str, List[torch.Tensor]]``, ``Optional[Tuple[int,str,int]]``. It can also
refine basic types such as bools and ints that are available in TorchScript.
Reported by Pylint.
Line: 194
Column: 32
# Graph class, so mypy checks need to be skipped.
@contextmanager
def _hide_source_ranges() -> Iterator[None]:
old_enable_source_ranges = torch._C.Graph.global_print_source_ranges # type: ignore[attr-defined]
try:
torch._C.Graph.set_global_print_source_ranges(False) # type: ignore[attr-defined]
yield
finally:
torch._C.Graph.set_global_print_source_ranges(old_enable_source_ranges) # type: ignore[attr-defined]
Reported by Pylint.
Line: 196
Column: 9
def _hide_source_ranges() -> Iterator[None]:
old_enable_source_ranges = torch._C.Graph.global_print_source_ranges # type: ignore[attr-defined]
try:
torch._C.Graph.set_global_print_source_ranges(False) # type: ignore[attr-defined]
yield
finally:
torch._C.Graph.set_global_print_source_ranges(old_enable_source_ranges) # type: ignore[attr-defined]
Reported by Pylint.
Line: 199
Column: 9
torch._C.Graph.set_global_print_source_ranges(False) # type: ignore[attr-defined]
yield
finally:
torch._C.Graph.set_global_print_source_ranges(old_enable_source_ranges) # type: ignore[attr-defined]
if not torch._C._jit_init():
raise RuntimeError("JIT initialization failed")
Reported by Pylint.
Line: 202
Column: 8
torch._C.Graph.set_global_print_source_ranges(old_enable_source_ranges) # type: ignore[attr-defined]
if not torch._C._jit_init():
raise RuntimeError("JIT initialization failed")
Reported by Pylint.
test/jit/test_complexity.py
29 issues
Line: 5
Column: 1
import sys
import unittest
import torch
# as with test_jit tests, requires global dtype set
torch.set_default_dtype(torch.double)
# Make the helper files in test/ importable
Reported by Pylint.
Line: 13
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, enable_profiling_mode
from torch.testing._internal.jit_metaprogramming_utils import try_get_nn_module_compiled_mod_and_inputs, \
get_nn_mod_test_name, get_all_nn_module_tests, nn_functional_tests, get_nn_functional_compiled_fn_and_inputs
from torch.testing._internal.common_utils import run_tests, suppress_warnings, IS_FBCODE
Reported by Pylint.
Line: 14
Column: 1
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, enable_profiling_mode
from torch.testing._internal.jit_metaprogramming_utils import try_get_nn_module_compiled_mod_and_inputs, \
get_nn_mod_test_name, get_all_nn_module_tests, nn_functional_tests, get_nn_functional_compiled_fn_and_inputs
from torch.testing._internal.common_utils import run_tests, suppress_warnings, IS_FBCODE
def num_ifs_loops(graph):
Reported by Pylint.
Line: 16
Column: 1
from torch.testing._internal.jit_utils import JitTestCase, enable_profiling_mode
from torch.testing._internal.jit_metaprogramming_utils import try_get_nn_module_compiled_mod_and_inputs, \
get_nn_mod_test_name, get_all_nn_module_tests, nn_functional_tests, get_nn_functional_compiled_fn_and_inputs
from torch.testing._internal.common_utils import run_tests, suppress_warnings, IS_FBCODE
def num_ifs_loops(graph):
graph_str = str(graph)
# only look at body of graph
Reported by Pylint.
Line: 1
Column: 1
import os
import sys
import unittest
import torch
# as with test_jit tests, requires global dtype set
torch.set_default_dtype(torch.double)
Reported by Pylint.
Line: 13
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, enable_profiling_mode
from torch.testing._internal.jit_metaprogramming_utils import try_get_nn_module_compiled_mod_and_inputs, \
get_nn_mod_test_name, get_all_nn_module_tests, nn_functional_tests, get_nn_functional_compiled_fn_and_inputs
from torch.testing._internal.common_utils import run_tests, suppress_warnings, IS_FBCODE
Reported by Pylint.
Line: 14
Column: 1
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, enable_profiling_mode
from torch.testing._internal.jit_metaprogramming_utils import try_get_nn_module_compiled_mod_and_inputs, \
get_nn_mod_test_name, get_all_nn_module_tests, nn_functional_tests, get_nn_functional_compiled_fn_and_inputs
from torch.testing._internal.common_utils import run_tests, suppress_warnings, IS_FBCODE
def num_ifs_loops(graph):
Reported by Pylint.
Line: 14
Column: 1
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, enable_profiling_mode
from torch.testing._internal.jit_metaprogramming_utils import try_get_nn_module_compiled_mod_and_inputs, \
get_nn_mod_test_name, get_all_nn_module_tests, nn_functional_tests, get_nn_functional_compiled_fn_and_inputs
from torch.testing._internal.common_utils import run_tests, suppress_warnings, IS_FBCODE
def num_ifs_loops(graph):
Reported by Pylint.
Line: 15
Column: 1
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, enable_profiling_mode
from torch.testing._internal.jit_metaprogramming_utils import try_get_nn_module_compiled_mod_and_inputs, \
get_nn_mod_test_name, get_all_nn_module_tests, nn_functional_tests, get_nn_functional_compiled_fn_and_inputs
from torch.testing._internal.common_utils import run_tests, suppress_warnings, IS_FBCODE
def num_ifs_loops(graph):
graph_str = str(graph)
Reported by Pylint.
Line: 16
Column: 1
from torch.testing._internal.jit_utils import JitTestCase, enable_profiling_mode
from torch.testing._internal.jit_metaprogramming_utils import try_get_nn_module_compiled_mod_and_inputs, \
get_nn_mod_test_name, get_all_nn_module_tests, nn_functional_tests, get_nn_functional_compiled_fn_and_inputs
from torch.testing._internal.common_utils import run_tests, suppress_warnings, IS_FBCODE
def num_ifs_loops(graph):
graph_str = str(graph)
# only look at body of graph
Reported by Pylint.
caffe2/quantization/server/conv_depthwise_dnnlowp_op_test.py
29 issues
Line: 6
Column: 1
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server import utils as dnnlowp_utils
from caffe2.quantization.server.dnnlowp_test_utils import (
check_quantized_results_close,
generate_conv_inputs,
Reported by Pylint.
Line: 15
Column: 1
generate_convnd_inputs,
run_conv_or_fc,
)
from hypothesis import given, settings
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
Reported by Pylint.
Line: 51
Column: 9
quantize_groupwise,
relu,
gc,
dc,
):
pad = 1
kernel = 3
dilation = 1
input_channels_per_group = 1
Reported by Pylint.
Line: 76
Column: 9
preserve_weight_sparsity=preserve_weight_sparsity,
)
Output = collections.namedtuple("Output", ["Y", "op_type", "engine", "order"])
outputs = []
if relu:
op_engine_list = [
("Conv", ""),
Reported by Pylint.
Line: 208
Column: 9
preserve_weight_sparsity,
quantize_groupwise,
gc,
dc,
):
pad = 1
kernel = 3
dilation = 1
input_channels_per_group = 1
Reported by Pylint.
Line: 233
Column: 9
preserve_weight_sparsity=preserve_weight_sparsity,
)
Output = collections.namedtuple("Output", ["Y", "op_type", "engine", "order"])
outputs = []
op = "ConvRelu" if fuse_relu else "Conv"
op_engine_list = [(op, ""), (op, "DNNLOWP"), ("Int8" + op, "DNNLOWP")]
Reported by Pylint.
Line: 1
Column: 1
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server import utils as dnnlowp_utils
from caffe2.quantization.server.dnnlowp_test_utils import (
Reported by Pylint.
Line: 22
Column: 1
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPOpConvDepthWiseTest(hu.HypothesisTestCase):
@given(
stride=st.integers(1, 2),
size=st.integers(10, 16),
# depthwise 3x3 fast path only works for a multiple of 8
group=st.sampled_from([8, 24, 32]),
Reported by Pylint.
Line: 37
Column: 5
relu=st.booleans(),
**hu.gcs_cpu_only
)
@settings(max_examples=10, deadline=None)
def test_dnnlowp_depthwise_3x3_conv(
self,
stride,
size,
group,
Reported by Pylint.
Line: 37
Column: 5
relu=st.booleans(),
**hu.gcs_cpu_only
)
@settings(max_examples=10, deadline=None)
def test_dnnlowp_depthwise_3x3_conv(
self,
stride,
size,
group,
Reported by Pylint.
benchmarks/distributed/ddp/compare/python_ddp.py
29 issues
Line: 2
Column: 1
import functools
import torch.distributed as dist
import torch.nn as nn
class PythonDDP(nn.Module):
"""
Python only implementation for DistributedDataParallel module.
Unlike the production DistributedDataParallel which relies on many C++ core
Reported by Pylint.
Line: 3
Column: 1
import functools
import torch.distributed as dist
import torch.nn as nn
class PythonDDP(nn.Module):
"""
Python only implementation for DistributedDataParallel module.
Unlike the production DistributedDataParallel which relies on many C++ core
Reported by Pylint.
Line: 1
Column: 1
import functools
import torch.distributed as dist
import torch.nn as nn
class PythonDDP(nn.Module):
"""
Python only implementation for DistributedDataParallel module.
Unlike the production DistributedDataParallel which relies on many C++ core
Reported by Pylint.
Line: 61
Column: 13
self.buffer = None
self.ready_param_grad_count = 0
self.total_elements = 0
self._MAX_BUFFER_SIZE = max_buffer_size
def __str__(self):
return "Bucket: num_params={}, total_elements={}, ready_param_grad_count={}".format(
len(self.param_to_offset),
self.total_elements,
Reported by Pylint.
Line: 74
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
Returns whether grad for all the params in current bucket are ready
and copied to self.buffer.
"""
assert self.ready_param_grad_count >= 0
assert self.ready_param_grad_count <= len(self.param_to_offset)
return len(self.param_to_offset) == self.ready_param_grad_count
def empty(self):
self.ready_param_grad_count = 0
Reported by Bandit.
Line: 75
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
and copied to self.buffer.
"""
assert self.ready_param_grad_count >= 0
assert self.ready_param_grad_count <= len(self.param_to_offset)
return len(self.param_to_offset) == self.ready_param_grad_count
def empty(self):
self.ready_param_grad_count = 0
Reported by Bandit.
Line: 78
Column: 9
assert self.ready_param_grad_count <= len(self.param_to_offset)
return len(self.param_to_offset) == self.ready_param_grad_count
def empty(self):
self.ready_param_grad_count = 0
def try_hold_param(self, param):
"""
Checks whether current bucket has enough buffer to hold the incoming
Reported by Pylint.
Line: 87
Column: 13
param. If there is enough space, distribute param into current
bucket and Returns true. Otherwise, returns False.
"""
if self.total_elements + param.numel() <= self._MAX_BUFFER_SIZE :
self.param_to_offset[param] = self.total_elements
self.total_elements += param.numel()
return True
else:
return False
Reported by Pylint.
Line: 95
Column: 9
return False
def __init__(self, module, process_group, async_reduction=True, buffer_size=2 ** 22):
super(PythonDDP, self).__init__()
self.module = module
self.process_group = process_group
self.world_size = dist.get_world_size(group=self.process_group)
self.async_reduction = async_reduction
Reported by Pylint.
Line: 107
Column: 1
# Ensure buffer_size is large enough to hold largest param.
max_numel = max(p.numel() for p in module.parameters())
assert buffer_size > max_numel, "buffer_size: {} should be larger than largest param: {}".format(buffer_size, max_numel)
# Build buckets for params
self.param_to_bucket, self.buckets = self._build_buckets_for_params(buffer_size)
# Register per-parameter hook to be invoked when grad is ready.
Reported by Pylint.
test/jit/test_backend_nnapi.py
29 issues
Line: 5
Column: 1
import sys
import unittest
import torch
import torch._C
from pathlib import Path
from test_nnapi import TestNNAPI
from torch.testing._internal.common_utils import TEST_WITH_ASAN
Reported by Pylint.
Line: 6
Column: 1
import unittest
import torch
import torch._C
from pathlib import Path
from test_nnapi import TestNNAPI
from torch.testing._internal.common_utils import TEST_WITH_ASAN
# Make the helper files in test/ importable
Reported by Pylint.
Line: 9
Column: 1
import torch._C
from pathlib import Path
from test_nnapi import TestNNAPI
from torch.testing._internal.common_utils import TEST_WITH_ASAN
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
Reported by Pylint.
Line: 22
Column: 1
"instead."
)
"""
Unit Tests for Nnapi backend with delegate
Inherits most tests from TestNNAPI, which loads Android NNAPI models
without the delegate API.
"""
# First skip is needed for IS_WINDOWS or IS_MACOS to skip the tests.
Reported by Pylint.
Line: 53
Column: 16
# Override
def call_lowering_to_nnapi(self, traced_module, args):
compile_spec = {"forward": {"inputs": args}}
return torch._C._jit_to_backend("nnapi", traced_module, compile_spec)
def test_tensor_input(self):
# Lower a simple module
args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
module = torch.nn.PReLU()
Reported by Pylint.
Line: 53
Column: 16
# Override
def call_lowering_to_nnapi(self, traced_module, args):
compile_spec = {"forward": {"inputs": args}}
return torch._C._jit_to_backend("nnapi", traced_module, compile_spec)
def test_tensor_input(self):
# Lower a simple module
args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
module = torch.nn.PReLU()
Reported by Pylint.
Line: 81
Column: 13
# No forward key
compile_spec = {"backward": {"inputs": args}}
with self.assertRaisesRegex(RuntimeError, "method_compile_spec does not contain the \"forward\" key." + errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No dictionary under the forward key
compile_spec = {"forward": 1}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain a dictionary with an \"inputs\" key, "
Reported by Pylint.
Line: 81
Column: 13
# No forward key
compile_spec = {"backward": {"inputs": args}}
with self.assertRaisesRegex(RuntimeError, "method_compile_spec does not contain the \"forward\" key." + errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No dictionary under the forward key
compile_spec = {"forward": 1}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain a dictionary with an \"inputs\" key, "
Reported by Pylint.
Line: 89
Column: 13
"method_compile_spec does not contain a dictionary with an \"inputs\" key, "
"under it's \"forward\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No inputs key (in the dictionary under the forward key)
compile_spec = {"forward": {"not inputs": args}}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain a dictionary with an \"inputs\" key, "
Reported by Pylint.
Line: 89
Column: 13
"method_compile_spec does not contain a dictionary with an \"inputs\" key, "
"under it's \"forward\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No inputs key (in the dictionary under the forward key)
compile_spec = {"forward": {"not inputs": args}}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain a dictionary with an \"inputs\" key, "
Reported by Pylint.
caffe2/python/operator_test/fc_operator_test.py
28 issues
Line: 8
Column: 1
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from hypothesis import assume, given, settings, HealthCheck
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
import unittest
Reported by Pylint.
Line: 11
Column: 1
from hypothesis import assume, given, settings, HealthCheck
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
import unittest
class TestFcOperator(serial.SerializedTestCase):
Reported by Pylint.
Line: 107
Column: 5
if __name__ == "__main__":
import unittest
unittest.main()
Reported by Pylint.
Line: 1
Column: 1
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from hypothesis import assume, given, settings, HealthCheck
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 13
Column: 1
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
import unittest
class TestFcOperator(serial.SerializedTestCase):
def _run_test(self, n, m, k, transposed, multi_dim, dtype, engine, gc, dc):
if dtype == np.float16:
Reported by Pylint.
Line: 16
Column: 1
import unittest
class TestFcOperator(serial.SerializedTestCase):
def _run_test(self, n, m, k, transposed, multi_dim, dtype, engine, gc, dc):
if dtype == np.float16:
# fp16 only supported with CUDA/HIP
assume(core.IsGPUDeviceType(gc.device_type))
dc = [d for d in dc if core.IsGPUDeviceType(d.device_type)]
Reported by Pylint.
Line: 17
Column: 5
class TestFcOperator(serial.SerializedTestCase):
def _run_test(self, n, m, k, transposed, multi_dim, dtype, engine, gc, dc):
if dtype == np.float16:
# fp16 only supported with CUDA/HIP
assume(core.IsGPUDeviceType(gc.device_type))
dc = [d for d in dc if core.IsGPUDeviceType(d.device_type)]
Reported by Pylint.
Line: 17
Column: 5
class TestFcOperator(serial.SerializedTestCase):
def _run_test(self, n, m, k, transposed, multi_dim, dtype, engine, gc, dc):
if dtype == np.float16:
# fp16 only supported with CUDA/HIP
assume(core.IsGPUDeviceType(gc.device_type))
dc = [d for d in dc if core.IsGPUDeviceType(d.device_type)]
Reported by Pylint.
Line: 17
Column: 5
class TestFcOperator(serial.SerializedTestCase):
def _run_test(self, n, m, k, transposed, multi_dim, dtype, engine, gc, dc):
if dtype == np.float16:
# fp16 only supported with CUDA/HIP
assume(core.IsGPUDeviceType(gc.device_type))
dc = [d for d in dc if core.IsGPUDeviceType(d.device_type)]
Reported by Pylint.
Line: 17
Column: 5
class TestFcOperator(serial.SerializedTestCase):
def _run_test(self, n, m, k, transposed, multi_dim, dtype, engine, gc, dc):
if dtype == np.float16:
# fp16 only supported with CUDA/HIP
assume(core.IsGPUDeviceType(gc.device_type))
dc = [d for d in dc if core.IsGPUDeviceType(d.device_type)]
Reported by Pylint.
aten/src/ATen/nnapi/codegen.py
28 issues
Line: 75
Column: 10
]
def main(argv):
struct_members = []
load_functions = []
define_checks = []
for ret, name, args in NNAPI_FUNCTIONS:
Reported by Pylint.
Line: 39
Column: 1
NNAPI_FUNCTIONS = [
("int", "ANeuralNetworks_getDeviceCount", "uint32_t* numDevices"), # noqa: B950
("int", "ANeuralNetworks_getDevice", "uint32_t devIndex, ANeuralNetworksDevice** device"), # noqa: B950
("int", "ANeuralNetworksDevice_getName", "const ANeuralNetworksDevice* device, const char** name"), # noqa: B950
("int", "ANeuralNetworksDevice_getVersion", "const ANeuralNetworksDevice* device, const char** version"), # noqa: B950
("int", "ANeuralNetworksDevice_getFeatureLevel", "const ANeuralNetworksDevice* device, int64_t* featureLevel"), # noqa: B950
("int", "ANeuralNetworksModel_getSupportedOperationsForDevices", " const ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, bool* supportedOps"), # noqa: B950
("int", "ANeuralNetworksCompilation_createForDevices", "ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, ANeuralNetworksCompilation** compilation"), # noqa: B950
Reported by Pylint.
Line: 40
Column: 1
NNAPI_FUNCTIONS = [
("int", "ANeuralNetworks_getDeviceCount", "uint32_t* numDevices"), # noqa: B950
("int", "ANeuralNetworks_getDevice", "uint32_t devIndex, ANeuralNetworksDevice** device"), # noqa: B950
("int", "ANeuralNetworksDevice_getName", "const ANeuralNetworksDevice* device, const char** name"), # noqa: B950
("int", "ANeuralNetworksDevice_getVersion", "const ANeuralNetworksDevice* device, const char** version"), # noqa: B950
("int", "ANeuralNetworksDevice_getFeatureLevel", "const ANeuralNetworksDevice* device, int64_t* featureLevel"), # noqa: B950
("int", "ANeuralNetworksModel_getSupportedOperationsForDevices", " const ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, bool* supportedOps"), # noqa: B950
("int", "ANeuralNetworksCompilation_createForDevices", "ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, ANeuralNetworksCompilation** compilation"), # noqa: B950
("int", "ANeuralNetworksExecution_compute", "ANeuralNetworksExecution* execution"), # noqa: B950
Reported by Pylint.
Line: 41
Column: 1
("int", "ANeuralNetworks_getDeviceCount", "uint32_t* numDevices"), # noqa: B950
("int", "ANeuralNetworks_getDevice", "uint32_t devIndex, ANeuralNetworksDevice** device"), # noqa: B950
("int", "ANeuralNetworksDevice_getName", "const ANeuralNetworksDevice* device, const char** name"), # noqa: B950
("int", "ANeuralNetworksDevice_getVersion", "const ANeuralNetworksDevice* device, const char** version"), # noqa: B950
("int", "ANeuralNetworksDevice_getFeatureLevel", "const ANeuralNetworksDevice* device, int64_t* featureLevel"), # noqa: B950
("int", "ANeuralNetworksModel_getSupportedOperationsForDevices", " const ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, bool* supportedOps"), # noqa: B950
("int", "ANeuralNetworksCompilation_createForDevices", "ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, ANeuralNetworksCompilation** compilation"), # noqa: B950
("int", "ANeuralNetworksExecution_compute", "ANeuralNetworksExecution* execution"), # noqa: B950
("int", "ANeuralNetworksMemory_createFromFd", "size_t size, int protect, int fd, size_t offset, ANeuralNetworksMemory** memory"), # noqa: B950
Reported by Pylint.
Line: 42
Column: 1
("int", "ANeuralNetworks_getDevice", "uint32_t devIndex, ANeuralNetworksDevice** device"), # noqa: B950
("int", "ANeuralNetworksDevice_getName", "const ANeuralNetworksDevice* device, const char** name"), # noqa: B950
("int", "ANeuralNetworksDevice_getVersion", "const ANeuralNetworksDevice* device, const char** version"), # noqa: B950
("int", "ANeuralNetworksDevice_getFeatureLevel", "const ANeuralNetworksDevice* device, int64_t* featureLevel"), # noqa: B950
("int", "ANeuralNetworksModel_getSupportedOperationsForDevices", " const ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, bool* supportedOps"), # noqa: B950
("int", "ANeuralNetworksCompilation_createForDevices", "ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, ANeuralNetworksCompilation** compilation"), # noqa: B950
("int", "ANeuralNetworksExecution_compute", "ANeuralNetworksExecution* execution"), # noqa: B950
("int", "ANeuralNetworksMemory_createFromFd", "size_t size, int protect, int fd, size_t offset, ANeuralNetworksMemory** memory"), # noqa: B950
("void", "ANeuralNetworksMemory_free", "ANeuralNetworksMemory* memory"), # noqa: B950
Reported by Pylint.
Line: 43
Column: 1
("int", "ANeuralNetworksDevice_getName", "const ANeuralNetworksDevice* device, const char** name"), # noqa: B950
("int", "ANeuralNetworksDevice_getVersion", "const ANeuralNetworksDevice* device, const char** version"), # noqa: B950
("int", "ANeuralNetworksDevice_getFeatureLevel", "const ANeuralNetworksDevice* device, int64_t* featureLevel"), # noqa: B950
("int", "ANeuralNetworksModel_getSupportedOperationsForDevices", " const ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, bool* supportedOps"), # noqa: B950
("int", "ANeuralNetworksCompilation_createForDevices", "ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, ANeuralNetworksCompilation** compilation"), # noqa: B950
("int", "ANeuralNetworksExecution_compute", "ANeuralNetworksExecution* execution"), # noqa: B950
("int", "ANeuralNetworksMemory_createFromFd", "size_t size, int protect, int fd, size_t offset, ANeuralNetworksMemory** memory"), # noqa: B950
("void", "ANeuralNetworksMemory_free", "ANeuralNetworksMemory* memory"), # noqa: B950
("int", "ANeuralNetworksModel_create", "ANeuralNetworksModel** model"), # noqa: B950
Reported by Pylint.
Line: 44
Column: 1
("int", "ANeuralNetworksDevice_getVersion", "const ANeuralNetworksDevice* device, const char** version"), # noqa: B950
("int", "ANeuralNetworksDevice_getFeatureLevel", "const ANeuralNetworksDevice* device, int64_t* featureLevel"), # noqa: B950
("int", "ANeuralNetworksModel_getSupportedOperationsForDevices", " const ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, bool* supportedOps"), # noqa: B950
("int", "ANeuralNetworksCompilation_createForDevices", "ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, ANeuralNetworksCompilation** compilation"), # noqa: B950
("int", "ANeuralNetworksExecution_compute", "ANeuralNetworksExecution* execution"), # noqa: B950
("int", "ANeuralNetworksMemory_createFromFd", "size_t size, int protect, int fd, size_t offset, ANeuralNetworksMemory** memory"), # noqa: B950
("void", "ANeuralNetworksMemory_free", "ANeuralNetworksMemory* memory"), # noqa: B950
("int", "ANeuralNetworksModel_create", "ANeuralNetworksModel** model"), # noqa: B950
("void", "ANeuralNetworksModel_free", "ANeuralNetworksModel* model"), # noqa: B950
Reported by Pylint.
Line: 45
Column: 1
("int", "ANeuralNetworksDevice_getFeatureLevel", "const ANeuralNetworksDevice* device, int64_t* featureLevel"), # noqa: B950
("int", "ANeuralNetworksModel_getSupportedOperationsForDevices", " const ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, bool* supportedOps"), # noqa: B950
("int", "ANeuralNetworksCompilation_createForDevices", "ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, ANeuralNetworksCompilation** compilation"), # noqa: B950
("int", "ANeuralNetworksExecution_compute", "ANeuralNetworksExecution* execution"), # noqa: B950
("int", "ANeuralNetworksMemory_createFromFd", "size_t size, int protect, int fd, size_t offset, ANeuralNetworksMemory** memory"), # noqa: B950
("void", "ANeuralNetworksMemory_free", "ANeuralNetworksMemory* memory"), # noqa: B950
("int", "ANeuralNetworksModel_create", "ANeuralNetworksModel** model"), # noqa: B950
("void", "ANeuralNetworksModel_free", "ANeuralNetworksModel* model"), # noqa: B950
("int", "ANeuralNetworksModel_finish", "ANeuralNetworksModel* model"), # noqa: B950
Reported by Pylint.
Line: 46
Column: 1
("int", "ANeuralNetworksModel_getSupportedOperationsForDevices", " const ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, bool* supportedOps"), # noqa: B950
("int", "ANeuralNetworksCompilation_createForDevices", "ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, ANeuralNetworksCompilation** compilation"), # noqa: B950
("int", "ANeuralNetworksExecution_compute", "ANeuralNetworksExecution* execution"), # noqa: B950
("int", "ANeuralNetworksMemory_createFromFd", "size_t size, int protect, int fd, size_t offset, ANeuralNetworksMemory** memory"), # noqa: B950
("void", "ANeuralNetworksMemory_free", "ANeuralNetworksMemory* memory"), # noqa: B950
("int", "ANeuralNetworksModel_create", "ANeuralNetworksModel** model"), # noqa: B950
("void", "ANeuralNetworksModel_free", "ANeuralNetworksModel* model"), # noqa: B950
("int", "ANeuralNetworksModel_finish", "ANeuralNetworksModel* model"), # noqa: B950
("int", "ANeuralNetworksModel_addOperand", "ANeuralNetworksModel* model, const ANeuralNetworksOperandType* type"), # noqa: B950
Reported by Pylint.
Line: 51
Column: 1
("int", "ANeuralNetworksModel_create", "ANeuralNetworksModel** model"), # noqa: B950
("void", "ANeuralNetworksModel_free", "ANeuralNetworksModel* model"), # noqa: B950
("int", "ANeuralNetworksModel_finish", "ANeuralNetworksModel* model"), # noqa: B950
("int", "ANeuralNetworksModel_addOperand", "ANeuralNetworksModel* model, const ANeuralNetworksOperandType* type"), # noqa: B950
("int", "ANeuralNetworksModel_setOperandValue", "ANeuralNetworksModel* model, int32_t index, const void* buffer, size_t length"), # noqa: B950
("int", "ANeuralNetworksModel_setOperandValueFromMemory", "ANeuralNetworksModel* model, int32_t index, const ANeuralNetworksMemory* memory, size_t offset, size_t length"), # noqa: B950
("int", "ANeuralNetworksModel_addOperation", "ANeuralNetworksModel* model, ANeuralNetworksOperationType type, uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount, const uint32_t* outputs"), # noqa: B950
("int", "ANeuralNetworksModel_identifyInputsAndOutputs", "ANeuralNetworksModel* model, uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount, const uint32_t* outputs"), # noqa: B950
("int", "ANeuralNetworksModel_relaxComputationFloat32toFloat16", "ANeuralNetworksModel* model, bool allow"), # noqa: B950
Reported by Pylint.
torch/distributed/rpc/backend_registry.py
28 issues
Line: 9
Column: 1
import torch
import torch.distributed as dist
from . import api
from . import constants as rpc_constants
BackendValue = collections.namedtuple(
"BackendValue", ["construct_rpc_backend_options_handler", "init_backend_handler"]
Reported by Pylint.
Line: 10
Column: 1
import torch.distributed as dist
from . import api
from . import constants as rpc_constants
BackendValue = collections.namedtuple(
"BackendValue", ["construct_rpc_backend_options_handler", "init_backend_handler"]
)
Reported by Pylint.
Line: 138
Column: 5
_channels=None,
**kwargs
):
from . import TensorPipeRpcBackendOptions
return TensorPipeRpcBackendOptions(
rpc_timeout=rpc_timeout,
init_method=init_method,
num_worker_threads=num_worker_threads,
Reported by Pylint.
Line: 162
Column: 69
my_name, my_device_count, my_device_maps, my_devices, group
):
gathered: List[Tuple[
str, int, Dict[str, Dict[torch.device, torch.device]], List[torch.device]
]] = [("", 0, {}, []) for _ in range(group.size())]
dist.all_gather_object(
gathered, (my_name, my_device_count, my_device_maps, my_devices), group
)
all_names = [name for name, _, _, _ in gathered]
Reported by Pylint.
Line: 162
Column: 34
my_name, my_device_count, my_device_maps, my_devices, group
):
gathered: List[Tuple[
str, int, Dict[str, Dict[torch.device, torch.device]], List[torch.device]
]] = [("", 0, {}, []) for _ in range(group.size())]
dist.all_gather_object(
gathered, (my_name, my_device_count, my_device_maps, my_devices), group
)
all_names = [name for name, _, _, _ in gathered]
Reported by Pylint.
Line: 162
Column: 48
my_name, my_device_count, my_device_maps, my_devices, group
):
gathered: List[Tuple[
str, int, Dict[str, Dict[torch.device, torch.device]], List[torch.device]
]] = [("", 0, {}, []) for _ in range(group.size())]
dist.all_gather_object(
gathered, (my_name, my_device_count, my_device_maps, my_devices), group
)
all_names = [name for name, _, _, _ in gathered]
Reported by Pylint.
Line: 236
Column: 41
)
# passed all checked, construct reverse mapping for return values
reverse_device_maps: Dict[str, Dict[torch.device, torch.device]] = {}
for node in all_names:
if my_name in all_device_maps[node]:
reverse_device_maps[node] = {
v: k for k, v in all_device_maps[node][my_name].items()
}
Reported by Pylint.
Line: 236
Column: 55
)
# passed all checked, construct reverse mapping for return values
reverse_device_maps: Dict[str, Dict[torch.device, torch.device]] = {}
for node in all_names:
if my_name in all_device_maps[node]:
reverse_device_maps[node] = {
v: k for k, v in all_device_maps[node][my_name].items()
}
Reported by Pylint.
Line: 244
Column: 26
}
if not my_devices:
devices_set: Set[torch.device] = set()
for _, map_ in my_device_maps.items():
devices_set.update(map_.keys())
for _, map_ in reverse_device_maps.items():
devices_set.update(map_.keys())
devices_set.discard(torch.device("cpu"))
Reported by Pylint.
Line: 249
Column: 29
devices_set.update(map_.keys())
for _, map_ in reverse_device_maps.items():
devices_set.update(map_.keys())
devices_set.discard(torch.device("cpu"))
my_devices = list(devices_set)
my_devices = sorted(my_devices, key=lambda d: d.index)
return reverse_device_maps, my_devices
Reported by Pylint.
torch/distributions/dirichlet.py
28 issues
Line: 11
Column: 12
# This helper is exposed for testing.
def _Dirichlet_backward(x, concentration, grad_output):
total = concentration.sum(-1, True).expand_as(concentration)
grad = torch._dirichlet_grad(x, concentration, total)
return grad * (grad_output - (x * grad_output).sum(-1, True))
class _Dirichlet(Function):
@staticmethod
Reported by Pylint.
Line: 18
Column: 13
class _Dirichlet(Function):
@staticmethod
def forward(ctx, concentration):
x = torch._sample_dirichlet(concentration)
ctx.save_for_backward(x, concentration)
return x
@staticmethod
@once_differentiable
Reported by Pylint.
Line: 56
Column: 23
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Dirichlet, _instance)
batch_shape = torch.Size(batch_shape)
new.concentration = self.concentration.expand(batch_shape + self.event_shape)
super(Dirichlet, new).__init__(batch_shape, self.event_shape, validate_args=False)
new._validate_args = self._validate_args
return new
Reported by Pylint.
Line: 70
Column: 18
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
return ((torch.log(value) * (self.concentration - 1.0)).sum(-1) +
torch.lgamma(self.concentration.sum(-1)) -
torch.lgamma(self.concentration).sum(-1))
@property
def mean(self):
Reported by Pylint.
Line: 71
Column: 17
if self._validate_args:
self._validate_sample(value)
return ((torch.log(value) * (self.concentration - 1.0)).sum(-1) +
torch.lgamma(self.concentration.sum(-1)) -
torch.lgamma(self.concentration).sum(-1))
@property
def mean(self):
return self.concentration / self.concentration.sum(-1, True)
Reported by Pylint.
Line: 72
Column: 17
self._validate_sample(value)
return ((torch.log(value) * (self.concentration - 1.0)).sum(-1) +
torch.lgamma(self.concentration.sum(-1)) -
torch.lgamma(self.concentration).sum(-1))
@property
def mean(self):
return self.concentration / self.concentration.sum(-1, True)
Reported by Pylint.
Line: 86
Column: 17
def entropy(self):
k = self.concentration.size(-1)
a0 = self.concentration.sum(-1)
return (torch.lgamma(self.concentration).sum(-1) - torch.lgamma(a0) -
(k - a0) * torch.digamma(a0) -
((self.concentration - 1.0) * torch.digamma(self.concentration)).sum(-1))
@property
def _natural_params(self):
Reported by Pylint.
Line: 86
Column: 60
def entropy(self):
k = self.concentration.size(-1)
a0 = self.concentration.sum(-1)
return (torch.lgamma(self.concentration).sum(-1) - torch.lgamma(a0) -
(k - a0) * torch.digamma(a0) -
((self.concentration - 1.0) * torch.digamma(self.concentration)).sum(-1))
@property
def _natural_params(self):
Reported by Pylint.
Line: 87
Column: 28
k = self.concentration.size(-1)
a0 = self.concentration.sum(-1)
return (torch.lgamma(self.concentration).sum(-1) - torch.lgamma(a0) -
(k - a0) * torch.digamma(a0) -
((self.concentration - 1.0) * torch.digamma(self.concentration)).sum(-1))
@property
def _natural_params(self):
return (self.concentration, )
Reported by Pylint.
Line: 88
Column: 47
a0 = self.concentration.sum(-1)
return (torch.lgamma(self.concentration).sum(-1) - torch.lgamma(a0) -
(k - a0) * torch.digamma(a0) -
((self.concentration - 1.0) * torch.digamma(self.concentration)).sum(-1))
@property
def _natural_params(self):
return (self.concentration, )
Reported by Pylint.