The following issues were found:
torch/quantization/ns/utils.py
93 issues
Line: 21
Column: 1
from torch.quantization.utils import getattr_from_fqn
from torch.quantization.quantize import is_activation_post_process
from .ns_types import NSNodeTargetType, NSResultsType
# TODO(future PR): consider deleting this enum and using the torch types
# directly. This might be tricky because it is not a one to one mapping.
class NodeInputOrOutputType(enum.Enum):
    FP32 = enum.auto()  # torch.float
Reported by Pylint.
Line: 138
Column: 42
        cur_node_dtype_target = node.args[1]
        assert (
            cur_node_dtype_target is torch.float16
        ), f"{cur_node_dtype_target} handling needs to be added"
        return (prev_node_output_type, NodeInputOrOutputType.FP16)
    elif node.target in METHS_IO_TYPE_FP32_OR_INT8:
Reported by Pylint.
Line: 181
Column: 32
    if prev_node.op == "call_function":
        # quantize - read the args directly
        if prev_node.target == torch.quantize_per_tensor:
            return _get_scale_zp_from_function_args(prev_node, gm, 1, 2)
        elif prev_node.target in (toq.add, toq.add_relu, toq.mul, toq.mul_relu):
            return _get_scale_zp_from_function_args(prev_node, gm, 2, 3)
    return None
Reported by Pylint.
Line: 306
Column: 25
        return []
    if node.op == "call_function" and (
        # TODO(future PR): use relationship map instead of hardcoding
        node.target in (torch.add, torch.ops.quantized.add, operator.add)
        or node.target in (torch.mul, torch.ops.quantized.mul, operator.mul)
    ):
        result = []
        for i in range(2):
            if type(node.args[i]) == Node:
Reported by Pylint.
Line: 307
Column: 28
    if node.op == "call_function" and (
        # TODO(future PR): use relationship map instead of hardcoding
        node.target in (torch.add, torch.ops.quantized.add, operator.add)
        or node.target in (torch.mul, torch.ops.quantized.mul, operator.mul)
    ):
        result = []
        for i in range(2):
            if type(node.args[i]) == Node:
                result.append(i)
Reported by Pylint.
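Note: the context for both of these findings centers on the type(node.args[i]) == Node comparison, which matches Pylint's unidiomatic-typecheck (C0123). Assuming that is the message, a sketch of the idiomatic, subclass-aware form:
    for i in range(2):
        if isinstance(node.args[i], Node):  # rather than type(...) == Node
            result.append(i)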
Line: 425
Column: 24
        a1 = a1.dequantize()
    # for the purposes of this util, only handle floats
    if a0.dtype != torch.float or a1.dtype != torch.float:
        return None
    new_args = (a0, a1, *a_other)
    return f(*new_args, **kwargs)
Reported by Pylint.
Line: 425
Column: 51
        a1 = a1.dequantize()
    # for the purposes of this util, only handle floats
    if a0.dtype != torch.float or a1.dtype != torch.float:
        return None
    new_args = (a0, a1, *a_other)
    return f(*new_args, **kwargs)
Reported by Pylint.
Line: 438
Column: 17
def compute_sqnr(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    Ps = torch.norm(x)
    Pn = torch.norm(x - y)
    return 20 * torch.log10(Ps / Pn)
@maybe_dequantize_first_two_tensor_args_and_handle_tuples
def compute_normalized_l2_error(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    return torch.sqrt(((x - y) ** 2).sum() / (x ** 2).sum())
Reported by Pylint.
Line: 443
Column: 12
@maybe_dequantize_first_two_tensor_args_and_handle_tuples
def compute_normalized_l2_error(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    return torch.sqrt(((x - y) ** 2).sum() / (x ** 2).sum())
@maybe_dequantize_first_two_tensor_args_and_handle_tuples
def compute_cosine_similarity(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    # For convolutions, the shape of the quantized weight has one additional
Reported by Pylint.
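Note: the reported columns on lines 425, 438, and 443 all land on torch.* attribute references (torch.float, torch.norm, torch.sqrt), which is consistent with Pylint's no-member (E1101) false positives on C-extension modules. Assuming that diagnosis, a targeted inline suppression keeps the signal elsewhere intact, e.g.:
    Ps = torch.norm(x)  # pylint: disable=no-member
Project-wide, the usual remedy is generated-members=torch.* in the Pylint configuration.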
Line: 23
Column: 3
from .ns_types import NSNodeTargetType, NSResultsType
# TODO(future PR): consider deleting this enum and using the torch types
# directly. This might be tricky because it is not a one to one mapping.
class NodeInputOrOutputType(enum.Enum):
    FP32 = enum.auto()  # torch.float
    INT8 = enum.auto()  # torch.qint8 or torch.quint8
    FP16 = enum.auto()  # torch.float16
Reported by Pylint.
test/onnx/verify.py
93 issues
Line: 1
Column: 1
import torch
import torch.jit
import torch.onnx
import onnx
import onnx.helper
import numpy as np
Reported by Pylint.
Line: 2
Column: 1
import torch
import torch.jit
import torch.onnx
import onnx
import onnx.helper
import numpy as np
Reported by Pylint.
Line: 3
Column: 1
import torch
import torch.jit
import torch.onnx
import onnx
import onnx.helper
import numpy as np
Reported by Pylint.
Line: 5
Column: 1
import torch.jit
import torch.onnx
import onnx
import onnx.helper
import numpy as np
import difflib
Reported by Pylint.
Line: 6
Column: 1
import torch.onnx
import onnx
import onnx.helper
import numpy as np
import difflib
import io
Reported by Pylint.
Line: 72
Column: 13
        try:
            np.testing.assert_allclose(x, y, rtol=self.rtol, atol=self.atol,
                                       equal_nan=True, verbose=True)
        except AssertionError as e:
            raise
            k("{}{}".format(colonize(msg), str(e).lstrip()))
    else:
        raise RuntimeError("Unsupported almost equal test")
Reported by Pylint.
Line: 74
Column: 17
                                       equal_nan=True, verbose=True)
        except AssertionError as e:
            raise
            k("{}{}".format(colonize(msg), str(e).lstrip()))
    else:
        raise RuntimeError("Unsupported almost equal test")
    def requireEqual(self, x, y, msg=None):
        """
Reported by Pylint.
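Note: both findings here flag the statement after the bare raise, i.e. unreachable code (W0101): the re-raise fires before the failure continuation k can run. A sketch of one plausible repair, assuming k was meant to receive the formatted message:
        except AssertionError as e:
            # invoke the failure continuation before propagating the error
            k("{}{}".format(colonize(msg), str(e).lstrip()))
            raise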
Line: 109
Column: 13
    elif isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
        try:
            np.testing.assert_equal(x, y)
        except AssertionError as e:
            raise
            k("{}{}".format(colonize(msg, ": "), str(e).lstrip()))
    else:
        if x != y:
            # TODO: Better algorithm for lists
Reported by Pylint.
Line: 111
Column: 17
            np.testing.assert_equal(x, y)
        except AssertionError as e:
            raise
            k("{}{}".format(colonize(msg, ": "), str(e).lstrip()))
    else:
        if x != y:
            # TODO: Better algorithm for lists
            sx = str(x)
            sy = str(y)
Reported by Pylint.
Line: 114
Column: 3
k("{}{}".format(colonize(msg, ": "), str(e).lstrip()))
else:
if x != y:
# TODO: Better algorithm for lists
sx = str(x)
sy = str(y)
if len(sx) > 40 or len(sy) > 40 or "\n" in sx or "\n" in sy:
# long form
l = "=" * 50
Reported by Pylint.
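Note: this finding's context ends at l = "=" * 50; the name l falls outside Pylint's default naming rules (invalid-name, C0103) and is easily confused with the digit 1. If that is the message here, a sketch of the rename:
                separator = "=" * 50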
test/ao/sparsity/test_kernels.py
93 issues
Line: 2
Column: 1
# -*- coding: utf-8 -*-
from torch.testing._internal.common_utils import run_tests
import copy
import numpy as np
import io
import logging
from itertools import product
Reported by Pylint.
Line: 10
Column: 1
import logging
from itertools import product
import torch
import torch.quantization as tq
from torch import nn
from torch.ao.nn.sparse import quantized as ao_nn_sq
from torch.ao.nn.sparse.quantized.utils import LinearBlockSparsePattern
Reported by Pylint.
Line: 11
Column: 1
from itertools import product
import torch
import torch.quantization as tq
from torch import nn
from torch.ao.nn.sparse import quantized as ao_nn_sq
from torch.ao.nn.sparse.quantized.utils import LinearBlockSparsePattern
Reported by Pylint.
Line: 13
Column: 1
import torch
import torch.quantization as tq
from torch import nn
from torch.ao.nn.sparse import quantized as ao_nn_sq
from torch.ao.nn.sparse.quantized.utils import LinearBlockSparsePattern
from torch.testing._internal.common_utils import TestCase
from torch.testing._internal.common_quantized import (
Reported by Pylint.
Line: 14
Column: 1
import torch.quantization as tq
from torch import nn
from torch.ao.nn.sparse import quantized as ao_nn_sq
from torch.ao.nn.sparse.quantized.utils import LinearBlockSparsePattern
from torch.testing._internal.common_utils import TestCase
from torch.testing._internal.common_quantized import (
override_cpu_allocator_for_qnnpack,
Reported by Pylint.
Line: 15
Column: 1
from torch import nn
from torch.ao.nn.sparse import quantized as ao_nn_sq
from torch.ao.nn.sparse.quantized.utils import LinearBlockSparsePattern
from torch.testing._internal.common_utils import TestCase
from torch.testing._internal.common_quantized import (
override_cpu_allocator_for_qnnpack,
override_qengines,
Reported by Pylint.
Line: 17
Column: 1
from torch.ao.nn.sparse import quantized as ao_nn_sq
from torch.ao.nn.sparse.quantized.utils import LinearBlockSparsePattern
from torch.testing._internal.common_utils import TestCase
from torch.testing._internal.common_quantized import (
override_cpu_allocator_for_qnnpack,
override_qengines,
qengine_is_qnnpack,
qengine_is_fbgemm,
Reported by Pylint.
Line: 18
Column: 1
from torch.ao.nn.sparse.quantized.utils import LinearBlockSparsePattern
from torch.testing._internal.common_utils import TestCase
from torch.testing._internal.common_quantized import (
override_cpu_allocator_for_qnnpack,
override_qengines,
qengine_is_qnnpack,
qengine_is_fbgemm,
)
Reported by Pylint.
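Note: the cluster of findings on this import block is consistent with wrong-import-order (C0411): the first-party run_tests import precedes standard-library modules. A sketch of the conventional grouping, assuming that diagnosis (stdlib, then third-party, then local):
    import copy
    import io
    import logging
    from itertools import product

    import numpy as np
    import torch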
Line: 25
Column: 3
    qengine_is_fbgemm,
)
# TODO: Once more test files are created, move the contents to a ao folder.
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
class TestQuantizedSparseKernels(TestCase):
    @override_qengines
Reported by Pylint.
Line: 130
Column: 9
W_zp = 0
X_fp32 = torch.randn(batch_size, input_channels, dtype=torch.float32)
float_bias = torch.randn(output_channels, dtype=torch.float32)
W_fp32 = torch.randn(output_channels, input_channels, dtype=torch.float32)
mask = torch.randint(0, 2, W_fp32.shape)
W_fp32 *= mask
Reported by Pylint.
caffe2/python/control_test.py
92 issues
Line: 1
Column: 1
from caffe2.python import control, core, test_util, workspace
import logging
logger = logging.getLogger(__name__)
Reported by Pylint.
Line: 8
Column: 1
from caffe2.python import control, core, test_util, workspace
import logging
logger = logging.getLogger(__name__)
class TestControl(test_util.TestCase):
    def setUp(self):
Reported by Pylint.
Line: 12
Column: 1
logger = logging.getLogger(__name__)
class TestControl(test_util.TestCase):
    def setUp(self):
        super(TestControl, self).setUp()
        self.N_ = 10
        self.init_net_ = core.Net("init-net")
Reported by Pylint.
Line: 12
Column: 1
logger = logging.getLogger(__name__)
class TestControl(test_util.TestCase):
    def setUp(self):
        super(TestControl, self).setUp()
        self.N_ = 10
        self.init_net_ = core.Net("init-net")
Reported by Pylint.
Line: 12
Column: 1
logger = logging.getLogger(__name__)
class TestControl(test_util.TestCase):
    def setUp(self):
        super(TestControl, self).setUp()
        self.N_ = 10
        self.init_net_ = core.Net("init-net")
Reported by Pylint.
Line: 14
Column: 9
class TestControl(test_util.TestCase):
    def setUp(self):
        super(TestControl, self).setUp()
        self.N_ = 10
        self.init_net_ = core.Net("init-net")
        cnt = self.init_net_.CreateCounter([], init_count=0)
        const_n = self.init_net_.ConstantFill(
Reported by Pylint.
Line: 15
Column: 9
class TestControl(test_util.TestCase):
    def setUp(self):
        super(TestControl, self).setUp()
        self.N_ = 10
        self.init_net_ = core.Net("init-net")
        cnt = self.init_net_.CreateCounter([], init_count=0)
        const_n = self.init_net_.ConstantFill(
            [], shape=[], value=self.N_, dtype=core.DataType.INT64)
Reported by Pylint.
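Note: without the message text it is hard to pin these setUp findings down; one candidate that often appears at this spot is super-with-arguments (R1725). If that applies, the Python 3 zero-argument form is preferred (a sketch):
    def setUp(self):
        super().setUp()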
Line: 59
Column: 5
        self.idle_net_.ConstantFill(
            [], shape=[], value=0, dtype=core.DataType.INT64)
    def CheckNetOutput(self, nets_and_expects):
        """
        Check the net output is expected
        nets_and_expects is a list of tuples (net, expect)
        """
        for net, expect in nets_and_expects:
Reported by Pylint.
Line: 69
Column: 5
            net.Proto().external_output[-1])
        self.assertEqual(output, expect)
    def CheckNetAllOutput(self, net, expects):
        """
        Check the net output is expected
        expects is a list of bools.
        """
        self.assertEqual(len(net.Proto().external_output), len(expects))
Reported by Pylint.
Line: 75
Column: 9
        expects is a list of bools.
        """
        self.assertEqual(len(net.Proto().external_output), len(expects))
        for i in range(len(expects)):
            output = workspace.FetchBlob(
                net.Proto().external_output[i])
            self.assertEqual(output, expects[i])
    def BuildAndRunPlan(self, step):
Reported by Pylint.
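Note: line 75, column 9 points at for i in range(len(expects)):, the classic consider-using-enumerate (C0200) pattern. A sketch of the suggested rewrite:
        for i, expect in enumerate(expects):
            output = workspace.FetchBlob(
                net.Proto().external_output[i])
            self.assertEqual(output, expect)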
caffe2/python/operator_test/top_k_test.py
92 issues
Line: 6
Column: 1
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 10
Column: 1
import numpy as np
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
class TestTopK(serial.SerializedTestCase):
Reported by Pylint.
Line: 220
Column: 47
    @given(X=hu.tensor(dtype=np.float32), k=st.integers(1, 5),
           axis=st.integers(-1, 5), **hu.gcs)
    @settings(deadline=10000)
    def test_top_k_grad(self, X, k, axis, gc, dc):
        dims = X.shape
        if axis >= len(dims):
            axis %= len(dims)
        input_axis = len(dims) - 1 if axis == -1 else axis
Reported by Pylint.
Line: 1
Column: 1
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core
Reported by Pylint.
Line: 15
Column: 1
import caffe2.python.serialized_test.serialized_test_util as serial
class TestTopK(serial.SerializedTestCase):
    def top_k_ref(self, X, k, flatten_indices, axis=-1):
        in_dims = X.shape
        out_dims = list(in_dims)
        out_dims[axis] = k
Reported by Pylint.
Line: 17
Column: 5
class TestTopK(serial.SerializedTestCase):
    def top_k_ref(self, X, k, flatten_indices, axis=-1):
        in_dims = X.shape
        out_dims = list(in_dims)
        out_dims[axis] = k
        out_dims = tuple(out_dims)
        if axis == -1:
Reported by Pylint.
Line: 17
Column: 5
class TestTopK(serial.SerializedTestCase):
    def top_k_ref(self, X, k, flatten_indices, axis=-1):
        in_dims = X.shape
        out_dims = list(in_dims)
        out_dims[axis] = k
        out_dims = tuple(out_dims)
        if axis == -1:
Reported by Pylint.
Line: 17
Column: 5
class TestTopK(serial.SerializedTestCase):
    def top_k_ref(self, X, k, flatten_indices, axis=-1):
        in_dims = X.shape
        out_dims = list(in_dims)
        out_dims[axis] = k
        out_dims = tuple(out_dims)
        if axis == -1:
Reported by Pylint.
Line: 17
Column: 5
class TestTopK(serial.SerializedTestCase):
    def top_k_ref(self, X, k, flatten_indices, axis=-1):
        in_dims = X.shape
        out_dims = list(in_dims)
        out_dims[axis] = k
        out_dims = tuple(out_dims)
        if axis == -1:
Reported by Pylint.
Line: 30
Column: 9
            prev_dims *= in_dims[i]
        for i in range(axis + 1, len(in_dims)):
            next_dims *= in_dims[i]
        n = in_dims[axis]
        X_flat = X.reshape((prev_dims, n, next_dims))
        values_ref = np.ndarray(
            shape=(prev_dims, k, next_dims), dtype=np.float32)
        values_ref.fill(0)
Reported by Pylint.
caffe2/python/operator_test/deform_conv_test.py
92 issues
Line: 6
Column: 1
import unittest
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, utils, workspace
from hypothesis import assume, given
Reported by Pylint.
Line: 10
Column: 1
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, utils, workspace
from hypothesis import assume, given
def _cudnn_supports(dilation=False, nhwc=False):
"""Return True if cuDNN supports this configuration."""
v = workspace.GetCuDNNVersion()
Reported by Pylint.
Line: 190
Column: 1
            self.assertDeviceChecks(dc, op, inputs, [0])
            return
        def reference_conv_op(*args):
            reference_op = core.CreateOperator(
                "Conv",
                ["X", "w", "b"] if use_bias else ["X", "w"],
                ["Y0"],
                stride=stride,
Reported by Pylint.
Line: 288
Column: 1
            self.assertDeviceChecks(dc, op, inputs, [0])
            return
        def reference_conv_op(*args):
            reference_op = core.CreateOperator(
                "Conv",
                ["X", "w", "b"] if use_bias else ["X", "w"],
                ["Y0"],
                stride=stride,
Reported by Pylint.
Line: 398
Column: 1
            self.assertDeviceChecks(dc, op, inputs, [0])
            return
        def reference_conv_op(*args):
            with core.DeviceScope(gc):
                workspace.FeedBlob("w0", w0)
                reference_op = core.CreateOperator(
                    "Conv",
                    ["X", "w0", "b"] if use_bias else ["X", "w0"],
Reported by Pylint.
Line: 601
Column: 5
if __name__ == "__main__":
    import unittest
    unittest.main()
Reported by Pylint.
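Note: unittest is already imported at the top of this module (visible in the line 1 finding below), so the second import inside the __main__ guard looks like reimported (W0404). A sketch of the fix:
    if __name__ == "__main__":
        unittest.main()  # reuse the module-level import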
Line: 1
Column: 1
import unittest
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, utils, workspace
Reported by Pylint.
Line: 15
Column: 5
def _cudnn_supports(dilation=False, nhwc=False):
"""Return True if cuDNN supports this configuration."""
v = workspace.GetCuDNNVersion()
if dilation and v < 6000:
# Dilation not supported until v6
return False
if dilation and nhwc:
# Dilation and NHWC not supported together
Reported by Pylint.
Line: 29
Column: 1
    return max(1, int((size + pad * 2 - (dilation * (kernel - 1) + 1)) / stride) + 1)
def _conv_2d_output_size(size, kernel, pad_h, pad_w, dilation, stride_h, stride_w):
    return [
        _conv_1d_output_size(size, kernel, pad_h, dilation, stride_h),
        _conv_1d_output_size(size, kernel, pad_w, dilation, stride_w),
    ]
Reported by Pylint.
Line: 36
Column: 1
    ]
def _conv_2d_offsets_dims(
    batch_size,
    size,
    kernel,
    pad_h,
    pad_w,
Reported by Pylint.
torch/nn/parallel/distributed.py
92 issues
Line: 28
Column: 1
from torch._utils import _get_device_index
from ..modules import Module
from ._functions import _get_stream
from .scatter_gather import gather, is_namedtuple, scatter_kwargs
def _tree_flatten_with_rref(output):
Reported by Pylint.
Line: 29
Column: 1
from torch._utils import _get_device_index
from ..modules import Module
from ._functions import _get_stream
from .scatter_gather import gather, is_namedtuple, scatter_kwargs
def _tree_flatten_with_rref(output):
    output_is_rref = RPC_AVAILABLE and isinstance(output, RRef)
Reported by Pylint.
Line: 30
Column: 1
from ..modules import Module
from ._functions import _get_stream
from .scatter_gather import gather, is_namedtuple, scatter_kwargs
def _tree_flatten_with_rref(output):
    output_is_rref = RPC_AVAILABLE and isinstance(output, RRef)
    if output_is_rref:
Reported by Pylint.
Line: 700
Column: 46
                    # parameters(module). parameters(module) is only needed in the
                    # single-process multi device case, where it accesses replicated
                    # parameters through _former_parameters.
                    for param_name, param in module.named_parameters(recurse=False)
                    if param.requires_grad
                    and f"{module_name}.{param_name}" not in self.parameters_to_ignore
                ]
            ]
        ]
Reported by Pylint.
Line: 852
Column: 16
    def forward(self, *inputs, **kwargs):
        with torch.autograd.profiler.record_function("DistributedDataParallel.forward"):
            if torch.is_grad_enabled() and self.require_backward_grad_sync:
                self.logger.set_runtime_stats_and_log()
                self.num_iterations += 1
                self.reducer.prepare_for_forward()
                # Notify the join context that this process has not joined, if
Reported by Pylint.
Line: 871
Column: 16
            # call _rebuild_buckets before the peak memory usage increases
            # during forward computation.
            # This should be called only once during whole training period.
            if torch.is_grad_enabled() and self.reducer._rebuild_buckets():
                logging.info("Reducer buckets have been rebuilt in this iteration.")
                self._has_rebuilt_buckets = True
            if self.require_forward_param_sync:
                self._sync_params()
Reported by Pylint.
Line: 888
Column: 16
            else:
                output = self.module(*inputs, **kwargs)
            if torch.is_grad_enabled() and self.require_backward_grad_sync:
                self.require_forward_param_sync = True
                # We'll return the output object verbatim since it is a freeform
                # object. We need to find any tensors in this object, though,
                # because we need to figure out which parameters were used during
                # this forward pass, to ensure we short circuit reduction for any
Reported by Pylint.
Line: 953
Column: 34
        def to_map(obj):
            if isinstance(obj, torch.Tensor):
                if obj.device == torch.device("cuda", target_gpu):
                    return (obj,)
                if not self.use_side_stream_for_tensor_copies:
                    return (obj.to(target_gpu),)
                else:
                    # Perform CPU -> GPU copies in a background stream. This code is
Reported by Pylint.
Line: 1011
Column: 28
    # forward pass to determine the no. of currently active processes and whether
    # all processes have joined.
    def _schedule_shadow_all_reduce_for_fwd_pass(self):
        all_active_procs = torch.zeros(1, device=self.device)
        dist.all_reduce(all_active_procs, group=self.process_group)
        return all_active_procs.item()
    # When running in join mode, schedules an allreduce to notify joined ranks
    # of whether backwards pass synchronization will run this iteration or not.
Reported by Pylint.
Line: 1019
Column: 36
    # of whether backwards pass synchronization will run this iteration or not.
    def _check_global_requires_backward_grad_sync(self, is_joined_rank):
        if not is_joined_rank and self.require_backward_grad_sync:
            requires_sync_tensor = torch.ones(1, device=self.device)
        else:
            requires_sync_tensor = torch.zeros(1, device=self.device)
        work = dist.all_reduce(
            requires_sync_tensor, group=self.process_group, async_op=True
Reported by Pylint.
torch/distributions/continuous_bernoulli.py
92 issues
Line: 59
Column: 27
            self.logits, = broadcast_all(logits)
        self._param = self.probs if probs is not None else self.logits
        if is_scalar:
            batch_shape = torch.Size()
        else:
            batch_shape = self._param.size()
        self._lims = lims
        super(ContinuousBernoulli, self).__init__(batch_shape, validate_args=validate_args)
Reported by Pylint.
Line: 68
Column: 23
    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(ContinuousBernoulli, _instance)
        new._lims = self._lims
        batch_shape = torch.Size(batch_shape)
        if 'probs' in self.__dict__:
            new.probs = self.probs.expand(batch_shape)
            new._param = new.probs
        if 'logits' in self.__dict__:
            new.logits = self.logits.expand(batch_shape)
Reported by Pylint.
Line: 83
Column: 16
        return self._param.new(*args, **kwargs)
    def _outside_unstable_region(self):
        return torch.max(torch.le(self.probs, self._lims[0]),
                         torch.gt(self.probs, self._lims[1]))
    def _cut_probs(self):
        return torch.where(self._outside_unstable_region(),
                           self.probs,
Reported by Pylint.
Line: 83
Column: 26
        return self._param.new(*args, **kwargs)
    def _outside_unstable_region(self):
        return torch.max(torch.le(self.probs, self._lims[0]),
                         torch.gt(self.probs, self._lims[1]))
    def _cut_probs(self):
        return torch.where(self._outside_unstable_region(),
                           self.probs,
Reported by Pylint.
Line: 84
Column: 26
    def _outside_unstable_region(self):
        return torch.max(torch.le(self.probs, self._lims[0]),
                         torch.gt(self.probs, self._lims[1]))
    def _cut_probs(self):
        return torch.where(self._outside_unstable_region(),
                           self.probs,
                           self._lims[0] * torch.ones_like(self.probs))
Reported by Pylint.
Line: 87
Column: 16
                         torch.gt(self.probs, self._lims[1]))
    def _cut_probs(self):
        return torch.where(self._outside_unstable_region(),
                           self.probs,
                           self._lims[0] * torch.ones_like(self.probs))
    def _cont_bern_log_norm(self):
        '''computes the log normalizing constant as a function of the 'probs' parameter'''
Reported by Pylint.
Line: 89
Column: 44
    def _cut_probs(self):
        return torch.where(self._outside_unstable_region(),
                           self.probs,
                           self._lims[0] * torch.ones_like(self.probs))
    def _cont_bern_log_norm(self):
        '''computes the log normalizing constant as a function of the 'probs' parameter'''
        cut_probs = self._cut_probs()
        cut_probs_below_half = torch.where(torch.le(cut_probs, 0.5),
Reported by Pylint.
Line: 94
Column: 32
    def _cont_bern_log_norm(self):
        '''computes the log normalizing constant as a function of the 'probs' parameter'''
        cut_probs = self._cut_probs()
        cut_probs_below_half = torch.where(torch.le(cut_probs, 0.5),
                                           cut_probs,
                                           torch.zeros_like(cut_probs))
        cut_probs_above_half = torch.where(torch.ge(cut_probs, 0.5),
                                           cut_probs,
                                           torch.ones_like(cut_probs))
Reported by Pylint.
Line: 94
Column: 44
    def _cont_bern_log_norm(self):
        '''computes the log normalizing constant as a function of the 'probs' parameter'''
        cut_probs = self._cut_probs()
        cut_probs_below_half = torch.where(torch.le(cut_probs, 0.5),
                                           cut_probs,
                                           torch.zeros_like(cut_probs))
        cut_probs_above_half = torch.where(torch.ge(cut_probs, 0.5),
                                           cut_probs,
                                           torch.ones_like(cut_probs))
Reported by Pylint.
Line: 96
Column: 44
        cut_probs = self._cut_probs()
        cut_probs_below_half = torch.where(torch.le(cut_probs, 0.5),
                                           cut_probs,
                                           torch.zeros_like(cut_probs))
        cut_probs_above_half = torch.where(torch.ge(cut_probs, 0.5),
                                           cut_probs,
                                           torch.ones_like(cut_probs))
        log_norm = torch.log(torch.abs(torch.log1p(-cut_probs) - torch.log(cut_probs))) - torch.where(
            torch.le(cut_probs, 0.5),
Reported by Pylint.
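Note: as in torch/quantization/ns/utils.py above, every reported column in torch/nn/parallel/distributed.py and torch/distributions/continuous_bernoulli.py lands on a torch.* reference (torch.is_grad_enabled, torch.zeros, torch.Size, torch.le, torch.where, ...), again suggesting no-member (E1101) false positives rather than real defects. The flagged attributes do exist at runtime, as a quick check shows:
    import torch
    t = torch.tensor([0.3, 0.7])
    print(torch.le(t, 0.5))                                       # tensor([ True, False])
    print(torch.where(torch.le(t, 0.5), t, torch.zeros_like(t)))  # tensor([0.3000, 0.0000])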
caffe2/python/operator_test/load_save_test.py
92 issues
Line: 1
Column: 1
import hypothesis.strategies as st
from hypothesis import given, assume, settings
import io
import math
import numpy as np
import os
import struct
import unittest
from pathlib import Path
Reported by Pylint.
Line: 2
Column: 1
import hypothesis.strategies as st
from hypothesis import given, assume, settings
import io
import math
import numpy as np
import os
import struct
import unittest
from pathlib import Path
Reported by Pylint.
Line: 12
Column: 1
from pathlib import Path
from typing import Dict, Generator, List, NamedTuple, Optional, Tuple, Type
from caffe2.proto import caffe2_pb2
from caffe2.proto.caffe2_pb2 import BlobSerializationOptions
from caffe2.python import core, test_util, workspace
if workspace.has_gpu_support:
    DEVICES = [caffe2_pb2.CPU, workspace.GpuDeviceType]
    max_gpuid = workspace.NumGpuDevices() - 1
Reported by Pylint.
Line: 153
Column: 9
class TestLoadSave(TestLoadSaveBase):
    def testLoadSave(self):
        self.load_save()
    def testRepeatedArgs(self):
        dtypes = [np.float16, np.float32, np.float64, np.bool, np.int8,
                  np.int16, np.int32, np.int64, np.uint8, np.uint16]
        arrays = [np.random.permutation(6).reshape(2, 3).astype(T)
Reported by Pylint.
Line: 153
Column: 9
class TestLoadSave(TestLoadSaveBase):
    def testLoadSave(self):
        self.load_save()
    def testRepeatedArgs(self):
        dtypes = [np.float16, np.float32, np.float64, np.bool, np.int8,
                  np.int16, np.int32, np.int64, np.uint8, np.uint16]
        arrays = [np.random.permutation(6).reshape(2, 3).astype(T)
Reported by Pylint.
Line: 153
Column: 9
class TestLoadSave(TestLoadSaveBase):
    def testLoadSave(self):
        self.load_save()
    def testRepeatedArgs(self):
        dtypes = [np.float16, np.float32, np.float64, np.bool, np.int8,
                  np.int16, np.int32, np.int64, np.uint8, np.uint16]
        arrays = [np.random.permutation(6).reshape(2, 3).astype(T)
Reported by Pylint.
Line: 153
Column: 9
class TestLoadSave(TestLoadSaveBase):
    def testLoadSave(self):
        self.load_save()
    def testRepeatedArgs(self):
        dtypes = [np.float16, np.float32, np.float64, np.bool, np.int8,
                  np.int16, np.int32, np.int64, np.uint8, np.uint16]
        arrays = [np.random.permutation(6).reshape(2, 3).astype(T)
Reported by Pylint.
Line: 402
Column: 20
    def testLoadAllMultipleFilesWithSameKey(self):
        tmp_folder = self.make_tempdir()
        db_file_1, arrays_1 = self.saveFile(tmp_folder, "db1", self._db_type, 0)
        db_file_2, arrays_2 = self.saveFile(tmp_folder, "db2", self._db_type, 0)
        db_files = [db_file_1, db_file_2]
        workspace.ResetWorkspace()
        self.assertEqual(len(workspace.Blobs()), 0)
Reported by Pylint.
Line: 403
Column: 20
    def testLoadAllMultipleFilesWithSameKey(self):
        tmp_folder = self.make_tempdir()
        db_file_1, arrays_1 = self.saveFile(tmp_folder, "db1", self._db_type, 0)
        db_file_2, arrays_2 = self.saveFile(tmp_folder, "db2", self._db_type, 0)
        db_files = [db_file_1, db_file_2]
        workspace.ResetWorkspace()
        self.assertEqual(len(workspace.Blobs()), 0)
        op = core.CreateOperator(
Reported by Pylint.
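Note: column 20 on lines 402 and 403 lines up with arrays_1 and arrays_2, which are never read in the shown context, matching unused-variable (W0612). If the values really are unneeded, the conventional silencing is (a sketch):
        db_file_1, _ = self.saveFile(tmp_folder, "db1", self._db_type, 0)
        db_file_2, _ = self.saveFile(tmp_folder, "db2", self._db_type, 0)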
Line: 575
Column: 13
        self.load_and_check_blobs(blobs, [tmp_file])
        blob_chunks = self._read_chunk_info(Path(tmp_file))
        for blob_name, chunks in blob_chunks.items():
            self.assertEqual(len(chunks), expected_num_chunks)
    def testSaveWithChunkSize(self) -> None:
        num_elems = 1234
        chunk_size = 32
Reported by Pylint.
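Note: column 13 on line 575 lines up with the loop variable blob_name, which is unused in the body. Iterating over the dict's values avoids binding it at all (a sketch):
        for chunks in blob_chunks.values():
            self.assertEqual(len(chunks), expected_num_chunks)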
test/jit/test_enum.py
91 issues
Line: 4
Column: 1
import os
import sys
import torch
from torch.testing import FileCheck
from enum import Enum
from typing import Any, List
# Make the helper files in test/ importable
Reported by Pylint.
Line: 5
Column: 1
import sys
import torch
from torch.testing import FileCheck
from enum import Enum
from typing import Any, List
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
Reported by Pylint.
Line: 12
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, make_global
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
Reported by Pylint.
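Note: the line 12 finding flags the import that follows the sys.path mutation, i.e. wrong-import-position (C0413). The late import is deliberate here (the path must be patched first), so an inline pragma documenting intent is a common alternative to reordering (a sketch):
    sys.path.append(pytorch_test_dir)
    from torch.testing._internal.jit_utils import JitTestCase, make_global  # pylint: disable=wrong-import-position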
Line: 179
Column: 21
        make_global(Color)
        def enum_const(x: Color) -> bool:
            if x == Color.PURPLE:
                return True
            else:
                return False
        with self.assertRaisesRegexWithHighlight(RuntimeError, "has no attribute 'PURPLE'", "Color.PURPLE"):
Reported by Pylint.
Line: 54
Column: 3
        def unsupported_enum_types(a: TensorEnum):
            return a.name
        # TODO: rewrite code so that the highlight is not empty.
        with self.assertRaisesRegexWithHighlight(RuntimeError, "Cannot create Enum with value type 'Tensor'", ""):
            torch.jit.script(unsupported_enum_types)
    def test_enum_comp(self):
        class Color(Enum):
Reported by Pylint.
Line: 107
Column: 3
        def enum_comp(x: Color, y: Color) -> bool:
            return x == y
        # TODO: rewrite code so that the highlight is not empty.
        with self.assertRaisesRegexWithHighlight(RuntimeError, "Could not unify type list", ""):
            torch.jit.script(enum_comp)
    def test_enum_name(self):
        class Color(Enum):
Reported by Pylint.
Line: 1
Column: 1
import os
import sys
import torch
from torch.testing import FileCheck
from enum import Enum
from typing import Any, List
# Make the helper files in test/ importable
Reported by Pylint.
Line: 6
Column: 1
import torch
from torch.testing import FileCheck
from enum import Enum
from typing import Any, List
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
Reported by Pylint.
Line: 7
Column: 1
import torch
from torch.testing import FileCheck
from enum import Enum
from typing import Any, List
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, make_global
Reported by Pylint.
Line: 12
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, make_global
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
Reported by Pylint.