The following issues were found:
test/ao/sparsity/test_parametrization.py
36 issues
Line: 5
Column: 1
import logging
from torch import nn
from torch.ao.sparsity.sparsifier import utils
from torch.nn.utils import parametrize
import torch
from torch.testing._internal.common_utils import TestCase
Reported by Pylint.
Line: 6
Column: 1
import logging
from torch import nn
from torch.ao.sparsity.sparsifier import utils
from torch.nn.utils import parametrize
import torch
from torch.testing._internal.common_utils import TestCase
Reported by Pylint.
Line: 7
Column: 1
from torch import nn
from torch.ao.sparsity.sparsifier import utils
from torch.nn.utils import parametrize
import torch
from torch.testing._internal.common_utils import TestCase
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
Reported by Pylint.
Line: 9
Column: 1
from torch.ao.sparsity.sparsifier import utils
from torch.nn.utils import parametrize
import torch
from torch.testing._internal.common_utils import TestCase
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
class ModelUnderTest(nn.Module):
Reported by Pylint.
Line: 10
Column: 1
from torch.nn.utils import parametrize
import torch
from torch.testing._internal.common_utils import TestCase
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
class ModelUnderTest(nn.Module):
def __init__(self, bias=True):
Reported by Pylint.
Line: 1
Column: 1
# -*- coding: utf-8 -*-
import logging
from torch import nn
from torch.ao.sparsity.sparsifier import utils
from torch.nn.utils import parametrize
import torch
Reported by Pylint.
Line: 12
Column: 1
import torch
from torch.testing._internal.common_utils import TestCase
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
class ModelUnderTest(nn.Module):
def __init__(self, bias=True):
super().__init__()
self.linear = nn.Linear(16, 16, bias=bias)
Reported by Pylint.
Line: 14
Column: 1
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
class ModelUnderTest(nn.Module):
def __init__(self, bias=True):
super().__init__()
self.linear = nn.Linear(16, 16, bias=bias)
self.seq = nn.Sequential(
nn.Linear(16, 16, bias=bias),
Reported by Pylint.
Line: 14
Column: 1
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
class ModelUnderTest(nn.Module):
def __init__(self, bias=True):
super().__init__()
self.linear = nn.Linear(16, 16, bias=bias)
self.seq = nn.Sequential(
nn.Linear(16, 16, bias=bias),
Reported by Pylint.
Line: 32
Column: 5
self.seq[0] = nn.Parameter(torch.zeros_like(self.seq[0].bias) + 20.0)
self.seq[0] = nn.Parameter(torch.zeros_like(self.seq[0].bias) + 30.0)
def forward(self, x):
x = self.linear(x)
x = self.seq(x)
return x
Reported by Pylint.
benchmarks/operator_benchmark/c2/concat_test.py
36 issues
Line: 2
Column: 1
import operator_benchmark as op_bench
import benchmark_caffe2 as op_bench_c2
import random
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for Concat operator. Supports both Caffe2/PyTorch."""
Reported by Pylint.
Line: 4
Column: 1
import operator_benchmark as op_bench
import benchmark_caffe2 as op_bench_c2
import random
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for Concat operator. Supports both Caffe2/PyTorch."""
Reported by Pylint.
Line: 5
Column: 1
import benchmark_caffe2 as op_bench_c2
import random
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for Concat operator. Supports both Caffe2/PyTorch."""
cross_product_configs = {
Reported by Pylint.
Line: 17
Column: 21
}
# Configs for C2 concat operator
cat_configs_short = op_bench.config_list(
attr_names=['sizes', 'N', 'axis'],
attrs=[
[(1, 1, 1), 2, 0], # noqa: E241
[(512, 512, 2), 2, 1], # noqa: E241
[(128, 1024, 2), 2, 1], # noqa: E241
Reported by Pylint.
Line: 29
Column: 30
)
# Configs specific to static runtime feature - a fast runtime for pared down models
cat_configs_static_runtime = op_bench.config_list(
attr_names=['sizes', 'N', 'axis', 'add_axis'],
attrs=[
[(1, 40), 5, 1, 1],
[[(1, 160), (1, 14)], -1, 1, 0],
[[(1, 20, 40), (1, 4, 40), (1, 5, 40)], -1, 1, 0],
Reported by Pylint.
Line: 45
Column: 20
tags=['static_runtime'],
)
cat_configs_long = op_bench.config_list(
attr_names=['sizes', 'N', 'axis'],
attrs=[
[(2**10, 2**10, 2), 2, 0], # noqa: E241
[(2**10+1, 2**10-1, 2), 2, 1], # noqa: E226,E241
[(2**10, 2**10, 2), 2, 2], # noqa: E241
Reported by Pylint.
Line: 71
Column: 24
)
# There is a different codepath on CUDA for >4 dimensions
cat_configs_multidim = op_bench.config_list(
attr_names=['sizes', 'N', 'axis', 'dtype'],
attrs=[
[(2**6, 2**5, 2**2, 2**4, 2**5), 2, 2], # noqa: E241
[(2**4, 2**5, 2**2, 2**4, 2**5), 8, 2], # noqa: E241
[(2**3+1, 2**5-1, 2**2+1, 2**4-1, 2**5+1), 17, 4], # noqa: E226,E241
Reported by Pylint.
Line: 82
Column: 26
tags=['multidim'],
)
cat_configs_manyinputs = op_bench.config_list(
attr_names=['sizes', 'N', 'axis'],
attrs=[
[[lambda: random.randint(1, 10000)], 100, 0],
[[lambda: random.randint(1, 1000)], 1000, 0],
[[lambda: random.randint(1, 500)], 2000, 0],
Reported by Pylint.
Line: 4
Column: 1
import operator_benchmark as op_bench
import benchmark_caffe2 as op_bench_c2
import random
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for Concat operator. Supports both Caffe2/PyTorch."""
Reported by Pylint.
Line: 8
Column: 1
from caffe2.python import core
"""Microbenchmarks for Concat operator. Supports both Caffe2/PyTorch."""
cross_product_configs = {
'device': ['cpu', 'cuda'],
'dtype': ['float'],
'add_axis': [0],
Reported by Pylint.
caffe2/python/ideep/reshape_op_test.py
36 issues
Line: 13
Column: 22
from caffe2.python import core, workspace
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestReShapeOps(TestCase):
def test_reshape_ops(self):
device_opt = core.DeviceOption(caffe2_pb2.IDEEP, 0)
with core.DeviceScope(device_opt):
workspace.FeedBlob('res', np.array([[0, 0, 0, 0]], dtype=np.float32))
Reported by Pylint.
Line: 101
Column: 13
'input_blob',
np.array(np.random.rand(10, 20, 10), dtype=np.float32))
net = core.Net('mynet')
z, _ = net.Reshape('input_blob',
['z_reshape', 'dummy_size'],
shape=(-1, 10))
workspace.CreateNet(net)
workspace.RunNet(net)
workspace.FeedBlob(
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python.test_util import TestCase
from caffe2.proto import caffe2_pb2
import unittest
import numpy as np
Reported by Pylint.
Line: 8
Column: 1
from caffe2.python.test_util import TestCase
from caffe2.proto import caffe2_pb2
import unittest
import numpy as np
from caffe2.python import core, workspace
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
Reported by Pylint.
Line: 10
Column: 1
from caffe2.proto import caffe2_pb2
import unittest
import numpy as np
from caffe2.python import core, workspace
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestReShapeOps(TestCase):
def test_reshape_ops(self):
Reported by Pylint.
Line: 14
Column: 1
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestReShapeOps(TestCase):
def test_reshape_ops(self):
device_opt = core.DeviceOption(caffe2_pb2.IDEEP, 0)
with core.DeviceScope(device_opt):
workspace.FeedBlob('res', np.array([[0, 0, 0, 0]], dtype=np.float32))
workspace.FeedBlob('shape', np.array([1, 4], dtype=np.int32), core.DeviceOption(caffe2_pb2.CPU, 0))
Reported by Pylint.
Line: 15
Column: 5
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestReShapeOps(TestCase):
def test_reshape_ops(self):
device_opt = core.DeviceOption(caffe2_pb2.IDEEP, 0)
with core.DeviceScope(device_opt):
workspace.FeedBlob('res', np.array([[0, 0, 0, 0]], dtype=np.float32))
workspace.FeedBlob('shape', np.array([1, 4], dtype=np.int32), core.DeviceOption(caffe2_pb2.CPU, 0))
workspace.FeedBlob('input', np.zeros((2, 2), dtype=np.float32))
Reported by Pylint.
Line: 15
Column: 5
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestReShapeOps(TestCase):
def test_reshape_ops(self):
device_opt = core.DeviceOption(caffe2_pb2.IDEEP, 0)
with core.DeviceScope(device_opt):
workspace.FeedBlob('res', np.array([[0, 0, 0, 0]], dtype=np.float32))
workspace.FeedBlob('shape', np.array([1, 4], dtype=np.int32), core.DeviceOption(caffe2_pb2.CPU, 0))
workspace.FeedBlob('input', np.zeros((2, 2), dtype=np.float32))
Reported by Pylint.
Line: 19
Column: 1
device_opt = core.DeviceOption(caffe2_pb2.IDEEP, 0)
with core.DeviceScope(device_opt):
workspace.FeedBlob('res', np.array([[0, 0, 0, 0]], dtype=np.float32))
workspace.FeedBlob('shape', np.array([1, 4], dtype=np.int32), core.DeviceOption(caffe2_pb2.CPU, 0))
workspace.FeedBlob('input', np.zeros((2, 2), dtype=np.float32))
workspace.RunOperatorOnce(core.CreateOperator(
'Reshape', ['input', 'shape'], ['output', 'old_shape']))
assert ((workspace.FetchBlob('output') ==
workspace.FetchBlob('res')).all())
Reported by Pylint.
Line: 23
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
workspace.FeedBlob('input', np.zeros((2, 2), dtype=np.float32))
workspace.RunOperatorOnce(core.CreateOperator(
'Reshape', ['input', 'shape'], ['output', 'old_shape']))
assert ((workspace.FetchBlob('output') ==
workspace.FetchBlob('res')).all())
def test_basic_reshape(self):
_test_reshape(old_shape=(4, 2, 1), new_shape=(2, 4))
_test_reshape(old_shape=(4, 2, 1), new_shape=(2, 4), arg_shape=False)
Reported by Bandit.
torch/autograd/profiler_legacy.py
36 issues
Line: 153
Column: 5
"""
return (record.handle(), record.node_id())
next_id = 0
start_record = None
functions = []
record_stack = []
# '__start_profile' is not guaranteed to be first, so we must find it here
Reported by Pylint.
Line: 156
Column: 5
next_id = 0
start_record = None
functions = []
record_stack = []
# '__start_profile' is not guaranteed to be first, so we must find it here
for record in itertools.chain(*thread_records):
name = record.name()
if start_record is None and name == '__start_profile':
Reported by Pylint.
Line: 1
Column: 1
import torch
import torch.cuda
from torch.autograd.profiler_util import (
EventList, FunctionEvent, MEMORY_EVENT_NAME,
_filter_name, _filter_stack_entry, _rewrite_name
)
from torch.autograd import (
DeviceType, ProfilerConfig, ProfilerState,
Reported by Pylint.
Line: 13
Column: 1
_disable_profiler_legacy, _enable_profiler_legacy,
)
import itertools
from warnings import warn
class profile(object):
"""DEPRECATED: use torch.profiler instead"""
Reported by Pylint.
Line: 14
Column: 1
)
import itertools
from warnings import warn
class profile(object):
"""DEPRECATED: use torch.profiler instead"""
def __init__(
Reported by Pylint.
Line: 17
Column: 1
from warnings import warn
class profile(object):
"""DEPRECATED: use torch.profiler instead"""
def __init__(
self,
enabled=True,
*,
Reported by Pylint.
Line: 17
Column: 1
from warnings import warn
class profile(object):
"""DEPRECATED: use torch.profiler instead"""
def __init__(
self,
enabled=True,
*,
Reported by Pylint.
Line: 17
Column: 1
from warnings import warn
class profile(object):
"""DEPRECATED: use torch.profiler instead"""
def __init__(
self,
enabled=True,
*,
Reported by Pylint.
Line: 51
Column: 5
else:
self.profiler_kind = ProfilerState.CPU
def config(self):
return ProfilerConfig(
self.profiler_kind,
self.record_shapes,
self.profile_memory,
self.with_stack,
Reported by Pylint.
Line: 60
Column: 5
self.with_flops,
self.with_modules)
def __enter__(self):
if not self.enabled:
return
if self.entered:
raise RuntimeError("Profiler context manager is not reentrant")
self.entered = True
Reported by Pylint.
caffe2/python/ideep/conv_op_test.py
36 issues
Line: 7
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.transformations import optimizeForMKLDNN
Reported by Pylint.
Line: 8
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.transformations import optimizeForMKLDNN
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 17
Column: 22
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class ConvTest(hu.HypothesisTestCase):
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(3, 5),
size=st.integers(8, 10),
Reported by Pylint.
Line: 99
Column: 54
self.assertGradientChecks(gc, op, inputs, i, [0], threshold=0.01)
@given(batch_size=st.integers(1, 3), **mu.gcs)
def test_depthwise_convolution(self, batch_size, gc, dc):
op = core.CreateOperator(
"Conv",
["X", "w", "b"],
["Y"],
stride=1,
Reported by Pylint.
Line: 147
Column: 13
print(Y1.flatten())
print(Y0.flatten())
print(np.max(np.abs(Y1 - Y0)))
self.assertTrue(False)
workspace.ResetWorkspace()
workspace.FeedBlob('X', X, dc[1])
workspace.FeedBlob('w', w, dc[1])
workspace.FeedBlob('b', b, dc[1])
Reported by Pylint.
Line: 160
Column: 13
print(Y2.flatten())
print(Y0.flatten())
print(np.max(np.abs(Y2 - Y0)))
self.assertTrue(False)
if __name__ == "__main__":
unittest.main()
Reported by Pylint.
Line: 1
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
Reported by Pylint.
Line: 18
Column: 1
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class ConvTest(hu.HypothesisTestCase):
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(3, 5),
size=st.integers(8, 10),
input_channels=st.integers(1, 3),
Reported by Pylint.
Line: 31
Column: 5
group=st.integers(1, 2),
**mu.gcs)
@settings(max_examples=10, deadline=None)
def test_convolution(self, stride, pad, kernel, size,
input_channels, output_channels,
batch_size, use_bias, training_mode, group, gc, dc):
training = 1 if training_mode else 0
op = core.CreateOperator(
"Conv",
Reported by Pylint.
Line: 31
Column: 5
group=st.integers(1, 2),
**mu.gcs)
@settings(max_examples=10, deadline=None)
def test_convolution(self, stride, pad, kernel, size,
input_channels, output_channels,
batch_size, use_bias, training_mode, group, gc, dc):
training = 1 if training_mode else 0
op = core.CreateOperator(
"Conv",
Reported by Pylint.
torch/distributed/pipeline/sync/checkpoint.py
36 issues
Line: 46
Column: 1
from torch import Tensor
import torch.autograd
from .dependency import fork, join
from .microbatch import Batch
from .phony import get_phony
__all__ = ["is_checkpointing", "is_recomputing"]
Reported by Pylint.
Line: 47
Column: 1
import torch.autograd
from .dependency import fork, join
from .microbatch import Batch
from .phony import get_phony
__all__ = ["is_checkpointing", "is_recomputing"]
Reported by Pylint.
Line: 48
Column: 1
from .dependency import fork, join
from .microbatch import Batch
from .phony import get_phony
__all__ = ["is_checkpointing", "is_recomputing"]
Tensors = Sequence[Tensor]
Reported by Pylint.
Line: 62
Column: 5
if TYPE_CHECKING:
from typing_extensions import Protocol
else:
Protocol = object
# Protocol with __call__ instead of Callable can be used as an attribute type.
Reported by Pylint.
Line: 214
Column: 29
pass
def save_rng_states(device: torch.device, rng_states: Deque[RNGStates],) -> None:
""":meth:`Checkpoint.forward` captures the current PyTorch's random number
generator states at CPU and GPU to reuse in :meth:`Recompute.backward`.
.. seealso:: :ref:`Referential Transparency`
Reported by Pylint.
Line: 233
Column: 32
@contextmanager
def restore_rng_states(device: torch.device, rng_states: Deque[RNGStates],) -> Generator[None, None, None]:
""":meth:`Recompute.backward` restores the random number generator states
captured by :func:`save_rng_states` within its context.
.. seealso:: :ref:`Referential Transparency`
Reported by Pylint.
Line: 242
Column: 23
"""
cpu_rng_state, gpu_rng_state = rng_states.pop()
gpu_devices: List[torch.device] = []
if device.type == "cuda":
gpu_devices.append(device)
with torch.random.fork_rng(gpu_devices):
torch.set_rng_state(cpu_rng_state)
Reported by Pylint.
Line: 70
Column: 24
# Protocol with __call__ instead of Callable can be used as an attribute type.
# See: https://github.com/python/mypy/issues/708#issuecomment-561735949
class Function(Protocol):
def __call__(self, input: TensorOrTensors) -> TensorOrTensors:
...
def checkpoint(function: Function, input):
"""Makes a checkpoint with a simple interface like
Reported by Pylint.
Line: 74
Column: 36
...
def checkpoint(function: Function, input):
"""Makes a checkpoint with a simple interface like
:func:`torch.utils.checkpoint.checkpoint`. It's only used to test or debug
:class:`Checkpoint` and :class:`Recompute` without boilerplate.
"""
batch = Batch(input)
Reported by Pylint.
Line: 133
Column: 5
class ThreadLocal(threading.local):
def __init__(self) -> None:
self.is_checkpointing = False
self.is_recomputing = False
thread_local = ThreadLocal()
Reported by Pylint.
torch/distributed/algorithms/ddp_comm_hooks/quantization_hooks.py
36 issues
Line: 7
Column: 9
def _quantize_per_tensor_cuda(x, scale, zero_point):
y = torch.round(x / scale) + zero_point
y = torch.clamp(y, 0, 255).to(torch.uint8)
return y
def _dequantize_per_tensor_cuda(y, scale, zero_point):
Reported by Pylint.
Line: 8
Column: 35
def _quantize_per_tensor_cuda(x, scale, zero_point):
y = torch.round(x / scale) + zero_point
y = torch.clamp(y, 0, 255).to(torch.uint8)
return y
def _dequantize_per_tensor_cuda(y, scale, zero_point):
x = scale * (y.to(torch.float32) - zero_point)
Reported by Pylint.
Line: 8
Column: 9
def _quantize_per_tensor_cuda(x, scale, zero_point):
y = torch.round(x / scale) + zero_point
y = torch.clamp(y, 0, 255).to(torch.uint8)
return y
def _dequantize_per_tensor_cuda(y, scale, zero_point):
x = scale * (y.to(torch.float32) - zero_point)
Reported by Pylint.
Line: 13
Column: 23
def _dequantize_per_tensor_cuda(y, scale, zero_point):
x = scale * (y.to(torch.float32) - zero_point)
return x
def _quantize_per_channel_cuda(x, scale, zero_point):
y = torch.zeros(x.size(), device=x.device)
Reported by Pylint.
Line: 18
Column: 9
def _quantize_per_channel_cuda(x, scale, zero_point):
y = torch.zeros(x.size(), device=x.device)
for i in range(x.size()[0]):
y[i, :] = torch.round(x[i, :] / scale[i]) + zero_point[i]
y = torch.clamp(y, 0, 255).to(torch.uint8)
return y
Reported by Pylint.
Line: 20
Column: 19
def _quantize_per_channel_cuda(x, scale, zero_point):
y = torch.zeros(x.size(), device=x.device)
for i in range(x.size()[0]):
y[i, :] = torch.round(x[i, :] / scale[i]) + zero_point[i]
y = torch.clamp(y, 0, 255).to(torch.uint8)
return y
def _dequantize_per_channel_cuda(y, scale, zero_point):
Reported by Pylint.
Line: 21
Column: 35
y = torch.zeros(x.size(), device=x.device)
for i in range(x.size()[0]):
y[i, :] = torch.round(x[i, :] / scale[i]) + zero_point[i]
y = torch.clamp(y, 0, 255).to(torch.uint8)
return y
def _dequantize_per_channel_cuda(y, scale, zero_point):
y = y.to(torch.float32).cuda(y.device)
Reported by Pylint.
Line: 21
Column: 9
y = torch.zeros(x.size(), device=x.device)
for i in range(x.size()[0]):
y[i, :] = torch.round(x[i, :] / scale[i]) + zero_point[i]
y = torch.clamp(y, 0, 255).to(torch.uint8)
return y
def _dequantize_per_channel_cuda(y, scale, zero_point):
y = y.to(torch.float32).cuda(y.device)
Reported by Pylint.
Line: 26
Column: 14
def _dequantize_per_channel_cuda(y, scale, zero_point):
y = y.to(torch.float32).cuda(y.device)
x = torch.zeros_like(y, device=y.device)
for i in range(x.size()[0]):
x[i, :] = scale[i] * (y[i, :] - zero_point[i])
return x
Reported by Pylint.
Line: 27
Column: 9
def _dequantize_per_channel_cuda(y, scale, zero_point):
y = y.to(torch.float32).cuda(y.device)
x = torch.zeros_like(y, device=y.device)
for i in range(x.size()[0]):
x[i, :] = scale[i] * (y[i, :] - zero_point[i])
return x
Reported by Pylint.
caffe2/python/operator_test/upsample_op_test.py
35 issues
Line: 23
Column: 1
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
Reported by Pylint.
Line: 24
Column: 1
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
class TestUpSample(serial.SerializedTestCase):
Reported by Pylint.
Line: 71
Column: 24
]
for op, inputs in ops:
def ref(X, scales=None):
output_height = np.int32(height * height_scale)
output_width = np.int32(width * width_scale)
Y = np.random.rand(
batch_size, num_channels, output_height,
Reported by Pylint.
Line: 159
Column: 28
]
for op, inputs in ops:
def ref(dY, X, scales=None):
dX = np.zeros_like(X)
rheight = ((height - 1) / (output_height - 1)
if output_height > 1
else float(0))
Reported by Pylint.
Line: 1
Column: 1
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
Reported by Pylint.
Line: 26
Column: 1
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
class TestUpSample(serial.SerializedTestCase):
@given(height_scale=st.floats(1.0, 4.0) | st.just(2.0),
width_scale=st.floats(1.0, 4.0) | st.just(2.0),
Reported by Pylint.
Line: 29
Column: 1
import unittest
class TestUpSample(serial.SerializedTestCase):
@given(height_scale=st.floats(1.0, 4.0) | st.just(2.0),
width_scale=st.floats(1.0, 4.0) | st.just(2.0),
height=st.integers(4, 32),
width=st.integers(4, 32),
num_channels=st.integers(1, 4),
Reported by Pylint.
Line: 39
Column: 5
seed=st.integers(0, 65535),
**hu.gcs)
@settings(max_examples=50, deadline=None)
def test_upsample(self, height_scale, width_scale, height, width,
num_channels, batch_size, seed,
gc, dc):
np.random.seed(seed)
Reported by Pylint.
Line: 39
Column: 5
seed=st.integers(0, 65535),
**hu.gcs)
@settings(max_examples=50, deadline=None)
def test_upsample(self, height_scale, width_scale, height, width,
num_channels, batch_size, seed,
gc, dc):
np.random.seed(seed)
Reported by Pylint.
Line: 39
Column: 5
seed=st.integers(0, 65535),
**hu.gcs)
@settings(max_examples=50, deadline=None)
def test_upsample(self, height_scale, width_scale, height, width,
num_channels, batch_size, seed,
gc, dc):
np.random.seed(seed)
Reported by Pylint.
caffe2/python/visualize.py
35 issues
Line: 48
Column: 20
raise ValueError("The input patch shape isn't correct.")
# determine color
if len(patch.shape) == 2 and cmap is None:
cmap = cm.gray
pyplot.imshow(patch, cmap=cmap)
return patch
def ShowMultiple(self, patches, ncols=None, cmap=None, bg_func=np.mean):
"""Visualize multiple patches.
Reported by Pylint.
Line: 76
Column: 28
patches = patches.reshape(patches.shape[:-1])
image_shape = tuple(image_size)
if cmap is None:
cmap = cm.gray
elif patches.shape[3] == 3:
# color patches
image_shape = tuple(image_size) + (3, )
else:
raise ValueError("The input patch shape isn't expected.")
Reported by Pylint.
Line: 85
Column: 24
else:
image_shape = tuple(image_size)
if cmap is None:
cmap = cm.gray
image = np.ones(image_shape) * bg_func(patches)
for pid in range(num_patches):
row = pid // ncols * patch_size_expand[0]
col = pid % ncols * patch_size_expand[1]
image[row:row+patches.shape[1], col:col+patches.shape[2]] = \
Reported by Pylint.
Line: 16
Column: 1
from matplotlib import cm, pyplot
def ChannelFirst(arr):
"""Convert a HWC array to CHW."""
ndim = arr.ndim
return arr.swapaxes(ndim - 1, ndim - 2).swapaxes(ndim - 2, ndim - 3)
Reported by Pylint.
Line: 22
Column: 1
return arr.swapaxes(ndim - 1, ndim - 2).swapaxes(ndim - 2, ndim - 3)
def ChannelLast(arr):
"""Convert a CHW array to HWC."""
ndim = arr.ndim
return arr.swapaxes(ndim - 3, ndim - 2).swapaxes(ndim - 2, ndim - 1)
Reported by Pylint.
Line: 28
Column: 1
return arr.swapaxes(ndim - 3, ndim - 2).swapaxes(ndim - 2, ndim - 1)
class PatchVisualizer(object):
"""PatchVisualizer visualizes patches.
"""
def __init__(self, gap=1):
self.gap = gap
Reported by Pylint.
Line: 35
Column: 5
def __init__(self, gap=1):
self.gap = gap
def ShowSingle(self, patch, cmap=None):
"""Visualizes one single patch.
The input patch could be a vector (in which case we try to infer the shape
of the patch), a 2-D matrix, or a 3-D matrix whose 3rd dimension has 3
channels.
Reported by Pylint.
Line: 52
Column: 5
pyplot.imshow(patch, cmap=cmap)
return patch
def ShowMultiple(self, patches, ncols=None, cmap=None, bg_func=np.mean):
"""Visualize multiple patches.
In the passed in patches matrix, each row is a patch, in the shape of either
n*n, n*n*1 or n*n*3, either in a flattened format (so patches would be a
2-D array), or a multi-dimensional tensor. We will try our best to figure
Reported by Pylint.
Line: 96
Column: 5
pyplot.axis('off')
return image
def ShowImages(self, patches, *args, **kwargs):
"""Similar to ShowMultiple, but always normalize the values between 0 and 1
for better visualization of image-type data.
"""
patches = patches - np.min(patches)
patches /= np.max(patches) + np.finfo(np.float64).eps
Reported by Pylint.
Line: 104
Column: 5
patches /= np.max(patches) + np.finfo(np.float64).eps
return self.ShowMultiple(patches, *args, **kwargs)
def ShowChannels(self, patch, cmap=None, bg_func=np.mean):
""" This function shows the channels of a patch.
The incoming patch should have shape [w, h, num_channels], and each channel
will be visualized as a separate gray patch.
"""
Reported by Pylint.
test/cpp/jit/tests_setup.py
35 issues
Line: 3
Column: 1
import sys
import os
import torch
class Setup(object):
def setup(self):
raise NotImplementedError()
Reported by Pylint.
Line: 20
Column: 13
def shutdown(self):
if os.path.exists(self.path):
os.remove(self.path)
pass
class EvalModeForLoadedModule(FileSetup):
path = 'dropout_model.pt'
Reported by Pylint.
Line: 1
Column: 1
import sys
import os
import torch
class Setup(object):
def setup(self):
raise NotImplementedError()
Reported by Pylint.
Line: 6
Column: 1
import torch
class Setup(object):
def setup(self):
raise NotImplementedError()
def shutdown(self):
raise NotImplementedError()
Reported by Pylint.
Line: 6
Column: 1
import torch
class Setup(object):
def setup(self):
raise NotImplementedError()
def shutdown(self):
raise NotImplementedError()
Reported by Pylint.
Line: 7
Column: 5
class Setup(object):
def setup(self):
raise NotImplementedError()
def shutdown(self):
raise NotImplementedError()
Reported by Pylint.
Line: 10
Column: 5
def setup(self):
raise NotImplementedError()
def shutdown(self):
raise NotImplementedError()
class FileSetup(object):
path = None
Reported by Pylint.
Line: 14
Column: 1
raise NotImplementedError()
class FileSetup(object):
path = None
def shutdown(self):
if os.path.exists(self.path):
os.remove(self.path)
Reported by Pylint.
Line: 14
Column: 1
raise NotImplementedError()
class FileSetup(object):
path = None
def shutdown(self):
if os.path.exists(self.path):
os.remove(self.path)
Reported by Pylint.
Line: 14
Column: 1
raise NotImplementedError()
class FileSetup(object):
path = None
def shutdown(self):
if os.path.exists(self.path):
os.remove(self.path)
Reported by Pylint.