The following issues were found:
torch/nn/intrinsic/modules/__init__.py
14 issues
Line: 1
Column: 1
from .fused import _FusedModule
from .fused import ConvBn1d
from .fused import ConvBn2d
from .fused import ConvBn3d
from .fused import ConvBnReLU1d
from .fused import ConvBnReLU2d
from .fused import ConvBnReLU3d
from .fused import ConvReLU1d
from .fused import ConvReLU2d
Reported by Pylint.
Line: 2
Column: 1
from .fused import _FusedModule
from .fused import ConvBn1d
from .fused import ConvBn2d
from .fused import ConvBn3d
from .fused import ConvBnReLU1d
from .fused import ConvBnReLU2d
from .fused import ConvBnReLU3d
from .fused import ConvReLU1d
from .fused import ConvReLU2d
Reported by Pylint.
Line: 3
Column: 1
from .fused import _FusedModule
from .fused import ConvBn1d
from .fused import ConvBn2d
from .fused import ConvBn3d
from .fused import ConvBnReLU1d
from .fused import ConvBnReLU2d
from .fused import ConvBnReLU3d
from .fused import ConvReLU1d
from .fused import ConvReLU2d
Reported by Pylint.
Line: 4
Column: 1
from .fused import _FusedModule
from .fused import ConvBn1d
from .fused import ConvBn2d
from .fused import ConvBn3d
from .fused import ConvBnReLU1d
from .fused import ConvBnReLU2d
from .fused import ConvBnReLU3d
from .fused import ConvReLU1d
from .fused import ConvReLU2d
Reported by Pylint.
Line: 5
Column: 1
from .fused import ConvBn1d
from .fused import ConvBn2d
from .fused import ConvBn3d
from .fused import ConvBnReLU1d
from .fused import ConvBnReLU2d
from .fused import ConvBnReLU3d
from .fused import ConvReLU1d
from .fused import ConvReLU2d
from .fused import ConvReLU3d
Reported by Pylint.
Line: 6
Column: 1
from .fused import ConvBn2d
from .fused import ConvBn3d
from .fused import ConvBnReLU1d
from .fused import ConvBnReLU2d
from .fused import ConvBnReLU3d
from .fused import ConvReLU1d
from .fused import ConvReLU2d
from .fused import ConvReLU3d
from .fused import LinearReLU
Reported by Pylint.
Line: 7
Column: 1
from .fused import ConvBn3d
from .fused import ConvBnReLU1d
from .fused import ConvBnReLU2d
from .fused import ConvBnReLU3d
from .fused import ConvReLU1d
from .fused import ConvReLU2d
from .fused import ConvReLU3d
from .fused import LinearReLU
from .fused import BNReLU2d
Reported by Pylint.
Line: 8
Column: 1
from .fused import ConvBnReLU1d
from .fused import ConvBnReLU2d
from .fused import ConvBnReLU3d
from .fused import ConvReLU1d
from .fused import ConvReLU2d
from .fused import ConvReLU3d
from .fused import LinearReLU
from .fused import BNReLU2d
from .fused import BNReLU3d
Reported by Pylint.
Line: 9
Column: 1
from .fused import ConvBnReLU2d
from .fused import ConvBnReLU3d
from .fused import ConvReLU1d
from .fused import ConvReLU2d
from .fused import ConvReLU3d
from .fused import LinearReLU
from .fused import BNReLU2d
from .fused import BNReLU3d
Reported by Pylint.
Line: 10
Column: 1
from .fused import ConvBnReLU3d
from .fused import ConvReLU1d
from .fused import ConvReLU2d
from .fused import ConvReLU3d
from .fused import LinearReLU
from .fused import BNReLU2d
from .fused import BNReLU3d
Reported by Pylint.
torch/utils/_crash_handler.py
14 issues
Line: 7
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b108_hardcoded_tmp_directory.html
import torch
DEFAULT_MINIDUMP_DIR = "/tmp/pytorch_crashes"
def enable_minidumps(directory=DEFAULT_MINIDUMP_DIR):
if sys.platform != "linux":
raise RuntimeError("Minidump collection is currently only implemented for Linux platforms")
Reported by Bandit.
Line: 18
Column: 5
elif not os.path.exists(directory):
raise RuntimeError(f"Directory does not exist: {directory}")
torch._C._enable_minidumps(directory)
def enable_minidumps_on_exceptions():
torch._C._enable_minidumps_on_exceptions()
Reported by Pylint.
Line: 18
Column: 5
elif not os.path.exists(directory):
raise RuntimeError(f"Directory does not exist: {directory}")
torch._C._enable_minidumps(directory)
def enable_minidumps_on_exceptions():
torch._C._enable_minidumps_on_exceptions()
Reported by Pylint.
Line: 22
Column: 5
def enable_minidumps_on_exceptions():
torch._C._enable_minidumps_on_exceptions()
def disable_minidumps():
torch._C._disable_minidumps()
Reported by Pylint.
Line: 22
Column: 5
def enable_minidumps_on_exceptions():
torch._C._enable_minidumps_on_exceptions()
def disable_minidumps():
torch._C._disable_minidumps()
Reported by Pylint.
Line: 26
Column: 5
def disable_minidumps():
torch._C._disable_minidumps()
Reported by Pylint.
Line: 26
Column: 5
def disable_minidumps():
torch._C._disable_minidumps()
Reported by Pylint.
Line: 1
Column: 1
import os
import sys
import pathlib
import torch
DEFAULT_MINIDUMP_DIR = "/tmp/pytorch_crashes"
def enable_minidumps(directory=DEFAULT_MINIDUMP_DIR):
Reported by Pylint.
Line: 9
Column: 1
DEFAULT_MINIDUMP_DIR = "/tmp/pytorch_crashes"
def enable_minidumps(directory=DEFAULT_MINIDUMP_DIR):
if sys.platform != "linux":
raise RuntimeError("Minidump collection is currently only implemented for Linux platforms")
if directory == DEFAULT_MINIDUMP_DIR:
pathlib.Path(directory).mkdir(parents=True, exist_ok=True)
Reported by Pylint.
Line: 21
Column: 1
torch._C._enable_minidumps(directory)
def enable_minidumps_on_exceptions():
torch._C._enable_minidumps_on_exceptions()
def disable_minidumps():
torch._C._disable_minidumps()
Reported by Pylint.
caffe2/python/operator_test/string_ops_test.py
14 issues
Line: 7
Column: 1
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
Reported by Pylint.
Line: 10
Column: 1
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
def _string_lists(alphabet=None):
return st.lists(
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
Reported by Pylint.
Line: 21
Column: 1
max_size=3)
class TestStringOps(serial.SerializedTestCase):
@given(strings=_string_lists())
@settings(deadline=10000)
def test_string_prefix(self, strings):
length = 3
# although we are utf-8 encoding below to avoid python exceptions,
Reported by Pylint.
Line: 24
Column: 5
class TestStringOps(serial.SerializedTestCase):
@given(strings=_string_lists())
@settings(deadline=10000)
def test_string_prefix(self, strings):
length = 3
# although we are utf-8 encoding below to avoid python exceptions,
# StringPrefix op deals with byte-length prefixes, which may produce
# an invalid utf-8 string. The goal here is just to avoid python
# complaining about the unicode -> str conversion.
Reported by Pylint.
Line: 39
Column: 9
np.array([a[:length] for a in strings], dtype=object),
)
op = core.CreateOperator(
'StringPrefix',
['strings'],
['stripped'],
length=length)
self.assertReferenceChecks(
Reported by Pylint.
Line: 52
Column: 5
@given(strings=_string_lists())
@settings(deadline=10000)
def test_string_suffix(self, strings):
length = 3
strings = np.array(
[a.encode('utf-8') for a in strings], dtype=np.object
)
Reported by Pylint.
Line: 63
Column: 9
np.array([a[-length:] for a in strings], dtype=object),
)
op = core.CreateOperator(
'StringSuffix',
['strings'],
['stripped'],
length=length)
self.assertReferenceChecks(
Reported by Pylint.
Line: 76
Column: 5
@given(strings=st.text(alphabet=['a', 'b']))
@settings(deadline=10000)
def test_string_starts_with(self, strings):
prefix = 'a'
strings = np.array(
[str(a) for a in strings], dtype=np.object
)
Reported by Pylint.
Line: 87
Column: 9
np.array([a.startswith(prefix) for a in strings], dtype=bool),
)
op = core.CreateOperator(
'StringStartsWith',
['strings'],
['bools'],
prefix=prefix)
self.assertReferenceChecks(
Reported by Pylint.
caffe2/python/operator_test/dense_vector_to_id_list_op_test.py
14 issues
Line: 7
Column: 1
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.extra.numpy as hnp
import hypothesis.strategies as st
import numpy as np
Reported by Pylint.
Line: 10
Column: 1
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.extra.numpy as hnp
import hypothesis.strategies as st
import numpy as np
@st.composite
Reported by Pylint.
Line: 11
Column: 1
import caffe2.python.hypothesis_test_util as hu
import hypothesis.extra.numpy as hnp
import hypothesis.strategies as st
import numpy as np
@st.composite
def id_list_batch(draw):
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 16
Column: 1
@st.composite
def id_list_batch(draw):
batch_size = draw(st.integers(2, 2))
values_dtype = np.float32
inputs = []
sample_size = draw(st.integers(5, 10))
for _ in range(batch_size):
Reported by Pylint.
Line: 27
Column: 1
return [np.array(inputs)]
def dense_vector_to_id_list_ref(*arg):
arg = arg[0]
batch_size = len(arg)
assert batch_size > 0
out_length = []
out_values = []
Reported by Pylint.
Line: 30
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
def dense_vector_to_id_list_ref(*arg):
arg = arg[0]
batch_size = len(arg)
assert batch_size > 0
out_length = []
out_values = []
for row in arg:
length = 0
for idx, entry in enumerate(row):
Reported by Bandit.
Line: 43
Column: 1
return (out_length, out_values)
class TestDenseVectorToIdList(hu.HypothesisTestCase):
def test_dense_vector_to_id_list_ref(self):
# Verify that the reference implementation is correct!
dense_input = np.array(
[[1, 0, 0, 1, 0, 0, 0, 1],
[1, 0, 1, 0, 0, 0, 0, 1],
Reported by Pylint.
Line: 44
Column: 5
class TestDenseVectorToIdList(hu.HypothesisTestCase):
def test_dense_vector_to_id_list_ref(self):
# Verify that the reference implementation is correct!
dense_input = np.array(
[[1, 0, 0, 1, 0, 0, 0, 1],
[1, 0, 1, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 1, 0, 1]],
Reported by Pylint.
Line: 44
Column: 5
class TestDenseVectorToIdList(hu.HypothesisTestCase):
def test_dense_vector_to_id_list_ref(self):
# Verify that the reference implementation is correct!
dense_input = np.array(
[[1, 0, 0, 1, 0, 0, 0, 1],
[1, 0, 1, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 1, 0, 1]],
Reported by Pylint.
test/distributed/test_distributed_spawn.py
14 issues
Line: 5
Column: 1
import os
import sys
import torch
import torch.distributed as dist
torch.backends.cuda.matmul.allow_tf32 = False
if not dist.is_available():
Reported by Pylint.
Line: 6
Column: 1
import sys
import torch
import torch.distributed as dist
torch.backends.cuda.matmul.allow_tf32 = False
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
Reported by Pylint.
Line: 14
Column: 1
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN, NO_MULTIPROCESSING_SPAWN
from torch.testing._internal.distributed.distributed_test import (
DistributedTest, TestDistBackend
)
if TEST_WITH_DEV_DBG_ASAN:
Reported by Pylint.
Line: 15
Column: 1
sys.exit(0)
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN, NO_MULTIPROCESSING_SPAWN
from torch.testing._internal.distributed.distributed_test import (
DistributedTest, TestDistBackend
)
if TEST_WITH_DEV_DBG_ASAN:
print("Skip dev-asan as torch + multiprocessing spawn have known issues", file=sys.stderr)
Reported by Pylint.
Line: 30
Column: 53
BACKEND = os.environ["BACKEND"]
if BACKEND == "gloo" or BACKEND == "nccl":
class TestDistBackendWithSpawn(TestDistBackend, DistributedTest._DistTestBase):
def setUp(self):
super().setUp()
self._spawn_processes()
torch.backends.cudnn.flags(allow_tf32=False).__enter__()
Reported by Pylint.
Line: 1
Column: 1
import os
import sys
import torch
import torch.distributed as dist
torch.backends.cuda.matmul.allow_tf32 = False
Reported by Pylint.
Line: 14
Column: 1
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN, NO_MULTIPROCESSING_SPAWN
from torch.testing._internal.distributed.distributed_test import (
DistributedTest, TestDistBackend
)
if TEST_WITH_DEV_DBG_ASAN:
Reported by Pylint.
Line: 14
Column: 1
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN, NO_MULTIPROCESSING_SPAWN
from torch.testing._internal.distributed.distributed_test import (
DistributedTest, TestDistBackend
)
if TEST_WITH_DEV_DBG_ASAN:
Reported by Pylint.
Line: 15
Column: 1
sys.exit(0)
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN, NO_MULTIPROCESSING_SPAWN
from torch.testing._internal.distributed.distributed_test import (
DistributedTest, TestDistBackend
)
if TEST_WITH_DEV_DBG_ASAN:
print("Skip dev-asan as torch + multiprocessing spawn have known issues", file=sys.stderr)
Reported by Pylint.
Line: 29
Column: 4
BACKEND = os.environ["BACKEND"]
if BACKEND == "gloo" or BACKEND == "nccl":
class TestDistBackendWithSpawn(TestDistBackend, DistributedTest._DistTestBase):
def setUp(self):
super().setUp()
self._spawn_processes()
Reported by Pylint.
caffe2/quantization/server/int8_quant_scheme_blob_fill_test.py
14 issues
Line: 20
Column: 1
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, workspace
from hypothesis import given
from caffe2.quantization.server import dnnlowp_pybind11
class TestInt8QuantSchemeBlobFillOperator(hu.HypothesisTestCase):
@given(
**hu.gcs_cpu_only
Reported by Pylint.
Line: 21
Column: 1
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, workspace
from hypothesis import given
from caffe2.quantization.server import dnnlowp_pybind11
class TestInt8QuantSchemeBlobFillOperator(hu.HypothesisTestCase):
@given(
**hu.gcs_cpu_only
)
Reported by Pylint.
Line: 28
Column: 19
**hu.gcs_cpu_only
)
def test_int8_quant_scheme_blob_fill_op(
self, gc, dc
):
# Build a net to generate qscheme blob using the Int8QuantSchemeBlobFill op
gen_quant_scheme_net = core.Net("gen_quant_scheme")
gen_quant_scheme_op = core.CreateOperator(
"Int8QuantSchemeBlobFill",
Reported by Pylint.
Line: 1
Column: 1
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
Reported by Pylint.
Line: 21
Column: 1
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, workspace
from hypothesis import given
from caffe2.quantization.server import dnnlowp_pybind11
class TestInt8QuantSchemeBlobFillOperator(hu.HypothesisTestCase):
@given(
**hu.gcs_cpu_only
)
Reported by Pylint.
Line: 23
Column: 1
from hypothesis import given
from caffe2.quantization.server import dnnlowp_pybind11
class TestInt8QuantSchemeBlobFillOperator(hu.HypothesisTestCase):
@given(
**hu.gcs_cpu_only
)
def test_int8_quant_scheme_blob_fill_op(
self, gc, dc
Reported by Pylint.
Line: 26
Column: 5
class TestInt8QuantSchemeBlobFillOperator(hu.HypothesisTestCase):
@given(
**hu.gcs_cpu_only
)
def test_int8_quant_scheme_blob_fill_op(
self, gc, dc
):
# Build a net to generate qscheme blob using the Int8QuantSchemeBlobFill op
gen_quant_scheme_net = core.Net("gen_quant_scheme")
Reported by Pylint.
Line: 26
Column: 5
class TestInt8QuantSchemeBlobFillOperator(hu.HypothesisTestCase):
@given(
**hu.gcs_cpu_only
)
def test_int8_quant_scheme_blob_fill_op(
self, gc, dc
):
# Build a net to generate qscheme blob using the Int8QuantSchemeBlobFill op
gen_quant_scheme_net = core.Net("gen_quant_scheme")
Reported by Pylint.
Line: 26
Column: 5
class TestInt8QuantSchemeBlobFillOperator(hu.HypothesisTestCase):
@given(
**hu.gcs_cpu_only
)
def test_int8_quant_scheme_blob_fill_op(
self, gc, dc
):
# Build a net to generate qscheme blob using the Int8QuantSchemeBlobFill op
gen_quant_scheme_net = core.Net("gen_quant_scheme")
Reported by Pylint.
Line: 26
Column: 5
class TestInt8QuantSchemeBlobFillOperator(hu.HypothesisTestCase):
@given(
**hu.gcs_cpu_only
)
def test_int8_quant_scheme_blob_fill_op(
self, gc, dc
):
# Build a net to generate qscheme blob using the Int8QuantSchemeBlobFill op
gen_quant_scheme_net = core.Net("gen_quant_scheme")
Reported by Pylint.
caffe2/python/operator_test/fused_nbit_rowwise_test_helper.py
14 issues
Line: 13
Column: 5
xmin, xmax = np.min(x), np.max(x)
stepsize = (xmax - xmin) / np.float32(n_bins)
min_bins = np.float32(n_bins) * (np.float32(1) - np.float32(ratio))
xq, loss = _compress_uniform_simplified(x, bit_rate, xmin, xmax)
solutions = [] # [(left, right, loss)] # local optima solution
cur_min, cur_max, cur_loss = xmin, xmax, loss
thr = min_bins * stepsize
Reported by Pylint.
Line: 33
Column: 22
# found a local optima
solutions.append((cur_min, cur_max, cur_loss))
if loss1 < loss2:
cur_min, cur_max, cur_loss = cur_min + stepsize, cur_max, loss1
else:
cur_min, cur_max, cur_loss = cur_min, cur_max - stepsize, loss2
if len(solutions):
best = solutions[0]
for solution in solutions:
Reported by Pylint.
Line: 35
Column: 13
if loss1 < loss2:
cur_min, cur_max, cur_loss = cur_min + stepsize, cur_max, loss1
else:
cur_min, cur_max, cur_loss = cur_min, cur_max - stepsize, loss2
if len(solutions):
best = solutions[0]
for solution in solutions:
if solution[-1] < best[-1]:
best = solution
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
# Note we explicitly cast variables to np.float32 in a couple of places to avoid
# the default casting in Python often resuling in double precision and to make
# sure we're doing the same numerics as C++ code.
def param_search_greedy(x, bit_rate, n_bins=200, ratio=0.16):
Reported by Pylint.
Line: 9
Column: 1
# Note we explicitly cast variables to np.float32 in a couple of places to avoid
# the default casting in Python often resuling in double precision and to make
# sure we're doing the same numerics as C++ code.
def param_search_greedy(x, bit_rate, n_bins=200, ratio=0.16):
xmin, xmax = np.min(x), np.max(x)
stepsize = (xmax - xmin) / np.float32(n_bins)
min_bins = np.float32(n_bins) * (np.float32(1) - np.float32(ratio))
xq, loss = _compress_uniform_simplified(x, bit_rate, xmin, xmax)
Reported by Pylint.
Line: 9
Column: 1
# Note we explicitly cast variables to np.float32 in a couple of places to avoid
# the default casting in Python often resuling in double precision and to make
# sure we're doing the same numerics as C++ code.
def param_search_greedy(x, bit_rate, n_bins=200, ratio=0.16):
xmin, xmax = np.min(x), np.max(x)
stepsize = (xmax - xmin) / np.float32(n_bins)
min_bins = np.float32(n_bins) * (np.float32(1) - np.float32(ratio))
xq, loss = _compress_uniform_simplified(x, bit_rate, xmin, xmax)
Reported by Pylint.
Line: 9
Column: 1
# Note we explicitly cast variables to np.float32 in a couple of places to avoid
# the default casting in Python often resuling in double precision and to make
# sure we're doing the same numerics as C++ code.
def param_search_greedy(x, bit_rate, n_bins=200, ratio=0.16):
xmin, xmax = np.min(x), np.max(x)
stepsize = (xmax - xmin) / np.float32(n_bins)
min_bins = np.float32(n_bins) * (np.float32(1) - np.float32(ratio))
xq, loss = _compress_uniform_simplified(x, bit_rate, xmin, xmax)
Reported by Pylint.
Line: 13
Column: 5
xmin, xmax = np.min(x), np.max(x)
stepsize = (xmax - xmin) / np.float32(n_bins)
min_bins = np.float32(n_bins) * (np.float32(1) - np.float32(ratio))
xq, loss = _compress_uniform_simplified(x, bit_rate, xmin, xmax)
solutions = [] # [(left, right, loss)] # local optima solution
cur_min, cur_max, cur_loss = xmin, xmax, loss
thr = min_bins * stepsize
Reported by Pylint.
Line: 21
Column: 9
thr = min_bins * stepsize
while cur_min + thr < cur_max:
# move left
xq, loss1 = _compress_uniform_simplified(
x, bit_rate, cur_min + stepsize, cur_max
)
# move right
xq, loss2 = _compress_uniform_simplified(
x, bit_rate, cur_min, cur_max - stepsize
Reported by Pylint.
Line: 25
Column: 9
x, bit_rate, cur_min + stepsize, cur_max
)
# move right
xq, loss2 = _compress_uniform_simplified(
x, bit_rate, cur_min, cur_max - stepsize
)
if cur_loss < loss1 and cur_loss < loss2:
# found a local optima
Reported by Pylint.
test/distributed/pipeline/sync/conftest.py
14 issues
Line: 8
Column: 1
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import tempfile
import pytest
import torch
import torch.distributed as dist
@pytest.fixture(autouse=True)
Reported by Pylint.
Line: 10
Column: 1
import tempfile
import pytest
import torch
import torch.distributed as dist
@pytest.fixture(autouse=True)
def manual_seed_zero():
torch.manual_seed(0)
Reported by Pylint.
Line: 11
Column: 1
import pytest
import torch
import torch.distributed as dist
@pytest.fixture(autouse=True)
def manual_seed_zero():
torch.manual_seed(0)
Reported by Pylint.
Line: 27
Column: 5
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
torch.cuda._sleep(1000000)
end.record()
end.synchronize()
cycles_per_ms = 1000000 / start.elapsed_time(end)
def cuda_sleep(seconds):
Reported by Pylint.
Line: 32
Column: 5
end.synchronize()
cycles_per_ms = 1000000 / start.elapsed_time(end)
def cuda_sleep(seconds):
torch.cuda._sleep(int(seconds * cycles_per_ms * 1000))
return cuda_sleep
Reported by Pylint.
Line: 33
Column: 9
cycles_per_ms = 1000000 / start.elapsed_time(end)
def cuda_sleep(seconds):
torch.cuda._sleep(int(seconds * cycles_per_ms * 1000))
return cuda_sleep
def pytest_report_header():
Reported by Pylint.
Line: 42
Column: 15
return f"torch: {torch.__version__}"
@pytest.fixture
def setup_rpc(scope="session"):
file = tempfile.NamedTemporaryFile()
dist.rpc.init_rpc(
name="worker0",
rank=0,
world_size=1,
Reported by Pylint.
Line: 55
Column: 33
yield
dist.rpc.shutdown()
def pytest_ignore_collect(path, config):
"Skip this directory if distributed modules are not enabled."
return not dist.is_available()
Reported by Pylint.
Line: 55
Column: 27
yield
dist.rpc.shutdown()
def pytest_ignore_collect(path, config):
"Skip this directory if distributed modules are not enabled."
return not dist.is_available()
Reported by Pylint.
Line: 1
Column: 1
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import tempfile
import pytest
Reported by Pylint.
caffe2/quantization/server/concat_dnnlowp_op_test.py
14 issues
Line: 6
Column: 1
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given
Reported by Pylint.
Line: 10
Column: 1
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
Reported by Pylint.
Line: 27
Column: 66
**hu.gcs_cpu_only
)
def test_dnnlowp_concat_int(
self, dim1, dim2, axis, in_quantized, out_quantized, gc, dc
):
# X has scale 1, so exactly represented after quantization
min_ = -100
max_ = min_ + 255
Reported by Pylint.
Line: 1
Column: 1
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
Reported by Pylint.
Line: 17
Column: 1
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPConcatOpTest(hu.HypothesisTestCase):
@given(
dim1=st.integers(0, 256),
dim2=st.integers(0, 256),
axis=st.integers(0, 1),
in_quantized=st.booleans(),
Reported by Pylint.
Line: 25
Column: 5
in_quantized=st.booleans(),
out_quantized=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_concat_int(
self, dim1, dim2, axis, in_quantized, out_quantized, gc, dc
):
# X has scale 1, so exactly represented after quantization
Reported by Pylint.
Line: 25
Column: 5
in_quantized=st.booleans(),
out_quantized=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_concat_int(
self, dim1, dim2, axis, in_quantized, out_quantized, gc, dc
):
# X has scale 1, so exactly represented after quantization
Reported by Pylint.
Line: 25
Column: 5
in_quantized=st.booleans(),
out_quantized=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_concat_int(
self, dim1, dim2, axis, in_quantized, out_quantized, gc, dc
):
# X has scale 1, so exactly represented after quantization
Reported by Pylint.
Line: 25
Column: 5
in_quantized=st.booleans(),
out_quantized=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_concat_int(
self, dim1, dim2, axis, in_quantized, out_quantized, gc, dc
):
# X has scale 1, so exactly represented after quantization
Reported by Pylint.
Line: 25
Column: 5
in_quantized=st.booleans(),
out_quantized=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_concat_int(
self, dim1, dim2, axis, in_quantized, out_quantized, gc, dc
):
# X has scale 1, so exactly represented after quantization
Reported by Pylint.
caffe2/python/operator_test/negate_gradient_op_test.py
14 issues
Line: 9
Column: 1
from caffe2.python import workspace, core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
class TestNegateGradient(serial.SerializedTestCase):
Reported by Pylint.
Line: 10
Column: 1
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
class TestNegateGradient(serial.SerializedTestCase):
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import workspace, core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
Reported by Pylint.
Line: 14
Column: 1
import numpy as np
class TestNegateGradient(serial.SerializedTestCase):
@given(X=hu.tensor(), inplace=st.booleans(), **hu.gcs)
@settings(deadline=10000)
def test_forward(self, X, inplace, gc, dc):
def neg_grad_ref(X):
Reported by Pylint.
Line: 18
Column: 5
@given(X=hu.tensor(), inplace=st.booleans(), **hu.gcs)
@settings(deadline=10000)
def test_forward(self, X, inplace, gc, dc):
def neg_grad_ref(X):
return (X,)
op = core.CreateOperator("NegateGradient", ["X"], ["Y" if not inplace else "X"])
self.assertReferenceChecks(gc, op, [X], neg_grad_ref)
Reported by Pylint.
Line: 18
Column: 5
@given(X=hu.tensor(), inplace=st.booleans(), **hu.gcs)
@settings(deadline=10000)
def test_forward(self, X, inplace, gc, dc):
def neg_grad_ref(X):
return (X,)
op = core.CreateOperator("NegateGradient", ["X"], ["Y" if not inplace else "X"])
self.assertReferenceChecks(gc, op, [X], neg_grad_ref)
Reported by Pylint.
Line: 18
Column: 5
@given(X=hu.tensor(), inplace=st.booleans(), **hu.gcs)
@settings(deadline=10000)
def test_forward(self, X, inplace, gc, dc):
def neg_grad_ref(X):
return (X,)
op = core.CreateOperator("NegateGradient", ["X"], ["Y" if not inplace else "X"])
self.assertReferenceChecks(gc, op, [X], neg_grad_ref)
Reported by Pylint.
Line: 18
Column: 5
@given(X=hu.tensor(), inplace=st.booleans(), **hu.gcs)
@settings(deadline=10000)
def test_forward(self, X, inplace, gc, dc):
def neg_grad_ref(X):
return (X,)
op = core.CreateOperator("NegateGradient", ["X"], ["Y" if not inplace else "X"])
self.assertReferenceChecks(gc, op, [X], neg_grad_ref)
Reported by Pylint.
Line: 19
Column: 9
@given(X=hu.tensor(), inplace=st.booleans(), **hu.gcs)
@settings(deadline=10000)
def test_forward(self, X, inplace, gc, dc):
def neg_grad_ref(X):
return (X,)
op = core.CreateOperator("NegateGradient", ["X"], ["Y" if not inplace else "X"])
self.assertReferenceChecks(gc, op, [X], neg_grad_ref)
self.assertDeviceChecks(dc, op, [X], [0])
Reported by Pylint.
Line: 22
Column: 9
def neg_grad_ref(X):
return (X,)
op = core.CreateOperator("NegateGradient", ["X"], ["Y" if not inplace else "X"])
self.assertReferenceChecks(gc, op, [X], neg_grad_ref)
self.assertDeviceChecks(dc, op, [X], [0])
@given(size=st.lists(st.integers(min_value=1, max_value=20),
min_size=1, max_size=5))
Reported by Pylint.