The following issues were found:
caffe2/contrib/nnpack/nnpack_ops_test.py
36 issues
Line: 7
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given, assume, settings
import numpy as np
import time
import os
from caffe2.python import core, dyndep
Reported by Pylint.
Line: 8
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given, assume, settings
import numpy as np
import time
import os
from caffe2.python import core, dyndep
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 1
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given, assume, settings
import numpy as np
Reported by Pylint.
Line: 10
Column: 1
import hypothesis.strategies as st
from hypothesis import given, assume, settings
import numpy as np
import time
import os
from caffe2.python import core, dyndep
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 11
Column: 1
from hypothesis import given, assume, settings
import numpy as np
import time
import os
from caffe2.python import core, dyndep
import caffe2.python.hypothesis_test_util as hu
dyndep.InitOpsLibrary("@/caffe2/caffe2/contrib/nnpack:nnpack_ops")
Reported by Pylint.
Line: 21
Column: 1
np.random.seed(1)
def benchmark(ws, net, warmups=5, iters=100):
    for _ in range(warmups):
        ws.run(net)
    plan = core.Plan("plan")
    plan.AddStep(core.ExecutionStep("test-step", net, iters))
    before = time.time()
Reported by Pylint.
Line: 21
Column: 1
np.random.seed(1)
def benchmark(ws, net, warmups=5, iters=100):
    for _ in range(warmups):
        ws.run(net)
    plan = core.Plan("plan")
    plan.AddStep(core.ExecutionStep("test-step", net, iters))
    before = time.time()
Reported by Pylint.
Line: 34
Column: 1
    return after - before
def has_avx2():
    import subprocess
    try:
        subprocess.check_output(["grep", "avx2", "/proc/cpuinfo"])
        return True
    except subprocess.CalledProcessError:
Reported by Pylint.
Line: 35
Column: 5
def has_avx2():
    import subprocess
    try:
        subprocess.check_output(["grep", "avx2", "/proc/cpuinfo"])
        return True
    except subprocess.CalledProcessError:
        # grep exits with rc 1 on no matches
Reported by Pylint.
Line: 35
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b404-import-subprocess
def has_avx2():
    import subprocess
    try:
        subprocess.check_output(["grep", "avx2", "/proc/cpuinfo"])
        return True
    except subprocess.CalledProcessError:
        # grep exits with rc 1 on no matches
Reported by Bandit.
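One way to address the B404 finding, sketched below under the assumption that the check only needs to work on Linux, is to read /proc/cpuinfo directly instead of shelling out to grep:

def has_avx2():
    # Reading /proc/cpuinfo directly avoids the subprocess import that
    # Bandit flags; on non-Linux systems the file is absent, so report False.
    try:
        with open("/proc/cpuinfo") as f:
            return "avx2" in f.read()
    except OSError:
        return False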
torch/autograd/profiler_legacy.py
36 issues
Line: 153
Column: 5
"""
return (record.handle(), record.node_id())
next_id = 0
start_record = None
functions = []
record_stack = []
# '__start_profile' is not guaranteed to be first, so we must find it here
Reported by Pylint.
Line: 156
Column: 5
    next_id = 0
    start_record = None
    functions = []
    record_stack = []
    # '__start_profile' is not guaranteed to be first, so we must find it here
    for record in itertools.chain(*thread_records):
        name = record.name()
        if start_record is None and name == '__start_profile':
Reported by Pylint.
Line: 1
Column: 1
import torch
import torch.cuda
from torch.autograd.profiler_util import (
    EventList, FunctionEvent, MEMORY_EVENT_NAME,
    _filter_name, _filter_stack_entry, _rewrite_name
)
from torch.autograd import (
    DeviceType, ProfilerConfig, ProfilerState,
Reported by Pylint.
Line: 13
Column: 1
    _disable_profiler_legacy, _enable_profiler_legacy,
)
import itertools
from warnings import warn
class profile(object):
    """DEPRECATED: use torch.profiler instead"""
Reported by Pylint.
Line: 14
Column: 1
)
import itertools
from warnings import warn
class profile(object):
"""DEPRECATED: use torch.profiler instead"""
def __init__(
Reported by Pylint.
Line: 17
Column: 1
from warnings import warn
class profile(object):
"""DEPRECATED: use torch.profiler instead"""
def __init__(
self,
enabled=True,
*,
Reported by Pylint.
Line: 17
Column: 1
from warnings import warn
class profile(object):
"""DEPRECATED: use torch.profiler instead"""
def __init__(
self,
enabled=True,
*,
Reported by Pylint.
Line: 17
Column: 1
from warnings import warn
class profile(object):
"""DEPRECATED: use torch.profiler instead"""
def __init__(
self,
enabled=True,
*,
Reported by Pylint.
Line: 51
Column: 5
        else:
            self.profiler_kind = ProfilerState.CPU
    def config(self):
        return ProfilerConfig(
            self.profiler_kind,
            self.record_shapes,
            self.profile_memory,
            self.with_stack,
Reported by Pylint.
Line: 60
Column: 5
            self.with_flops,
            self.with_modules)
    def __enter__(self):
        if not self.enabled:
            return
        if self.entered:
            raise RuntimeError("Profiler context manager is not reentrant")
        self.entered = True
Reported by Pylint.
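The docstring above marks this class as deprecated in favor of torch.profiler; a minimal sketch of the replacement API (assuming a CPU-only workload) looks like this:

import torch
from torch.profiler import profile, ProfilerActivity

# Profile a small matmul with the non-legacy profiler and print a summary.
with profile(activities=[ProfilerActivity.CPU]) as prof:
    torch.mm(torch.randn(64, 64), torch.randn(64, 64))
print(prof.key_averages().table(sort_by="cpu_time_total"))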
test/distributed/test_distributed_fork.py
36 issues
Line: 5
Column: 1
import sys
import tempfile
from functools import wraps
import torch
import torch.cuda
import torch.distributed as dist
from torch.testing._internal.common_utils import TEST_WITH_TSAN
if not dist.is_available():
Reported by Pylint.
Line: 6
Column: 1
import tempfile
from functools import wraps
import torch
import torch.cuda
import torch.distributed as dist
from torch.testing._internal.common_utils import TEST_WITH_TSAN
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
Reported by Pylint.
Line: 7
Column: 1
from functools import wraps
import torch
import torch.cuda
import torch.distributed as dist
from torch.testing._internal.common_utils import TEST_WITH_TSAN
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
Reported by Pylint.
Line: 8
Column: 1
import torch
import torch.cuda
import torch.distributed as dist
from torch.testing._internal.common_utils import TEST_WITH_TSAN
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
Reported by Pylint.
Line: 14
Column: 1
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.testing._internal.common_utils import TestCase, find_free_port, run_tests
from torch.distributed.distributed_c10d import _get_default_group
from torch.testing._internal.distributed.distributed_test import (
DistributedTest, TestDistBackend
)
Reported by Pylint.
Line: 15
Column: 1
    sys.exit(0)
from torch.testing._internal.common_utils import TestCase, find_free_port, run_tests
from torch.distributed.distributed_c10d import _get_default_group
from torch.testing._internal.distributed.distributed_test import (
    DistributedTest, TestDistBackend
)
torch.backends.cuda.matmul.allow_tf32 = False
Reported by Pylint.
Line: 16
Column: 1
from torch.testing._internal.common_utils import TestCase, find_free_port, run_tests
from torch.distributed.distributed_c10d import _get_default_group
from torch.testing._internal.distributed.distributed_test import (
    DistributedTest, TestDistBackend
)
torch.backends.cuda.matmul.allow_tf32 = False
Reported by Pylint.
Line: 37
Column: 13
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                import torch.utils.cpp_extension
                torch.utils.cpp_extension.verify_ninja_availability()
            except RuntimeError:
                print(CPP_EXTENSIONS_WARNING)
                return 0
Reported by Pylint.
Line: 37
Column: 13
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                import torch.utils.cpp_extension
                torch.utils.cpp_extension.verify_ninja_availability()
            except RuntimeError:
                print(CPP_EXTENSIONS_WARNING)
                return 0
Reported by Pylint.
Line: 53
Column: 52
if BACKEND == "gloo" or BACKEND == "nccl":
    class TestDistBackendWithFork(TestDistBackend, DistributedTest._DistTestBase):
        def setUp(self):
            super().setUp()
            self._fork_processes()
            torch.backends.cudnn.flags(allow_tf32=False).__enter__()
Reported by Pylint.
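The try/except around verify_ninja_availability() above is a skip-if-missing guard; a self-contained sketch of the same pattern, using unittest.SkipTest rather than the test file's return 0, could look like:

import unittest
from functools import wraps

def skip_if_no_ninja(func):
    # Skip the wrapped test when the ninja build tool is unavailable;
    # verify_ninja_availability() raises RuntimeError in that case.
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            import torch.utils.cpp_extension
            torch.utils.cpp_extension.verify_ninja_availability()
        except RuntimeError:
            raise unittest.SkipTest("ninja not available")
        return func(*args, **kwargs)
    return wrapper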
caffe2/python/operator_test/distance_op_test.py
36 issues
Line: 9
Column: 1
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
class DistanceTest(serial.SerializedTestCase):
    @serial.given(n=st.integers(1, 3),
Reported by Pylint.
Line: 17
Column: 50
    @serial.given(n=st.integers(1, 3),
                  dim=st.integers(4, 16),
                  **hu.gcs)
    def test_cosine_similarity(self, n, dim, gc, dc):
        X = np.random.uniform(-1, 1, (n, dim)).astype(np.float32)
        Y = np.random.uniform(-1, 1, (n, dim)).astype(np.float32)
        self.ws.create_blob("X").feed(X)
        self.ws.create_blob("Y").feed(Y)
        kEps = 1e-12
Reported by Pylint.
Line: 94
Column: 44
    @serial.given(n=st.integers(1, 3),
                  dim=st.integers(4, 16),
                  **hu.gcs)
    def test_L2_distance(self, n, dim, gc, dc):
        X = np.random.uniform(-1, 1, (n, dim)).astype(np.float32)
        Y = np.random.uniform(-1, 1, (n, dim)).astype(np.float32)
        self.ws.create_blob("X").feed(X)
        self.ws.create_blob("Y").feed(Y)
        l2_op = core.CreateOperator("SquaredL2Distance",
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
Reported by Pylint.
Line: 13
Column: 1
import numpy as np
class DistanceTest(serial.SerializedTestCase):
    @serial.given(n=st.integers(1, 3),
                  dim=st.integers(4, 16),
                  **hu.gcs)
    def test_cosine_similarity(self, n, dim, gc, dc):
        X = np.random.uniform(-1, 1, (n, dim)).astype(np.float32)
Reported by Pylint.
Line: 17
Column: 5
    @serial.given(n=st.integers(1, 3),
                  dim=st.integers(4, 16),
                  **hu.gcs)
    def test_cosine_similarity(self, n, dim, gc, dc):
        X = np.random.uniform(-1, 1, (n, dim)).astype(np.float32)
        Y = np.random.uniform(-1, 1, (n, dim)).astype(np.float32)
        self.ws.create_blob("X").feed(X)
        self.ws.create_blob("Y").feed(Y)
        kEps = 1e-12
Reported by Pylint.
Line: 17
Column: 5
    @serial.given(n=st.integers(1, 3),
                  dim=st.integers(4, 16),
                  **hu.gcs)
    def test_cosine_similarity(self, n, dim, gc, dc):
        X = np.random.uniform(-1, 1, (n, dim)).astype(np.float32)
        Y = np.random.uniform(-1, 1, (n, dim)).astype(np.float32)
        self.ws.create_blob("X").feed(X)
        self.ws.create_blob("Y").feed(Y)
        kEps = 1e-12
Reported by Pylint.
Line: 17
Column: 5
    @serial.given(n=st.integers(1, 3),
                  dim=st.integers(4, 16),
                  **hu.gcs)
    def test_cosine_similarity(self, n, dim, gc, dc):
        X = np.random.uniform(-1, 1, (n, dim)).astype(np.float32)
        Y = np.random.uniform(-1, 1, (n, dim)).astype(np.float32)
        self.ws.create_blob("X").feed(X)
        self.ws.create_blob("Y").feed(Y)
        kEps = 1e-12
Reported by Pylint.
Line: 17
Column: 5
    @serial.given(n=st.integers(1, 3),
                  dim=st.integers(4, 16),
                  **hu.gcs)
    def test_cosine_similarity(self, n, dim, gc, dc):
        X = np.random.uniform(-1, 1, (n, dim)).astype(np.float32)
        Y = np.random.uniform(-1, 1, (n, dim)).astype(np.float32)
        self.ws.create_blob("X").feed(X)
        self.ws.create_blob("Y").feed(Y)
        kEps = 1e-12
Reported by Pylint.
Line: 18
Column: 9
                  dim=st.integers(4, 16),
                  **hu.gcs)
    def test_cosine_similarity(self, n, dim, gc, dc):
        X = np.random.uniform(-1, 1, (n, dim)).astype(np.float32)
        Y = np.random.uniform(-1, 1, (n, dim)).astype(np.float32)
        self.ws.create_blob("X").feed(X)
        self.ws.create_blob("Y").feed(Y)
        kEps = 1e-12
        cos_op = core.CreateOperator("CosineSimilarity", ["X", "Y"], ["cos"])
Reported by Pylint.
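For reference, the quantity the CosineSimilarity op is being tested against can be computed row-wise in plain NumPy; this hypothetical helper mirrors the kEps guard seen in the test:

import numpy as np

def cosine_reference(X, Y, eps=1e-12):
    # Row-wise cosine similarity for X, Y of shape (n, dim);
    # eps guards against division by zero for near-zero rows.
    num = np.sum(X * Y, axis=1)
    denom = np.sqrt(np.sum(X * X, axis=1) * np.sum(Y * Y, axis=1))
    return num / np.maximum(denom, eps)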
test/quantization/eager/test_fusion.py
36 issues
Line: 1
Column: 1
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.quantized as nniq
import torch.nn.intrinsic.qat as nniqat
from torch.quantization import (
    quantize,
    prepare,
Reported by Pylint.
Line: 2
Column: 1
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.quantized as nniq
import torch.nn.intrinsic.qat as nniqat
from torch.quantization import (
    quantize,
    prepare,
Reported by Pylint.
Line: 3
Column: 1
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.quantized as nniq
import torch.nn.intrinsic.qat as nniqat
from torch.quantization import (
    quantize,
    prepare,
Reported by Pylint.
Line: 4
Column: 1
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.quantized as nniq
import torch.nn.intrinsic.qat as nniqat
from torch.quantization import (
    quantize,
    prepare,
Reported by Pylint.
Line: 5
Column: 1
import torch.nn as nn
import torch.nn.quantized as nnq
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.quantized as nniq
import torch.nn.intrinsic.qat as nniqat
from torch.quantization import (
    quantize,
    prepare,
    convert,
Reported by Pylint.
Line: 6
Column: 1
import torch.nn.quantized as nnq
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.quantized as nniq
import torch.nn.intrinsic.qat as nniqat
from torch.quantization import (
    quantize,
    prepare,
    convert,
    prepare_qat,
Reported by Pylint.
Line: 7
Column: 1
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.quantized as nniq
import torch.nn.intrinsic.qat as nniqat
from torch.quantization import (
    quantize,
    prepare,
    convert,
    prepare_qat,
    quantize_qat,
Reported by Pylint.
Line: 19
Column: 1
    default_qat_qconfig,
)
from torch.testing._internal.common_quantization import (
    QuantizationTestCase,
    ModelForFusion,
    ModelWithSequentialFusion,
    ModelForLinearBNFusion,
    ModelForFusionWithBias,
Reported by Pylint.
Line: 30
Column: 1
    skipIfNoFBGEMM,
)
from torch.testing._internal.common_quantized import (
    override_quantized_engine,
    supported_qengines,
)
Reported by Pylint.
Line: 330
Column: 55
        }
        fused = False
        def fw_pre_hook(fused_module_class, h_module, input):
            if fused:
                self.assertEqual(type(h_module), fused_module_class,
                                 "After fusion owner of the first module's forward pre hook is not a fused module")
            counter['pre_forwards'] += 1
Reported by Pylint.
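The fw_pre_hook above is attached via register_forward_pre_hook; a minimal standalone sketch of that mechanism (with a hypothetical counter, not the test's exact hook signature):

import torch
import torch.nn as nn

counter = {'pre_forwards': 0}

def fw_pre_hook(module, inputs):
    # Called before every forward() of the module it is registered on.
    counter['pre_forwards'] += 1

layer = nn.Linear(4, 4)
layer.register_forward_pre_hook(fw_pre_hook)
layer(torch.randn(2, 4))
assert counter['pre_forwards'] == 1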
caffe2/python/onnx/tests/conversion_test.py
36 issues
Line: 20
Column: 1
from caffe2.python.model_helper import ModelHelper
from click.testing import CliRunner
import numpy as np
from onnx import helper, ModelProto, TensorProto
from caffe2.python.onnx.helper import c2_native_run_net
from caffe2.python.onnx.bin.conversion import caffe2_to_onnx, onnx_to_caffe2
import caffe2.python.onnx.backend as c2
from caffe2.python.onnx.tests.test_utils import TestCase
Reported by Pylint.
Line: 227
Column: 13
        # input needs at least one value.
        graph_inputs = [helper.make_tensor_value_info("i", TensorProto.INT64, (1,)),
                        helper.make_tensor_value_info("cond", TensorProto.BOOL, (1,))]
        for type, shape, name in input_types:
            graph_inputs.append(helper.make_tensor_value_info("_" + name, type, shape))
        graph_outputs = [helper.make_tensor_value_info("cond", TensorProto.BOOL, (1,))]
        for type, shape, name in output_types:
            graph_outputs.append(helper.make_tensor_value_info("_" + name, type, shape))
        body_graph = helper.make_graph(body_nodes, "body_graph", graph_inputs,
Reported by Pylint.
Line: 272
Column: 3
        out = p.run(X)
        np.testing.assert_allclose(out.Y, Y)
    # TODO investigate why this is failing after changing Reshape
    # operator from taking the new shape as attribute to as input
    @unittest.skip('Start failing after Reshape op change')
    def test_convert_end2end(self):
        predict_net_f = tempfile.NamedTemporaryFile()
        init_net_f = tempfile.NamedTemporaryFile()
Reported by Pylint.
Line: 1
Column: 1
## @package onnx
# Module caffe2.python.onnx.tests.conversion_test
import json
import tempfile
Reported by Pylint.
Line: 21
Column: 1
from click.testing import CliRunner
import numpy as np
from onnx import helper, ModelProto, TensorProto
from caffe2.python.onnx.helper import c2_native_run_net
from caffe2.python.onnx.bin.conversion import caffe2_to_onnx, onnx_to_caffe2
import caffe2.python.onnx.backend as c2
from caffe2.python.onnx.tests.test_utils import TestCase
Reported by Pylint.
Line: 28
Column: 1
from caffe2.python.onnx.tests.test_utils import TestCase
class TestConversion(TestCase):
    def _run_command(self, cmd, *args, **kwargs):
        runner = CliRunner()
        result = runner.invoke(cmd, *args, **kwargs)
        self.assertEqual(result.exit_code, 0, textwrap.dedent('''
            Command exited with non-zero exit code:
Reported by Pylint.
Line: 42
Column: 5
            traceback.format_exception(*result.exc_info))))
        return result
    def test_caffe2_to_onnx(self):
        caffe2_net = tempfile.NamedTemporaryFile()
        caffe2_init_net = tempfile.NamedTemporaryFile()
        output = tempfile.NamedTemporaryFile()
        model = ModelHelper(name='caffe2-to-onnx-test')
Reported by Pylint.
Line: 74
Column: 5
        self.assertEqual(len(onnx_model.graph.initializer), 1)
        self.assertEqual(onnx_model.graph.initializer[0].name, onnx_model.graph.input[0].name)
    def test_caffe2_to_onnx_value_info(self):
        caffe2_net = tempfile.NamedTemporaryFile()
        output = tempfile.NamedTemporaryFile()
        model = ModelHelper(name='caffe2-to-onnx-test')
        brew.relu(model, ["X"], "Y")
Reported by Pylint.
Line: 102
Column: 5
        self.assertEqual(len(onnx_model.graph.initializer), 0)
    @unittest.skip("Disabled due to onnx optimizer deprecation")
    def test_onnx_to_caffe2(self):
        onnx_model = tempfile.NamedTemporaryFile()
        output = tempfile.NamedTemporaryFile()
        init_net_output = tempfile.NamedTemporaryFile()
        node_def = helper.make_node(
Reported by Pylint.
Line: 142
Column: 5
                     for init_op in caffe2_init_net.op], [])),
            {'W'})
    def test_onnx_to_caffe2_zipfile(self):
        buf = tempfile.NamedTemporaryFile()
        onnx_model = zipfile.ZipFile(buf, 'w')
        node_def = helper.make_node(
            "MatMul", ["X", "W"], ["Y"])
Reported by Pylint.
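The tests above lean on onnx.helper to build graphs by hand; a compact sketch of that API, with made-up tensor shapes:

from onnx import helper, TensorProto

# Build a one-node MatMul graph and wrap it in a model proto.
node = helper.make_node("MatMul", ["X", "W"], ["Y"])
graph = helper.make_graph(
    [node], "matmul-graph",
    [helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
     helper.make_tensor_value_info("W", TensorProto.FLOAT, (3, 4))],
    [helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 4))])
model = helper.make_model(graph)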
torch/distributed/algorithms/ddp_comm_hooks/quantization_hooks.py
36 issues
Line: 7
Column: 9
def _quantize_per_tensor_cuda(x, scale, zero_point):
    y = torch.round(x / scale) + zero_point
    y = torch.clamp(y, 0, 255).to(torch.uint8)
    return y
def _dequantize_per_tensor_cuda(y, scale, zero_point):
Reported by Pylint.
Line: 8
Column: 35
def _quantize_per_tensor_cuda(x, scale, zero_point):
    y = torch.round(x / scale) + zero_point
    y = torch.clamp(y, 0, 255).to(torch.uint8)
    return y
def _dequantize_per_tensor_cuda(y, scale, zero_point):
    x = scale * (y.to(torch.float32) - zero_point)
Reported by Pylint.
Line: 8
Column: 9
def _quantize_per_tensor_cuda(x, scale, zero_point):
    y = torch.round(x / scale) + zero_point
    y = torch.clamp(y, 0, 255).to(torch.uint8)
    return y
def _dequantize_per_tensor_cuda(y, scale, zero_point):
    x = scale * (y.to(torch.float32) - zero_point)
Reported by Pylint.
Line: 13
Column: 23
def _dequantize_per_tensor_cuda(y, scale, zero_point):
    x = scale * (y.to(torch.float32) - zero_point)
    return x
def _quantize_per_channel_cuda(x, scale, zero_point):
    y = torch.zeros(x.size(), device=x.device)
Reported by Pylint.
Line: 18
Column: 9
def _quantize_per_channel_cuda(x, scale, zero_point):
    y = torch.zeros(x.size(), device=x.device)
    for i in range(x.size()[0]):
        y[i, :] = torch.round(x[i, :] / scale[i]) + zero_point[i]
    y = torch.clamp(y, 0, 255).to(torch.uint8)
    return y
Reported by Pylint.
Line: 20
Column: 19
def _quantize_per_channel_cuda(x, scale, zero_point):
    y = torch.zeros(x.size(), device=x.device)
    for i in range(x.size()[0]):
        y[i, :] = torch.round(x[i, :] / scale[i]) + zero_point[i]
    y = torch.clamp(y, 0, 255).to(torch.uint8)
    return y
def _dequantize_per_channel_cuda(y, scale, zero_point):
Reported by Pylint.
Line: 21
Column: 35
    y = torch.zeros(x.size(), device=x.device)
    for i in range(x.size()[0]):
        y[i, :] = torch.round(x[i, :] / scale[i]) + zero_point[i]
    y = torch.clamp(y, 0, 255).to(torch.uint8)
    return y
def _dequantize_per_channel_cuda(y, scale, zero_point):
    y = y.to(torch.float32).cuda(y.device)
Reported by Pylint.
Line: 21
Column: 9
    y = torch.zeros(x.size(), device=x.device)
    for i in range(x.size()[0]):
        y[i, :] = torch.round(x[i, :] / scale[i]) + zero_point[i]
    y = torch.clamp(y, 0, 255).to(torch.uint8)
    return y
def _dequantize_per_channel_cuda(y, scale, zero_point):
    y = y.to(torch.float32).cuda(y.device)
Reported by Pylint.
Line: 26
Column: 14
def _dequantize_per_channel_cuda(y, scale, zero_point):
    y = y.to(torch.float32).cuda(y.device)
    x = torch.zeros_like(y, device=y.device)
    for i in range(x.size()[0]):
        x[i, :] = scale[i] * (y[i, :] - zero_point[i])
    return x
Reported by Pylint.
Line: 27
Column: 9
def _dequantize_per_channel_cuda(y, scale, zero_point):
    y = y.to(torch.float32).cuda(y.device)
    x = torch.zeros_like(y, device=y.device)
    for i in range(x.size()[0]):
        x[i, :] = scale[i] * (y[i, :] - zero_point[i])
    return x
Reported by Pylint.
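The per-channel helpers above loop over rows in Python; an equivalent vectorized sketch (assuming x is 2-D and scale/zero_point are 1-D tensors with one entry per channel) removes the loop via broadcasting:

import torch

def quantize_per_channel_vectorized(x, scale, zero_point):
    # Broadcast scale/zero_point over the channel (row) dimension.
    y = torch.round(x / scale.unsqueeze(1)) + zero_point.unsqueeze(1)
    return torch.clamp(y, 0, 255).to(torch.uint8)

def dequantize_per_channel_vectorized(y, scale, zero_point):
    return scale.unsqueeze(1) * (y.to(torch.float32) - zero_point.unsqueeze(1))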
test/ao/sparsity/test_parametrization.py
36 issues
Line: 5
Column: 1
import logging
from torch import nn
from torch.ao.sparsity.sparsifier import utils
from torch.nn.utils import parametrize
import torch
from torch.testing._internal.common_utils import TestCase
Reported by Pylint.
Line: 6
Column: 1
import logging
from torch import nn
from torch.ao.sparsity.sparsifier import utils
from torch.nn.utils import parametrize
import torch
from torch.testing._internal.common_utils import TestCase
Reported by Pylint.
Line: 7
Column: 1
from torch import nn
from torch.ao.sparsity.sparsifier import utils
from torch.nn.utils import parametrize
import torch
from torch.testing._internal.common_utils import TestCase
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
Reported by Pylint.
Line: 9
Column: 1
from torch.ao.sparsity.sparsifier import utils
from torch.nn.utils import parametrize
import torch
from torch.testing._internal.common_utils import TestCase
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
class ModelUnderTest(nn.Module):
Reported by Pylint.
Line: 10
Column: 1
from torch.nn.utils import parametrize
import torch
from torch.testing._internal.common_utils import TestCase
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
class ModelUnderTest(nn.Module):
    def __init__(self, bias=True):
Reported by Pylint.
Line: 1
Column: 1
# -*- coding: utf-8 -*-
import logging
from torch import nn
from torch.ao.sparsity.sparsifier import utils
from torch.nn.utils import parametrize
import torch
Reported by Pylint.
Line: 12
Column: 1
import torch
from torch.testing._internal.common_utils import TestCase
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
class ModelUnderTest(nn.Module):
    def __init__(self, bias=True):
        super().__init__()
        self.linear = nn.Linear(16, 16, bias=bias)
Reported by Pylint.
Line: 14
Column: 1
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
class ModelUnderTest(nn.Module):
    def __init__(self, bias=True):
        super().__init__()
        self.linear = nn.Linear(16, 16, bias=bias)
        self.seq = nn.Sequential(
            nn.Linear(16, 16, bias=bias),
Reported by Pylint.
Line: 14
Column: 1
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
class ModelUnderTest(nn.Module):
    def __init__(self, bias=True):
        super().__init__()
        self.linear = nn.Linear(16, 16, bias=bias)
        self.seq = nn.Sequential(
            nn.Linear(16, 16, bias=bias),
Reported by Pylint.
Line: 32
Column: 5
        self.seq[0] = nn.Parameter(torch.zeros_like(self.seq[0].bias) + 20.0)
        self.seq[0] = nn.Parameter(torch.zeros_like(self.seq[0].bias) + 30.0)
    def forward(self, x):
        x = self.linear(x)
        x = self.seq(x)
        return x
Reported by Pylint.
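This test file exercises torch.nn.utils.parametrize; a minimal sketch of registering a masking parametrization on a linear layer (FakeSparsity here is an illustrative stand-in, not the test's exact class):

import torch
from torch import nn
from torch.nn.utils import parametrize

class FakeSparsity(nn.Module):
    # Multiplies the weight by a fixed mask whenever it is accessed.
    def __init__(self, mask):
        super().__init__()
        self.register_buffer('mask', mask)

    def forward(self, w):
        return self.mask * w

linear = nn.Linear(16, 16)
mask = torch.ones_like(linear.weight)
parametrize.register_parametrization(linear, 'weight', FakeSparsity(mask))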
benchmarks/operator_benchmark/pt/qobserver_test.py
36 issues
Line: 3
Column: 1
import operator_benchmark as op_bench
import torch
import torch.quantization.observer as obs
qobserver_short_configs_dict = {
    'attr_names': ('C', 'M', 'N', 'dtype', 'device'),
    'attrs': (
        (3, 512, 512, torch.quint8, 'cpu'),
Reported by Pylint.
Line: 4
Column: 1
import operator_benchmark as op_bench
import torch
import torch.quantization.observer as obs
qobserver_short_configs_dict = {
    'attr_names': ('C', 'M', 'N', 'dtype', 'device'),
    'attrs': (
        (3, 512, 512, torch.quint8, 'cpu'),
Reported by Pylint.
Line: 42
Column: 38
}
qobserver_per_tensor_configs_short = op_bench.config_list(
    cross_product_configs={
        'qscheme': (torch.per_tensor_affine, torch.per_tensor_symmetric)
    },
    **qobserver_short_configs_dict,
)
Reported by Pylint.
Line: 49
Column: 37
    **qobserver_short_configs_dict,
)
qobserver_per_tensor_configs_long = op_bench.cross_product_configs(
    qscheme=(torch.per_tensor_affine, torch.per_tensor_symmetric),
    **qobserver_long_configs_dict,
)
qobserver_per_channel_configs_short = op_bench.config_list(
Reported by Pylint.
Line: 54
Column: 39
    **qobserver_long_configs_dict,
)
qobserver_per_channel_configs_short = op_bench.config_list(
    cross_product_configs={
        'qscheme': (torch.per_channel_affine, torch.per_channel_symmetric)
    },
    **qobserver_short_configs_dict,
)
Reported by Pylint.
Line: 61
Column: 38
    **qobserver_short_configs_dict,
)
qobserver_per_channel_configs_long = op_bench.cross_product_configs(
    qscheme=(torch.per_channel_affine, torch.per_channel_symmetric),
    **qobserver_long_configs_dict,
)
q_hist_observer_per_tensor_configs_short = op_bench.config_list(
Reported by Pylint.
Line: 66
Column: 44
    **qobserver_long_configs_dict,
)
q_hist_observer_per_tensor_configs_short = op_bench.config_list(
    cross_product_configs={
        'qscheme': (torch.per_tensor_affine, torch.per_tensor_symmetric)
    },
    **q_hist_observer_short_configs_dict,
)
Reported by Pylint.
Line: 73
Column: 43
    **q_hist_observer_short_configs_dict,
)
q_hist_observer_per_tensor_configs_long = op_bench.cross_product_configs(
    qscheme=(torch.per_tensor_affine, torch.per_tensor_symmetric),
    **q_hist_observer_long_configs_dict,
)
Reported by Pylint.
Line: 79
Column: 29
)
qobserver_per_tensor_list = op_bench.op_list(
    attr_names=['op_name', 'op_func'],
    attrs=[
        ['MinMaxObserver', obs.MinMaxObserver],
        ['MovingAverageMinMaxObserver', obs.MovingAverageMinMaxObserver],
    ]
Reported by Pylint.
Line: 87
Column: 30
    ]
)
qobserver_per_channel_list = op_bench.op_list(
    attr_names=['op_name', 'op_func'],
    attrs=[
        ['PerChannelMinMaxObserver', obs.PerChannelMinMaxObserver],
        ['MovingAveragePerChannelMinMaxObserver',
         obs.MovingAveragePerChannelMinMaxObserver],
Reported by Pylint.
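What these benchmarks time, roughly, is an observer's forward pass plus qparam calculation; a short sketch with one of the benchmarked observers:

import torch
import torch.quantization.observer as obs

# Feed a tensor through MinMaxObserver and read back the computed qparams.
observer = obs.MinMaxObserver(dtype=torch.quint8,
                              qscheme=torch.per_tensor_affine)
observer(torch.randn(3, 512, 512))
scale, zero_point = observer.calculate_qparams()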
test/onnx/test_pytorch_onnx_onnxruntime_cuda.py
36 issues
Line: 2
Column: 1
import unittest
import onnxruntime # noqa: F401
import torch
from torch.cuda.amp import autocast
from test_pytorch_common import skipIfUnsupportedMinOpsetVersion
from test_pytorch_common import skipIfNoCuda
Reported by Pylint.
Line: 3
Column: 1
import unittest
import onnxruntime # noqa: F401
import torch
from torch.cuda.amp import autocast
from test_pytorch_common import skipIfUnsupportedMinOpsetVersion
from test_pytorch_common import skipIfNoCuda
Reported by Pylint.
Line: 5
Column: 1
import onnxruntime # noqa: F401
import torch
from torch.cuda.amp import autocast
from test_pytorch_common import skipIfUnsupportedMinOpsetVersion
from test_pytorch_common import skipIfNoCuda
from test_pytorch_onnx_onnxruntime import TestONNXRuntime
Reported by Pylint.
Line: 13
Column: 5
from test_pytorch_onnx_onnxruntime import TestONNXRuntime
class TestONNXRuntime_cuda(unittest.TestCase):
    from torch.onnx.symbolic_helper import _export_onnx_opset_version
    opset_version = _export_onnx_opset_version
    keep_initializers_as_inputs = True
    onnx_shape_inference = True
    @skipIfUnsupportedMinOpsetVersion(9)
Reported by Pylint.
Line: 2
Column: 1
import unittest
import onnxruntime # noqa: F401
import torch
from torch.cuda.amp import autocast
from test_pytorch_common import skipIfUnsupportedMinOpsetVersion
from test_pytorch_common import skipIfNoCuda
Reported by Pylint.
Line: 53
Column: 31
                self.m = torch.nn.LogSoftmax(dim=1)
            @autocast()
            def forward(self, input, target):
                output = self.loss(self.m(2 * input), target)
                return output
        N, C = 5, 4
        input = torch.randn(N, 16, dtype=torch.float16, device=torch.device("cuda"))
Reported by Pylint.
Line: 58
Column: 9
                return output
        N, C = 5, 4
        input = torch.randn(N, 16, dtype=torch.float16, device=torch.device("cuda"))
        target = torch.empty(N, dtype=torch.long, device=torch.device("cuda")).random_(0, C)
        # using test data containing default ignore_index=-100
        target[target == 1] = -100
        self.run_test(FusionModel(), (input, target))
Reported by Pylint.
Line: 1
Column: 1
import unittest
import onnxruntime # noqa: F401
import torch
from torch.cuda.amp import autocast
from test_pytorch_common import skipIfUnsupportedMinOpsetVersion
from test_pytorch_common import skipIfNoCuda
Reported by Pylint.
Line: 12
Column: 1
from test_pytorch_onnx_onnxruntime import TestONNXRuntime
class TestONNXRuntime_cuda(unittest.TestCase):
    from torch.onnx.symbolic_helper import _export_onnx_opset_version
    opset_version = _export_onnx_opset_version
    keep_initializers_as_inputs = True
    onnx_shape_inference = True
Reported by Pylint.
Line: 12
Column: 1
from test_pytorch_onnx_onnxruntime import TestONNXRuntime
class TestONNXRuntime_cuda(unittest.TestCase):
    from torch.onnx.symbolic_helper import _export_onnx_opset_version
    opset_version = _export_onnx_opset_version
    keep_initializers_as_inputs = True
    onnx_shape_inference = True
Reported by Pylint.
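The autocast()-decorated forward seen above is the pattern under test; a self-contained sketch (assuming a CUDA device is present):

import torch
from torch.cuda.amp import autocast

class TinyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = torch.nn.Linear(16, 4)

    @autocast()
    def forward(self, x):
        # Inside autocast, eligible ops run in float16 on CUDA.
        return self.fc(x)

model = TinyModel().cuda()
out = model(torch.randn(5, 16, device="cuda"))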