The following issues were found:
test/onnx/test_pytorch_onnx_caffe2.py
1342 issues
Line: 10
Column: 1
import numpy as np
from debug_embed_params import run_embed_params
from torch import nn
from torch.autograd import Variable, function
from torch.nn.utils import rnn as rnn_utils
from torch.onnx import ExportTypes
import torch.onnx
import torch.onnx.operators
Reported by Pylint.
Line: 11
Column: 1
from debug_embed_params import run_embed_params
from torch import nn
from torch.autograd import Variable, function
from torch.nn.utils import rnn as rnn_utils
from torch.onnx import ExportTypes
import torch.onnx
import torch.onnx.operators
import torch.utils.model_zoo as model_zoo
Reported by Pylint.
Line: 12
Column: 1
from debug_embed_params import run_embed_params
from torch import nn
from torch.autograd import Variable, function
from torch.nn.utils import rnn as rnn_utils
from torch.onnx import ExportTypes
import torch.onnx
import torch.onnx.operators
import torch.utils.model_zoo as model_zoo
Reported by Pylint.
Line: 13
Column: 1
from torch import nn
from torch.autograd import Variable, function
from torch.nn.utils import rnn as rnn_utils
from torch.onnx import ExportTypes
import torch.onnx
import torch.onnx.operators
import torch.utils.model_zoo as model_zoo
# Import various models for testing
Reported by Pylint.
Line: 14
Column: 1
from torch.autograd import Variable, function
from torch.nn.utils import rnn as rnn_utils
from torch.onnx import ExportTypes
import torch.onnx
import torch.onnx.operators
import torch.utils.model_zoo as model_zoo
# Import various models for testing
from torchvision.models.alexnet import alexnet
Reported by Pylint.
Line: 15
Column: 1
from torch.nn.utils import rnn as rnn_utils
from torch.onnx import ExportTypes
import torch.onnx
import torch.onnx.operators
import torch.utils.model_zoo as model_zoo
# Import various models for testing
from torchvision.models.alexnet import alexnet
from torchvision.models.densenet import densenet121
Reported by Pylint.
Line: 16
Column: 1
from torch.onnx import ExportTypes
import torch.onnx
import torch.onnx.operators
import torch.utils.model_zoo as model_zoo
# Import various models for testing
from torchvision.models.alexnet import alexnet
from torchvision.models.densenet import densenet121
from torchvision.models.inception import inception_v3
Reported by Pylint.
Line: 19
Column: 1
import torch.utils.model_zoo as model_zoo
# Import various models for testing
from torchvision.models.alexnet import alexnet
from torchvision.models.densenet import densenet121
from torchvision.models.inception import inception_v3
from torchvision.models.resnet import resnet50
from torchvision.models.vgg import vgg16, vgg16_bn, vgg19, vgg19_bn
Reported by Pylint.
Line: 20
Column: 1
# Import various models for testing
from torchvision.models.alexnet import alexnet
from torchvision.models.densenet import densenet121
from torchvision.models.inception import inception_v3
from torchvision.models.resnet import resnet50
from torchvision.models.vgg import vgg16, vgg16_bn, vgg19, vgg19_bn
from model_defs.squeezenet import SqueezeNet
Reported by Pylint.
Line: 21
Column: 1
# Import various models for testing
from torchvision.models.alexnet import alexnet
from torchvision.models.densenet import densenet121
from torchvision.models.inception import inception_v3
from torchvision.models.resnet import resnet50
from torchvision.models.vgg import vgg16, vgg16_bn, vgg19, vgg19_bn
from model_defs.squeezenet import SqueezeNet
from model_defs.super_resolution import SuperResolutionNet
Reported by Pylint.
test/quantization/jit/test_quantize_jit.py
1275 issues
Line: 3
Column: 1
# -*- coding: utf-8 -*-
# torch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.jit
import torch.jit.quantized
# torch.quantization
Reported by Pylint.
Line: 4
Column: 1
# -*- coding: utf-8 -*-
# torch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.jit
import torch.jit.quantized
# torch.quantization
Reported by Pylint.
Line: 5
Column: 1
# torch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.jit
import torch.jit.quantized
# torch.quantization
from torch.quantization import (
Reported by Pylint.
Line: 6
Column: 1
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.jit
import torch.jit.quantized
# torch.quantization
from torch.quantization import (
QConfig,
Reported by Pylint.
Line: 7
Column: 1
import torch.nn as nn
import torch.nn.functional as F
import torch.jit
import torch.jit.quantized
# torch.quantization
from torch.quantization import (
QConfig,
default_dynamic_qconfig,
Reported by Pylint.
Line: 10
Column: 1
import torch.jit.quantized
# torch.quantization
from torch.quantization import (
QConfig,
default_dynamic_qconfig,
float16_dynamic_qconfig,
default_observer,
per_channel_dynamic_qconfig,
Reported by Pylint.
Line: 30
Column: 1
)
# torch.quantization.quantize_jit
from torch.quantization.quantize_jit import (
convert_jit,
convert_dynamic_jit,
fuse_conv_bn_jit,
prepare_jit,
prepare_dynamic_jit,
Reported by Pylint.
Line: 40
Column: 1
)
# Testing utils
from torch.testing._internal.common_quantized import (
override_qengines,
qengine_is_fbgemm,
qengine_is_qnnpack,
)
Reported by Pylint.
Line: 46
Column: 1
qengine_is_qnnpack,
)
from torch.testing._internal.common_quantization import (
QuantizationTestCase,
skipIfNoFBGEMM,
get_script_module,
SingleLayerLinearModel,
SkipQuantModel,
Reported by Pylint.
Line: 61
Column: 1
)
# Annotated models
from torch.testing._internal.common_quantization import (
AnnotatedSingleLayerLinearModel,
AnnotatedSkipQuantModel,
AnnotatedNestedModel,
AnnotatedConvModel,
AnnotatedConvTransposeModel,
Reported by Pylint.
test/quantization/fx/test_quantize_fx.py
1269 issues
Line: 2
Column: 1
import os
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.quantized as nnq
import torch.nn.quantized.dynamic as nnqd
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.quantized as nniq
import torch.multiprocessing as mp
Reported by Pylint.
Line: 3
Column: 1
import os
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.quantized as nnq
import torch.nn.quantized.dynamic as nnqd
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.quantized as nniq
import torch.multiprocessing as mp
Reported by Pylint.
Line: 4
Column: 1
import os
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.quantized as nnq
import torch.nn.quantized.dynamic as nnqd
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.quantized as nniq
import torch.multiprocessing as mp
Reported by Pylint.
Line: 5
Column: 1
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.quantized as nnq
import torch.nn.quantized.dynamic as nnqd
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.quantized as nniq
import torch.multiprocessing as mp
Reported by Pylint.
Line: 6
Column: 1
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.quantized as nnq
import torch.nn.quantized.dynamic as nnqd
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.quantized as nniq
import torch.multiprocessing as mp
# graph mode quantization based on fx
Reported by Pylint.
Line: 7
Column: 1
import torch.nn as nn
import torch.nn.quantized as nnq
import torch.nn.quantized.dynamic as nnqd
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.quantized as nniq
import torch.multiprocessing as mp
# graph mode quantization based on fx
from torch.quantization.quantize_fx import (
Reported by Pylint.
Line: 8
Column: 1
import torch.nn.quantized as nnq
import torch.nn.quantized.dynamic as nnqd
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.quantized as nniq
import torch.multiprocessing as mp
# graph mode quantization based on fx
from torch.quantization.quantize_fx import (
prepare_fx,
Reported by Pylint.
Line: 9
Column: 1
import torch.nn.quantized.dynamic as nnqd
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.quantized as nniq
import torch.multiprocessing as mp
# graph mode quantization based on fx
from torch.quantization.quantize_fx import (
prepare_fx,
convert_fx,
Reported by Pylint.
Line: 12
Column: 1
import torch.multiprocessing as mp
# graph mode quantization based on fx
from torch.quantization.quantize_fx import (
prepare_fx,
convert_fx,
prepare_qat_fx,
)
Reported by Pylint.
Line: 18
Column: 1
prepare_qat_fx,
)
from torch.quantization.fx.quantization_patterns import DefaultNodeQuantizeHandler
from torch.quantization.fx.match_utils import (
is_match,
MatchAllNode,
)
Reported by Pylint.
test/jit/test_tracer.py
1170 issues
Line: 7
Column: 1
import sys
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable, Function
from torch.testing import FileCheck
Reported by Pylint.
Line: 8
Column: 1
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable, Function
from torch.testing import FileCheck
# Make the helper files in test/ importable
Reported by Pylint.
Line: 9
Column: 1
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable, Function
from torch.testing import FileCheck
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
Reported by Pylint.
Line: 10
Column: 1
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable, Function
from torch.testing import FileCheck
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
Reported by Pylint.
Line: 11
Column: 1
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable, Function
from torch.testing import FileCheck
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.common_utils import suppress_warnings, \
Reported by Pylint.
Line: 16
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.common_utils import suppress_warnings, \
skipIfCompiledWithoutNumpy, enable_profiling_mode_for_profiling_tests, \
IS_SANDCASTLE, TemporaryFileName
from torch.testing._internal.jit_utils import JitTestCase, enable_cpu_fuser, \
_tmp_donotuse_dont_inline_everything, _trace, RUN_CUDA, \
RUN_CUDA_MULTI_GPU, make_global
Reported by Pylint.
Line: 19
Column: 1
from torch.testing._internal.common_utils import suppress_warnings, \
skipIfCompiledWithoutNumpy, enable_profiling_mode_for_profiling_tests, \
IS_SANDCASTLE, TemporaryFileName
from torch.testing._internal.jit_utils import JitTestCase, enable_cpu_fuser, \
_tmp_donotuse_dont_inline_everything, _trace, RUN_CUDA, \
RUN_CUDA_MULTI_GPU, make_global
from torch.testing._internal.common_cuda import with_tf32_off
from torch import Tensor
Reported by Pylint.
Line: 22
Column: 1
from torch.testing._internal.jit_utils import JitTestCase, enable_cpu_fuser, \
_tmp_donotuse_dont_inline_everything, _trace, RUN_CUDA, \
RUN_CUDA_MULTI_GPU, make_global
from torch.testing._internal.common_cuda import with_tf32_off
from torch import Tensor
# Standard library
from collections import namedtuple
from itertools import chain
Reported by Pylint.
Line: 23
Column: 1
_tmp_donotuse_dont_inline_everything, _trace, RUN_CUDA, \
RUN_CUDA_MULTI_GPU, make_global
from torch.testing._internal.common_cuda import with_tf32_off
from torch import Tensor
# Standard library
from collections import namedtuple
from itertools import chain
from typing import Dict, List, Optional, Tuple
Reported by Pylint.
Line: 1905
Column: 24
def test_non_tensor_tracing(self):
def f(x):
return x + param
with self.assertRaisesRegex(RuntimeError, r"Type 'Tuple\[int\]' cannot be traced"):
torch.jit.trace(f, (1,))
def test_trace_skip_none_submodule(self):
class TestModule(torch.nn.Module):
Reported by Pylint.
test/test_cuda.py
992 issues
Line: 15
Column: 1
import threading
import unittest
import torch
import torch.cuda
import torch.cuda.comm as comm
from torch.nn.parallel import scatter_gather
from torch.utils.checkpoint import checkpoint_sequential
from torch._six import inf, nan
Reported by Pylint.
Line: 16
Column: 1
import unittest
import torch
import torch.cuda
import torch.cuda.comm as comm
from torch.nn.parallel import scatter_gather
from torch.utils.checkpoint import checkpoint_sequential
from torch._six import inf, nan
Reported by Pylint.
Line: 17
Column: 1
import torch
import torch.cuda
import torch.cuda.comm as comm
from torch.nn.parallel import scatter_gather
from torch.utils.checkpoint import checkpoint_sequential
from torch._six import inf, nan
from test_torch import AbstractTestCases
Reported by Pylint.
Line: 18
Column: 1
import torch
import torch.cuda
import torch.cuda.comm as comm
from torch.nn.parallel import scatter_gather
from torch.utils.checkpoint import checkpoint_sequential
from torch._six import inf, nan
from test_torch import AbstractTestCases
Reported by Pylint.
Line: 19
Column: 1
import torch.cuda
import torch.cuda.comm as comm
from torch.nn.parallel import scatter_gather
from torch.utils.checkpoint import checkpoint_sequential
from torch._six import inf, nan
from test_torch import AbstractTestCases
from torch.testing._internal.common_methods_invocations import tri_tests_args, tri_large_tests_args, \
Reported by Pylint.
Line: 20
Column: 1
import torch.cuda.comm as comm
from torch.nn.parallel import scatter_gather
from torch.utils.checkpoint import checkpoint_sequential
from torch._six import inf, nan
from test_torch import AbstractTestCases
from torch.testing._internal.common_methods_invocations import tri_tests_args, tri_large_tests_args, \
_compare_trilu_indices, _compare_large_trilu_indices
Reported by Pylint.
Line: 24
Column: 1
from test_torch import AbstractTestCases
from torch.testing._internal.common_methods_invocations import tri_tests_args, tri_large_tests_args, \
_compare_trilu_indices, _compare_large_trilu_indices
from torch.testing._internal.common_utils import TestCase, freeze_rng_state, run_tests, \
NO_MULTIPROCESSING_SPAWN, skipIfRocm, load_tests, IS_REMOTE_GPU, IS_SANDCASTLE, IS_WINDOWS, \
slowTest, skipCUDANonDefaultStreamIf, skipCUDAMemoryLeakCheckIf, TEST_WITH_ROCM, TEST_NUMPY
from torch.testing._internal.autocast_test_lists import AutocastTestLists
Reported by Pylint.
Line: 26
Column: 1
from torch.testing._internal.common_methods_invocations import tri_tests_args, tri_large_tests_args, \
_compare_trilu_indices, _compare_large_trilu_indices
from torch.testing._internal.common_utils import TestCase, freeze_rng_state, run_tests, \
NO_MULTIPROCESSING_SPAWN, skipIfRocm, load_tests, IS_REMOTE_GPU, IS_SANDCASTLE, IS_WINDOWS, \
slowTest, skipCUDANonDefaultStreamIf, skipCUDAMemoryLeakCheckIf, TEST_WITH_ROCM, TEST_NUMPY
from torch.testing._internal.autocast_test_lists import AutocastTestLists
# load_tests from common_utils is used to automatically filter tests for
Reported by Pylint.
Line: 29
Column: 1
from torch.testing._internal.common_utils import TestCase, freeze_rng_state, run_tests, \
NO_MULTIPROCESSING_SPAWN, skipIfRocm, load_tests, IS_REMOTE_GPU, IS_SANDCASTLE, IS_WINDOWS, \
slowTest, skipCUDANonDefaultStreamIf, skipCUDAMemoryLeakCheckIf, TEST_WITH_ROCM, TEST_NUMPY
from torch.testing._internal.autocast_test_lists import AutocastTestLists
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
Reported by Pylint.
Line: 700
Column: 9
os.remove(fname)
def test_get_device_index(self):
from torch.cuda._utils import _get_device_index
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
_get_device_index('cuda0', optional=True)
with self.assertRaisesRegex(ValueError, "Expected a cuda device"):
cpu_device = torch.device('cpu')
Reported by Pylint.
test/jit/test_freezing.py
988 issues
Line: 1
Column: 1
import torch
import torch.nn as nn
import torch.nn.functional as F
import unittest
from torch.testing._internal.jit_utils import JitTestCase
from torch._C import parse_ir
from torch.testing import FileCheck
from torch.testing._internal.common_quantized import override_quantized_engine
Reported by Pylint.
Line: 2
Column: 1
import torch
import torch.nn as nn
import torch.nn.functional as F
import unittest
from torch.testing._internal.jit_utils import JitTestCase
from torch._C import parse_ir
from torch.testing import FileCheck
from torch.testing._internal.common_quantized import override_quantized_engine
Reported by Pylint.
Line: 3
Column: 1
import torch
import torch.nn as nn
import torch.nn.functional as F
import unittest
from torch.testing._internal.jit_utils import JitTestCase
from torch._C import parse_ir
from torch.testing import FileCheck
from torch.testing._internal.common_quantized import override_quantized_engine
Reported by Pylint.
Line: 5
Column: 1
import torch.nn as nn
import torch.nn.functional as F
import unittest
from torch.testing._internal.jit_utils import JitTestCase
from torch._C import parse_ir
from torch.testing import FileCheck
from torch.testing._internal.common_quantized import override_quantized_engine
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
Reported by Pylint.
Line: 6
Column: 1
import torch.nn.functional as F
import unittest
from torch.testing._internal.jit_utils import JitTestCase
from torch._C import parse_ir
from torch.testing import FileCheck
from torch.testing._internal.common_quantized import override_quantized_engine
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.testing._internal.common_utils import set_default_dtype
Reported by Pylint.
Line: 8
Column: 1
from torch.testing._internal.jit_utils import JitTestCase
from torch._C import parse_ir
from torch.testing import FileCheck
from torch.testing._internal.common_quantized import override_quantized_engine
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.testing._internal.common_utils import set_default_dtype
from torch.utils import mkldnn as mkldnn_utils
Reported by Pylint.
Line: 9
Column: 1
from torch._C import parse_ir
from torch.testing import FileCheck
from torch.testing._internal.common_quantized import override_quantized_engine
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.testing._internal.common_utils import set_default_dtype
from torch.utils import mkldnn as mkldnn_utils
Reported by Pylint.
Line: 10
Column: 1
from torch.testing import FileCheck
from torch.testing._internal.common_quantized import override_quantized_engine
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.testing._internal.common_utils import set_default_dtype
from torch.utils import mkldnn as mkldnn_utils
from torch.jit._recursive import wrap_cpp_module
Reported by Pylint.
Line: 11
Column: 1
from torch.testing import FileCheck
from torch.testing._internal.common_quantized import override_quantized_engine
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.testing._internal.common_utils import set_default_dtype
from torch.utils import mkldnn as mkldnn_utils
from torch.jit._recursive import wrap_cpp_module
from typing import Any
Reported by Pylint.
Line: 12
Column: 1
from torch.testing._internal.common_quantized import override_quantized_engine
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.testing._internal.common_utils import set_default_dtype
from torch.utils import mkldnn as mkldnn_utils
from torch.jit._recursive import wrap_cpp_module
from typing import Any
from itertools import product
Reported by Pylint.
torch/testing/_internal/common_nn.py
962 issues
Line: 106
Column: 38
constructor_args=(10, 8),
cpp_constructor_args='torch::nn::LinearOptions(10, 8)',
input_size=(4, 10),
reference_fn=lambda i, p, _: torch.mm(i, p[0].t()) + p[1].view(1, -1).expand(4, 8),
with_tf32=True,
tf32_precision=0.005,
),
dict(
module_name='Linear',
Reported by Pylint.
Line: 116
Column: 38
cpp_constructor_args='torch::nn::LinearOptions(10, 8).bias(false)',
input_size=(4, 10),
desc='no_bias',
reference_fn=lambda i, p, _: torch.mm(i, p[0].t()),
with_tf32=True,
tf32_precision=0.005,
),
dict(
module_name='Threshold',
Reported by Pylint.
Line: 174
Column: 36
dict(
module_name='Flatten',
input_size=(2, 3, 4, 5),
reference_fn=lambda i, *_: torch.flatten(i, 1)
),
dict(
module_name='Softmax',
constructor_args=(1,),
cpp_constructor_args='torch::nn::SoftmaxOptions(1)',
Reported by Pylint.
Line: 181
Column: 36
constructor_args=(1,),
cpp_constructor_args='torch::nn::SoftmaxOptions(1)',
input_size=(10, 20),
reference_fn=lambda i, *_: torch.exp(i).div(torch.exp(i).sum(1, True).expand(10, 20)),
),
dict(
module_name='Softmax2d',
input_size=(1, 3, 10, 20),
reference_fn=lambda i, *_: torch.exp(i).div(torch.exp(i).sum(1, False)),
Reported by Pylint.
Line: 181
Column: 53
constructor_args=(1,),
cpp_constructor_args='torch::nn::SoftmaxOptions(1)',
input_size=(10, 20),
reference_fn=lambda i, *_: torch.exp(i).div(torch.exp(i).sum(1, True).expand(10, 20)),
),
dict(
module_name='Softmax2d',
input_size=(1, 3, 10, 20),
reference_fn=lambda i, *_: torch.exp(i).div(torch.exp(i).sum(1, False)),
Reported by Pylint.
Line: 186
Column: 53
dict(
module_name='Softmax2d',
input_size=(1, 3, 10, 20),
reference_fn=lambda i, *_: torch.exp(i).div(torch.exp(i).sum(1, False)),
),
dict(
module_name='LogSoftmax',
constructor_args=(1,),
cpp_constructor_args='torch::nn::LogSoftmaxOptions(1)',
Reported by Pylint.
Line: 186
Column: 36
dict(
module_name='Softmax2d',
input_size=(1, 3, 10, 20),
reference_fn=lambda i, *_: torch.exp(i).div(torch.exp(i).sum(1, False)),
),
dict(
module_name='LogSoftmax',
constructor_args=(1,),
cpp_constructor_args='torch::nn::LogSoftmaxOptions(1)',
Reported by Pylint.
Line: 193
Column: 36
constructor_args=(1,),
cpp_constructor_args='torch::nn::LogSoftmaxOptions(1)',
input_size=(10, 20),
reference_fn=lambda i, *_: torch.exp(i).div_(torch.exp(i).sum(1, True).expand(10, 20)).log_(),
),
dict(
module_name='LogSoftmax',
constructor_args=(1,),
cpp_constructor_args='torch::nn::LogSoftmaxOptions(1)',
Reported by Pylint.
Line: 193
Column: 54
constructor_args=(1,),
cpp_constructor_args='torch::nn::LogSoftmaxOptions(1)',
input_size=(10, 20),
reference_fn=lambda i, *_: torch.exp(i).div_(torch.exp(i).sum(1, True).expand(10, 20)).log_(),
),
dict(
module_name='LogSoftmax',
constructor_args=(1,),
cpp_constructor_args='torch::nn::LogSoftmaxOptions(1)',
Reported by Pylint.
Line: 200
Column: 36
constructor_args=(1,),
cpp_constructor_args='torch::nn::LogSoftmaxOptions(1)',
input_size=(1, 3, 10, 20),
reference_fn=lambda i, *_: torch.exp(i).div_(torch.exp(i).sum(1, False)).log_(),
desc='multiparam',
),
dict(
module_name='ELU',
constructor_args=(2.,),
Reported by Pylint.
test/test_jit_fuser_te.py
886 issues
Line: 5
Column: 1
import unittest
import contextlib
import math
import torch
import torch.nn.functional as F
from torch.testing import FileCheck
from typing import List
# these needs to be set before `common_utils`
Reported by Pylint.
Line: 6
Column: 1
import contextlib
import math
import torch
import torch.nn.functional as F
from torch.testing import FileCheck
from typing import List
# these needs to be set before `common_utils`
# infers `GRAPH_EXECUTOR`.
Reported by Pylint.
Line: 7
Column: 1
import math
import torch
import torch.nn.functional as F
from torch.testing import FileCheck
from typing import List
# these needs to be set before `common_utils`
# infers `GRAPH_EXECUTOR`.
# this file **requires** these settings
Reported by Pylint.
Line: 19
Column: 1
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, \
enable_profiling_mode_for_profiling_tests, TestCase
from torch.testing._internal.jit_utils import JitTestCase, \
RUN_CUDA, RUN_CUDA_HALF, RUN_CUDA_MULTI_GPU, warmup_backward, set_fusion_group_inlining
from torch.testing._internal.common_methods_invocations import op_db
Reported by Pylint.
Line: 21
Column: 1
from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, \
enable_profiling_mode_for_profiling_tests, TestCase
from torch.testing._internal.jit_utils import JitTestCase, \
RUN_CUDA, RUN_CUDA_HALF, RUN_CUDA_MULTI_GPU, warmup_backward, set_fusion_group_inlining
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests
Reported by Pylint.
Line: 24
Column: 1
from torch.testing._internal.jit_utils import JitTestCase, \
RUN_CUDA, RUN_CUDA_HALF, RUN_CUDA_MULTI_GPU, warmup_backward, set_fusion_group_inlining
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests
from textwrap import dedent
from itertools import product, permutations
Reported by Pylint.
Line: 25
Column: 1
RUN_CUDA, RUN_CUDA_HALF, RUN_CUDA_MULTI_GPU, warmup_backward, set_fusion_group_inlining
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests
from textwrap import dedent
from itertools import product, permutations
from test_jit import backward_graph, get_lstm_inputs, get_milstm_inputs, \
Reported by Pylint.
Line: 30
Column: 1
from textwrap import dedent
from itertools import product, permutations
from test_jit import backward_graph, get_lstm_inputs, get_milstm_inputs, \
LSTMCellC, LSTMCellF, LSTMCellS, MiLSTMCell
from jit.test_fuser_common import TestFuserCommon # noqa: F401
FUSION_GROUP = 'prim::TensorExprGroup'
Reported by Pylint.
Line: 920
Column: 29
self.assertTrue(torch.all(out1 < 1))
self.assertTrue(torch.all(out2 >= 0))
self.assertTrue(torch.all(out2 < 1))
self.assertAllFused(m.create.graph_for(x))
@staticmethod
def fn_test_relu(x, y):
return F.relu(x + .5 * y)
Reported by Pylint.
Line: 1106
Column: 9
# test not fusing non-const inputs
@torch.jit.script
def foo(x, dtype: int):
return x.to(dtype)
foo(torch.tensor([3.], dtype=torch.float), torch.int)
foo(torch.tensor([3.], dtype=torch.float), torch.int)
FileCheck().check_not("TensorExpr").run(torch.jit.last_executed_optimized_graph())
Reported by Pylint.
test/distributions/test_distributions.py
885 issues
Line: 32
Column: 1
from itertools import product
from random import shuffle
import torch
# TODO: remove this global setting
# Distributions tests use double as the default dtype
torch.set_default_dtype(torch.double)
Reported by Pylint.
Line: 38
Column: 1
# Distributions tests use double as the default dtype
torch.set_default_dtype(torch.double)
from torch._six import inf
from torch.testing._internal.common_utils import \
(TestCase, run_tests, set_rng_seed, TEST_WITH_UBSAN, load_tests,
gradcheck, IS_MACOS)
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.autograd import grad
Reported by Pylint.
Line: 39
Column: 1
torch.set_default_dtype(torch.double)
from torch._six import inf
from torch.testing._internal.common_utils import \
(TestCase, run_tests, set_rng_seed, TEST_WITH_UBSAN, load_tests,
gradcheck, IS_MACOS)
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.autograd import grad
from torch.autograd.functional import jacobian
Reported by Pylint.
Line: 42
Column: 1
from torch.testing._internal.common_utils import \
(TestCase, run_tests, set_rng_seed, TEST_WITH_UBSAN, load_tests,
gradcheck, IS_MACOS)
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.autograd import grad
from torch.autograd.functional import jacobian
from torch.distributions import (Bernoulli, Beta, Binomial, Categorical,
Cauchy, Chi2, ContinuousBernoulli, Dirichlet,
Distribution, Exponential, ExponentialFamily,
Reported by Pylint.
Line: 43
Column: 1
(TestCase, run_tests, set_rng_seed, TEST_WITH_UBSAN, load_tests,
gradcheck, IS_MACOS)
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.autograd import grad
from torch.autograd.functional import jacobian
from torch.distributions import (Bernoulli, Beta, Binomial, Categorical,
Cauchy, Chi2, ContinuousBernoulli, Dirichlet,
Distribution, Exponential, ExponentialFamily,
FisherSnedecor, Gamma, Geometric, Gumbel,
Reported by Pylint.
Line: 44
Column: 1
gradcheck, IS_MACOS)
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.autograd import grad
from torch.autograd.functional import jacobian
from torch.distributions import (Bernoulli, Beta, Binomial, Categorical,
Cauchy, Chi2, ContinuousBernoulli, Dirichlet,
Distribution, Exponential, ExponentialFamily,
FisherSnedecor, Gamma, Geometric, Gumbel,
HalfCauchy, HalfNormal, Independent, Kumaraswamy,
Reported by Pylint.
Line: 45
Column: 1
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.autograd import grad
from torch.autograd.functional import jacobian
from torch.distributions import (Bernoulli, Beta, Binomial, Categorical,
Cauchy, Chi2, ContinuousBernoulli, Dirichlet,
Distribution, Exponential, ExponentialFamily,
FisherSnedecor, Gamma, Geometric, Gumbel,
HalfCauchy, HalfNormal, Independent, Kumaraswamy,
LKJCholesky, Laplace, LogisticNormal,
Reported by Pylint.
Line: 58
Column: 1
Pareto, Poisson, RelaxedBernoulli, RelaxedOneHotCategorical,
StudentT, TransformedDistribution, Uniform,
VonMises, Weibull, constraints, kl_divergence)
from torch.distributions.constraint_registry import transform_to
from torch.distributions.constraints import Constraint, is_dependent
from torch.distributions.dirichlet import _Dirichlet_backward
from torch.distributions.kl import _kl_expfamily_expfamily
from torch.distributions.transforms import (AffineTransform, CatTransform, ExpTransform,
StackTransform, identity_transform)
Reported by Pylint.
Line: 59
Column: 1
StudentT, TransformedDistribution, Uniform,
VonMises, Weibull, constraints, kl_divergence)
from torch.distributions.constraint_registry import transform_to
from torch.distributions.constraints import Constraint, is_dependent
from torch.distributions.dirichlet import _Dirichlet_backward
from torch.distributions.kl import _kl_expfamily_expfamily
from torch.distributions.transforms import (AffineTransform, CatTransform, ExpTransform,
StackTransform, identity_transform)
from torch.distributions.utils import (probs_to_logits, lazy_property, tril_matrix_to_vec,
Reported by Pylint.
Line: 60
Column: 1
VonMises, Weibull, constraints, kl_divergence)
from torch.distributions.constraint_registry import transform_to
from torch.distributions.constraints import Constraint, is_dependent
from torch.distributions.dirichlet import _Dirichlet_backward
from torch.distributions.kl import _kl_expfamily_expfamily
from torch.distributions.transforms import (AffineTransform, CatTransform, ExpTransform,
StackTransform, identity_transform)
from torch.distributions.utils import (probs_to_logits, lazy_property, tril_matrix_to_vec,
vec_to_tril_matrix)
Reported by Pylint.
test/test_sparse.py
880 issues
Line: 1
Column: 1
import torch
import itertools
import functools
import operator
import random
from collections import defaultdict
import unittest
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, make_tensor, \
Reported by Pylint.
Line: 8
Column: 1
import random
from collections import defaultdict
import unittest
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, make_tensor, \
DeterministicGuard
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
Reported by Pylint.
Line: 11
Column: 1
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, gradcheck, coalescedonoff, make_tensor, \
DeterministicGuard
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCPU, onlyCPU, onlyCUDA, deviceCountAtLeast)
from torch.testing._internal.common_methods_invocations import \
Reported by Pylint.
Line: 14
Column: 1
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCPU, onlyCPU, onlyCUDA, deviceCountAtLeast)
from torch.testing._internal.common_methods_invocations import \
(sparse_unary_ufuncs)
if TEST_SCIPY:
Reported by Pylint.
Line: 16
Column: 1
from typing import Dict, Any
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCPU, onlyCPU, onlyCUDA, deviceCountAtLeast)
from torch.testing._internal.common_methods_invocations import \
(sparse_unary_ufuncs)
if TEST_SCIPY:
import scipy.sparse
Reported by Pylint.
Line: 20
Column: 5
(sparse_unary_ufuncs)
if TEST_SCIPY:
import scipy.sparse
# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
Reported by Pylint.
Line: 2926
Column: 9
@coalescedonoff
@dtypes(torch.double)
def test_softmax(self, device, dtype, coalesced):
import torch.nn.functional as F
def to_dense(sparse, fill_value=None):
"""
Return dense tensor from a sparse tensor using given fill value.
"""
Reported by Pylint.
Line: 24
Column: 1
# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
# batched grad doesn't support sparse
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
class TestSparse(TestCase):
Reported by Pylint.
Line: 62
Column: 24
"""
assert not x.is_coalesced()
existing_indices = set()
for i in range(x._nnz()):
index = str(x._indices()[:, i])
if index in existing_indices:
return True
else:
existing_indices.add(index)
Reported by Pylint.
Line: 63
Column: 25
assert not x.is_coalesced()
existing_indices = set()
for i in range(x._nnz()):
index = str(x._indices()[:, i])
if index in existing_indices:
return True
else:
existing_indices.add(index)
Reported by Pylint.