The following issues were found:
test/test_view_ops.py
403 issues
Line: 1
Column: 1
import torch
import numpy as np
import unittest
from itertools import product, permutations, combinations
from functools import partial
import random
from torch.testing._internal.common_utils import \
Reported by Pylint.
Line: 9
Column: 1
from functools import partial
import random
from torch.testing._internal.common_utils import \
(TestCase, run_tests, suppress_warnings, make_tensor)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, onlyCPU, dtypes, onlyOnCPUAndCUDA)
# TODO: replace this with make_tensor() in common_utils.py
Reported by Pylint.
Line: 11
Column: 1
from torch.testing._internal.common_utils import \
(TestCase, run_tests, suppress_warnings, make_tensor)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, onlyCPU, dtypes, onlyOnCPUAndCUDA)
# TODO: replace this with make_tensor() in common_utils.py
def _generate_input(shape, dtype, device, with_extremal):
if shape == ():
Reported by Pylint.
Line: 14
Column: 3
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, onlyCPU, dtypes, onlyOnCPUAndCUDA)
# TODO: replace this with make_tensor() in common_utils.py
def _generate_input(shape, dtype, device, with_extremal):
if shape == ():
x = torch.tensor((), dtype=dtype, device=device)
else:
if dtype.is_floating_point or dtype.is_complex:
Reported by Pylint.
Line: 44
Column: 3
return x
# TODO: replace this with make_tensor() in common_utils.py
def _rand_shape(dim, min_size, max_size):
shape = []
for i in range(dim):
shape.append(random.randint(min_size, max_size))
return tuple(shape)
Reported by Pylint.
Line: 47
Column: 9
# TODO: replace this with make_tensor() in common_utils.py
def _rand_shape(dim, min_size, max_size):
shape = []
for i in range(dim):
shape.append(random.randint(min_size, max_size))
return tuple(shape)
# TODO: refactor tests to avoid this function
# Converts half/bfloat16 dtype to float when device is cpu
Reported by Pylint.
Line: 51
Column: 3
shape.append(random.randint(min_size, max_size))
return tuple(shape)
# TODO: refactor tests to avoid this function
# Converts half/bfloat16 dtype to float when device is cpu
def _convert_t(dtype, device):
if device == 'cpu' and dtype in {torch.half, torch.bfloat16}:
return torch.float
return dtype
Reported by Pylint.
Line: 58
Column: 3
return torch.float
return dtype
# TODO: replace this with make_tensor() in common_utils.py
# Returns a tensor of the requested shape, dtype, and device
# Requesting a half CPU tensor returns a float CPU tensor with
# values representable by a half.
# Initialization uses randint for non-float types and randn for float types.
def _make_tensor(shape, dtype, device, fill_ones=False) -> torch.Tensor:
Reported by Pylint.
Line: 90
Column: 17
exact_dtype = True
def is_view_of(self, base, other):
if (not other._is_view() or
other is base or
other._base is not base or
base.device != other.device):
return False
# Note: only validates storage on native device types
Reported by Pylint.
Line: 92
Column: 17
def is_view_of(self, base, other):
if (not other._is_view() or
other is base or
other._base is not base or
base.device != other.device):
return False
# Note: only validates storage on native device types
# because some accelerators, like XLA, do not expose storage
if base.device.type == 'cpu' or base.device.type == 'cuda':
Reported by Pylint.
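Note: the report truncates Pylint's message text, but the column markers above line up with an unused loop variable in _rand_shape (Line 47, Column 9 points at i) and protected member access in is_view_of (the Column 17 markers point at other._is_view() and other._base). Assuming those are the triggered checks, a minimal sketch of the usual remediations:
import random
import torch

def _rand_shape(dim, min_size, max_size):
    # A throwaway "_" variable (or a comprehension) avoids the
    # unused-variable finding on the loop index.
    return tuple(random.randint(min_size, max_size) for _ in range(dim))

def is_view_of(base, other):
    # Test helpers inspect tensor internals on purpose, so
    # protected-access is normally silenced inline, not refactored away.
    return other._is_view() and other._base is base  # pylint: disable=protected-access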
torch/distributions/kl.py
402 issues
Line: 9
Column: 1
import torch
from torch._six import inf
from .bernoulli import Bernoulli
from .beta import Beta
from .binomial import Binomial
from .categorical import Categorical
from .cauchy import Cauchy
from .continuous_bernoulli import ContinuousBernoulli
Reported by Pylint.
Line: 10
Column: 1
from torch._six import inf
from .bernoulli import Bernoulli
from .beta import Beta
from .binomial import Binomial
from .categorical import Categorical
from .cauchy import Cauchy
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
Reported by Pylint.
Line: 11
Column: 1
from .bernoulli import Bernoulli
from .beta import Beta
from .binomial import Binomial
from .categorical import Categorical
from .cauchy import Cauchy
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
from .distribution import Distribution
Reported by Pylint.
Line: 12
Column: 1
from .bernoulli import Bernoulli
from .beta import Beta
from .binomial import Binomial
from .categorical import Categorical
from .cauchy import Cauchy
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
from .distribution import Distribution
from .exponential import Exponential
Reported by Pylint.
Line: 13
Column: 1
from .beta import Beta
from .binomial import Binomial
from .categorical import Categorical
from .cauchy import Cauchy
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
from .distribution import Distribution
from .exponential import Exponential
from .exp_family import ExponentialFamily
Reported by Pylint.
Line: 14
Column: 1
from .binomial import Binomial
from .categorical import Categorical
from .cauchy import Cauchy
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
from .distribution import Distribution
from .exponential import Exponential
from .exp_family import ExponentialFamily
from .gamma import Gamma
Reported by Pylint.
Line: 15
Column: 1
from .categorical import Categorical
from .cauchy import Cauchy
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
from .distribution import Distribution
from .exponential import Exponential
from .exp_family import ExponentialFamily
from .gamma import Gamma
from .geometric import Geometric
Reported by Pylint.
Line: 16
Column: 1
from .cauchy import Cauchy
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
from .distribution import Distribution
from .exponential import Exponential
from .exp_family import ExponentialFamily
from .gamma import Gamma
from .geometric import Geometric
from .gumbel import Gumbel
Reported by Pylint.
Line: 17
Column: 1
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
from .distribution import Distribution
from .exponential import Exponential
from .exp_family import ExponentialFamily
from .gamma import Gamma
from .geometric import Geometric
from .gumbel import Gumbel
from .half_normal import HalfNormal
Reported by Pylint.
Line: 18
Column: 1
from .dirichlet import Dirichlet
from .distribution import Distribution
from .exponential import Exponential
from .exp_family import ExponentialFamily
from .gamma import Gamma
from .geometric import Geometric
from .gumbel import Gumbel
from .half_normal import HalfNormal
from .independent import Independent
Reported by Pylint.
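Note: every finding in this file sits on a Column 1 import line, which is consistent with Pylint's import-order and grouping family of checks (an assumption; the message IDs are not shown). For illustration, the layout those checks expect is standard library first, then third-party, then local relative imports, with a blank line between groups:
# Hypothetical conforming import block for a module shaped like kl.py:
import math                        # standard library

import torch                       # third-party

from .bernoulli import Bernoulli   # local package
from .beta import Beta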
caffe2/python/operator_test/elementwise_ops_test.py
398 issues
Line: 7
Column: 1
from caffe2.python import core, workspace
from hypothesis import given, assume, settings
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
import unittest
Reported by Pylint.
Line: 9
Column: 1
from caffe2.python import core, workspace
from hypothesis import given, assume, settings
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
import unittest
class TestElementwiseOps(hu.HypothesisTestCase):
Reported by Pylint.
Line: 63
Column: 34
@given(n=st.integers(0, 6), m=st.integers(4, 6),
seed=st.integers(0, 1000), **hu.gcs)
@settings(deadline=10000)
def test_log(self, n, m, gc, dc, seed):
np.random.seed(seed)
X = np.random.rand(n, m).astype(np.float32) + 1.0
def log_op(X):
return [np.log(X)]
Reported by Pylint.
Line: 91
Column: 38
@given(n=st.integers(0, 10), m=st.integers(4, 6),
d=st.integers(2, 3), seed=st.integers(0, 1000), **hu.gcs)
@settings(deadline=10000)
def test_powt(self, n, m, d, gc, dc, seed):
np.random.seed(seed)
X = np.random.rand(n, m, d).astype(np.float32) + 1.0
Y = np.random.rand(n, m, d).astype(np.float32) + 2.0
def powt_op(X, Y):
Reported by Pylint.
Line: 122
Column: 34
@given(n=st.integers(0, 6), m=st.integers(4, 6),
seed=st.integers(0, 1000), **hu.gcs)
@settings(deadline=10000)
def test_sqr(self, n, m, gc, dc, seed):
np.random.seed(seed)
X = np.random.rand(n, m).astype(np.float32)
def sqr_op(X):
return [np.square(X)]
Reported by Pylint.
Line: 243
Column: 34
def cube_ref(X):
return [np.power(X, 3)]
def cube_grad_ref(g_out, outputs, fwd_inputs):
dY = g_out
[X] = fwd_inputs
return [dY * np.square(X) * 3]
self.assertReferenceChecks(
Reported by Pylint.
Line: 261
Column: 42
@given(X=hu.tensor(dtype=np.float32), in_place=st.booleans(), **hu.gcs)
@settings(deadline=10000)
def test_cbrt(self, X, in_place, gc, dc):
op = core.CreateOperator(
"Cbrt",
["X"],
["X"] if in_place else ["Y"],
)
Reported by Pylint.
Line: 282
Column: 47
@given(X=hu.tensor(elements=hu.floats(min_value=1.0, max_value=10.0), dtype=np.float32),
in_place=st.booleans(), **hu.gcs)
@settings(deadline=10000)
def test_cbrt_grad(self, X, in_place, gc, dc):
op = core.CreateOperator(
"Cbrt",
["X"],
["X"] if in_place else ["Y"],
)
Reported by Pylint.
Line: 302
Column: 36
@given(n=st.integers(0, 6), m=st.integers(4, 6),
seed=st.integers(0, 1000), **hu.gcs)
@settings(deadline=10000)
def test_swish(self, n, m, gc, dc, seed):
np.random.seed(seed)
X = np.random.rand(n, m).astype(np.float32)
def swish(X):
return [np.divide(X, (1. + np.exp(-X)))]
Reported by Pylint.
Line: 330
Column: 53
@given(n=st.integers(0, 6), m=st.integers(4, 6),
seed=st.integers(0, 1000), **hu.gcs)
@settings(deadline=10000)
def test_swish_gradient_inplace(self, n, m, gc, dc, seed):
np.random.seed(seed)
def swish(X):
return [np.divide(X, (1. + np.exp(-X)))]
Reported by Pylint.
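Note: the column markers on the def lines above (34, 38, 42, 36, 53, ...) consistently fall on the dc parameter injected by @given(**hu.gcs), which points at Pylint's unused-argument check (an assumption, since the message is omitted); Line 243's marker similarly lands on the unused outputs parameter. When a test body never touches dc, the conventional fixes are an inline disable or an underscore rename:
@given(n=st.integers(0, 6), m=st.integers(4, 6),
       seed=st.integers(0, 1000), **hu.gcs)
@settings(deadline=10000)
def test_log(self, n, m, gc, dc, seed):  # pylint: disable=unused-argument
    # dc (the device list) is required by the hypothesis plumbing even
    # when the body only exercises gc.
    np.random.seed(seed)
    X = np.random.rand(n, m).astype(np.float32) + 1.0
    ...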
test/jit/test_recursive_script.py
396 issues
Line: 5
Column: 1
import sys
import types
import typing
import typing_extensions
from typing import List, Dict, Optional, Tuple
import torch
import torch.nn as nn
from torch import Tensor
Reported by Pylint.
Line: 8
Column: 1
import typing_extensions
from typing import List, Dict, Optional, Tuple
import torch
import torch.nn as nn
from torch import Tensor
from torch.testing import FileCheck
from collections import OrderedDict
Reported by Pylint.
Line: 9
Column: 1
from typing import List, Dict, Optional, Tuple
import torch
import torch.nn as nn
from torch import Tensor
from torch.testing import FileCheck
from collections import OrderedDict
# Make the helper files in test/ importable
Reported by Pylint.
Line: 10
Column: 1
import torch
import torch.nn as nn
from torch import Tensor
from torch.testing import FileCheck
from collections import OrderedDict
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
Reported by Pylint.
Line: 11
Column: 1
import torch
import torch.nn as nn
from torch import Tensor
from torch.testing import FileCheck
from collections import OrderedDict
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
Reported by Pylint.
Line: 17
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, _tmp_donotuse_dont_inline_everything
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
Reported by Pylint.
Line: 75
Column: 20
def test_failed_function_compilation(self):
def fn(x):
return i_dont_exist
class M(torch.nn.Module):
def __init__(self, fn):
super(M, self).__init__()
self.fn = fn
Reported by Pylint.
Line: 748
Column: 26
return "new"
m.i_am_ignored = types.MethodType(i_am_ignored, m)
self.assertEqual(m.i_am_ignored(), "new")
# ScriptModule should correctly reflect the override.
s = torch.jit.script(m)
self.assertEqual(s.i_am_ignored(), "new")
Reported by Pylint.
Line: 34
Column: 9
def forward(self):
assert self.x is None
m = torch.jit.script(M())
self.checkModule(M(), ())
def test_script_function_attribute(self):
@torch.jit.script
def fn1(x):
Reported by Pylint.
Line: 74
Column: 16
self.checkModule(mod, (torch.randn(2, 2),))
def test_failed_function_compilation(self):
def fn(x):
return i_dont_exist
class M(torch.nn.Module):
def __init__(self, fn):
super(M, self).__init__()
Reported by Pylint.
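Note: two of the findings here are deliberate test constructs. The imports flagged after sys.path.append(...) cannot be moved, because the path patch must run first, and i_dont_exist at Line 75, Column 20 is an intentionally undefined name used to assert that compilation fails. Assuming wrong-import-position and undefined-variable are the checks involved, the standard treatment is inline suppression:
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase  # pylint: disable=wrong-import-position

def fn(x):  # pylint: disable=unused-argument
    # Deliberately undefined: the test asserts that scripting this fails.
    return i_dont_exist  # pylint: disable=undefined-variable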
test/test_jit_fuser.py
393 issues
Line: 5
Column: 1
import unittest
import os
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing import FileCheck
from torch.testing._internal.common_utils import run_tests, IS_SANDCASTLE, ProfilingMode, GRAPH_EXECUTOR, \
Reported by Pylint.
Line: 6
Column: 1
import os
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing import FileCheck
from torch.testing._internal.common_utils import run_tests, IS_SANDCASTLE, ProfilingMode, GRAPH_EXECUTOR, \
enable_profiling_mode_for_profiling_tests, IS_WINDOWS, TemporaryDirectoryName, shell
Reported by Pylint.
Line: 7
Column: 1
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing import FileCheck
from torch.testing._internal.common_utils import run_tests, IS_SANDCASTLE, ProfilingMode, GRAPH_EXECUTOR, \
enable_profiling_mode_for_profiling_tests, IS_WINDOWS, TemporaryDirectoryName, shell
from torch.testing._internal.jit_utils import JitTestCase, enable_cpu_fuser, _inline_everything, \
Reported by Pylint.
Line: 8
Column: 1
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing import FileCheck
from torch.testing._internal.common_utils import run_tests, IS_SANDCASTLE, ProfilingMode, GRAPH_EXECUTOR, \
enable_profiling_mode_for_profiling_tests, IS_WINDOWS, TemporaryDirectoryName, shell
from torch.testing._internal.jit_utils import JitTestCase, enable_cpu_fuser, _inline_everything, \
RUN_CUDA, RUN_CUDA_HALF, RUN_CUDA_MULTI_GPU, warmup_backward
Reported by Pylint.
Line: 10
Column: 1
import torch.nn.functional as F
from torch.testing import FileCheck
from torch.testing._internal.common_utils import run_tests, IS_SANDCASTLE, ProfilingMode, GRAPH_EXECUTOR, \
enable_profiling_mode_for_profiling_tests, IS_WINDOWS, TemporaryDirectoryName, shell
from torch.testing._internal.jit_utils import JitTestCase, enable_cpu_fuser, _inline_everything, \
RUN_CUDA, RUN_CUDA_HALF, RUN_CUDA_MULTI_GPU, warmup_backward
from textwrap import dedent
from itertools import product, permutations
Reported by Pylint.
Line: 12
Column: 1
from torch.testing._internal.common_utils import run_tests, IS_SANDCASTLE, ProfilingMode, GRAPH_EXECUTOR, \
enable_profiling_mode_for_profiling_tests, IS_WINDOWS, TemporaryDirectoryName, shell
from torch.testing._internal.jit_utils import JitTestCase, enable_cpu_fuser, _inline_everything, \
RUN_CUDA, RUN_CUDA_HALF, RUN_CUDA_MULTI_GPU, warmup_backward
from textwrap import dedent
from itertools import product, permutations
from torch.testing._internal.common_cuda import with_tf32_off
Reported by Pylint.
Line: 16
Column: 1
RUN_CUDA, RUN_CUDA_HALF, RUN_CUDA_MULTI_GPU, warmup_backward
from textwrap import dedent
from itertools import product, permutations
from torch.testing._internal.common_cuda import with_tf32_off
from test_jit import backward_graph, all_backward_graphs, get_lstm_inputs, get_milstm_inputs, \
LSTMCellC, LSTMCellF, LSTMCellS, MiLSTMCell
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
Reported by Pylint.
Line: 18
Column: 1
from itertools import product, permutations
from torch.testing._internal.common_cuda import with_tf32_off
from test_jit import backward_graph, all_backward_graphs, get_lstm_inputs, get_milstm_inputs, \
LSTMCellC, LSTMCellF, LSTMCellS, MiLSTMCell
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
Reported by Pylint.
Line: 796
Column: 17
FileCheck.check("FusionGroup").run(str(graph))
except RuntimeError as e:
if 'Failed to compile' in e.args[0]:
warnings.warn('CPU fuser test has failed! This is not a hard failure, '
'because the kernels sometimes trigger bugs in compilers '
'(most notably GCC 7.2).')
raise unittest.SkipTest('Failed to compile') from e
else:
raise
Reported by Pylint.
Line: 838
Column: 29
self.assertTrue(torch.all(out1 < 1))
self.assertTrue(torch.all(out2 >= 0))
self.assertTrue(torch.all(out2 < 1))
self.assertAllFused(m.create.graph_for(x))
@staticmethod
def fn_test_relu(x, y):
return F.relu(x + .5 * y)
Reported by Pylint.
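Note: the marker at Line 796, Column 17 lands on warnings.warn, which may indicate a missing import warnings (undefined-variable) — a guess, since the message is truncated. The surrounding except block would also trip Pylint's no-else-raise, because the first branch always raises. A sketch addressing both, wrapped in a hypothetical helper:
import unittest
import warnings

def run_cpu_fuser_test(fn):
    # Hypothetical wrapper illustrating the flattened control flow.
    try:
        fn()
    except RuntimeError as e:
        if 'Failed to compile' in e.args[0]:
            warnings.warn('CPU fuser test has failed! This is not a hard '
                          'failure, because the kernels sometimes trigger '
                          'bugs in compilers (most notably GCC 7.2).')
            raise unittest.SkipTest('Failed to compile') from e
        raise  # no else branch needed: the branch above always raises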
test/distributed/pipeline/sync/test_pipe.py
377 issues
Line: 11
Column: 1
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
Reported by Pylint.
Line: 13
Column: 1
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import NoChunk
from torch.distributed.pipeline.sync import Pipe
Reported by Pylint.
Line: 14
Column: 1
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import NoChunk
from torch.distributed.pipeline.sync import Pipe
from torch.distributed.pipeline.sync.pipe import PipeSequential
Reported by Pylint.
Line: 15
Column: 1
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import NoChunk
from torch.distributed.pipeline.sync import Pipe
from torch.distributed.pipeline.sync.pipe import PipeSequential
Reported by Pylint.
Line: 17
Column: 1
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import NoChunk
from torch.distributed.pipeline.sync import Pipe
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
Reported by Pylint.
Line: 18
Column: 1
from torch import Tensor
from torch.distributed.pipeline.sync import NoChunk
from torch.distributed.pipeline.sync import Pipe
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
Reported by Pylint.
Line: 19
Column: 1
from torch.distributed.pipeline.sync import NoChunk
from torch.distributed.pipeline.sync import Pipe
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def test_pipe_without_rpc():
Reported by Pylint.
Line: 27
Column: 9
def test_pipe_without_rpc():
model = nn.Sequential(nn.Linear(1, 1))
with pytest.raises(RuntimeError, match='Please initialize RPC framework'):
pipe = Pipe(model, chunks=1)
def test_parameters(setup_rpc):
model = nn.Sequential(nn.Linear(1, 1))
pipe = Pipe(model, chunks=1)
assert list(pipe.parameters()) != []
Reported by Pylint.
Line: 29
Column: 21
with pytest.raises(RuntimeError, match='Please initialize RPC framework'):
pipe = Pipe(model, chunks=1)
def test_parameters(setup_rpc):
model = nn.Sequential(nn.Linear(1, 1))
pipe = Pipe(model, chunks=1)
assert list(pipe.parameters()) != []
Reported by Pylint.
Line: 35
Column: 23
assert list(pipe.parameters()) != []
def test_public_attrs(setup_rpc):
class MyString:
def __init__(self, value):
self.value = value
def __str__(self):
Reported by Pylint.
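Note: the marker at Line 27, Column 9 sits on the pipe assignment inside pytest.raises (the value is never read), and those at Lines 29 and 35 sit on the setup_rpc fixture argument. Assuming unused-variable and unused-argument are the checks, both have idiomatic pytest fixes:
import pytest
from torch import nn
from torch.distributed.pipeline.sync import Pipe

def test_pipe_without_rpc():
    model = nn.Sequential(nn.Linear(1, 1))
    with pytest.raises(RuntimeError, match='Please initialize RPC framework'):
        Pipe(model, chunks=1)  # drop the assignment; only the raise matters

@pytest.mark.usefixtures("setup_rpc")  # fixture wanted for side effects only
def test_parameters():
    model = nn.Sequential(nn.Linear(1, 1))
    pipe = Pipe(model, chunks=1)
    assert list(pipe.parameters()) != []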
torch/backends/_nnapi/serializer.py
377 issues
Line: 922
Column: 21
"Currently, reshape is only supported on NHWC tensors if the target size is [X, -1].")
# Bit of a hack here. Use a real tensor to infer the output shape.
out_shape = torch.zeros(1).expand(in_oper.shape).reshape(shape).shape
out_oper = in_oper._replace(shape=out_shape, dim_order=DimOrder.PRESUMED_CONTIGUOUS)
inputs = [None] * 2
inputs[0] = in_id
inputs[1] = self.add_immediate_int_vector(shape)
Reported by Pylint.
Line: 1635
Column: 40
assert raw_bias.shape[0] == raw_weight.shape[0]
assert raw_weight.shape[1] == input_oper.shape[1]
assert raw_weight.qscheme() == torch.per_tensor_affine
if raw_weight.dtype == torch.quint8:
unsigned_weight = raw_weight
else:
assert raw_weight.dtype == torch.qint8
unsigned_weight = torch._make_per_tensor_quantized_tensor(
Reported by Pylint.
Line: 1636
Column: 32
assert raw_weight.shape[1] == input_oper.shape[1]
assert raw_weight.qscheme() == torch.per_tensor_affine
if raw_weight.dtype == torch.quint8:
unsigned_weight = raw_weight
else:
assert raw_weight.dtype == torch.qint8
unsigned_weight = torch._make_per_tensor_quantized_tensor(
(raw_weight.int_repr().int() + 128).to(torch.uint8),
Reported by Pylint.
Line: 1639
Column: 40
if raw_weight.dtype == torch.quint8:
unsigned_weight = raw_weight
else:
assert raw_weight.dtype == torch.qint8
unsigned_weight = torch._make_per_tensor_quantized_tensor(
(raw_weight.int_repr().int() + 128).to(torch.uint8),
scale=raw_weight.q_scale(),
zero_point=raw_weight.q_zero_point() + 128)
weight_scale = unsigned_weight.q_scale()
Reported by Pylint.
Line: 1640
Column: 31
unsigned_weight = raw_weight
else:
assert raw_weight.dtype == torch.qint8
unsigned_weight = torch._make_per_tensor_quantized_tensor(
(raw_weight.int_repr().int() + 128).to(torch.uint8),
scale=raw_weight.q_scale(),
zero_point=raw_weight.q_zero_point() + 128)
weight_scale = unsigned_weight.q_scale()
bias_scale = input_oper.scale * weight_scale
Reported by Pylint.
Line: 1641
Column: 56
else:
assert raw_weight.dtype == torch.qint8
unsigned_weight = torch._make_per_tensor_quantized_tensor(
(raw_weight.int_repr().int() + 128).to(torch.uint8),
scale=raw_weight.q_scale(),
zero_point=raw_weight.q_zero_point() + 128)
weight_scale = unsigned_weight.q_scale()
bias_scale = input_oper.scale * weight_scale
int_bias = torch.quantize_per_tensor(raw_bias, bias_scale, 0, torch.qint32)
Reported by Pylint.
Line: 1646
Column: 71
zero_point=raw_weight.q_zero_point() + 128)
weight_scale = unsigned_weight.q_scale()
bias_scale = input_oper.scale * weight_scale
int_bias = torch.quantize_per_tensor(raw_bias, bias_scale, 0, torch.qint32)
bias_id = self.add_tensor_operand_for_weight(int_bias)
multiplier = input_oper.scale * weight_scale / out_scale
assert multiplier > 0
if multiplier >= 1:
Reported by Pylint.
Line: 1646
Column: 20
zero_point=raw_weight.q_zero_point() + 128)
weight_scale = unsigned_weight.q_scale()
bias_scale = input_oper.scale * weight_scale
int_bias = torch.quantize_per_tensor(raw_bias, bias_scale, 0, torch.qint32)
bias_id = self.add_tensor_operand_for_weight(int_bias)
multiplier = input_oper.scale * weight_scale / out_scale
assert multiplier > 0
if multiplier >= 1:
Reported by Pylint.
Line: 1684
Column: 33
ctype, value = self.get_constant_value(jit_bias)
if ctype.kind() == "NoneType":
bias_idx = 1 if transpose else 0
nnapi_bias_tensor = torch.zeros(weight_tensor.size()[bias_idx], dtype=weight_tensor.dtype)
bias_id = self.add_tensor_operand_for_weight(nnapi_bias_tensor)
bias_oper = self.operands[bias_id]
return bias_id, bias_oper
else:
return self.get_tensor_operand_for_weight(jit_bias)
Reported by Pylint.
Line: 1812
Column: 40
assert raw_bias is not None
args = self.get_conv_pool_args_2d_from_pack(raw_weight.shape[2:4], packed_config)
assert raw_weight.qscheme() == torch.per_tensor_affine
if raw_weight.dtype == torch.quint8:
unsigned_weight = raw_weight
else:
assert raw_weight.dtype == torch.qint8
unsigned_weight = torch._make_per_tensor_quantized_tensor(
Reported by Pylint.
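Note: the flagged columns in this file consistently land on torch members Pylint cannot see into (torch.per_tensor_affine, torch._make_per_tensor_quantized_tensor), the classic no-member / protected-access pattern for C-extension modules — an assumption, as the messages are truncated. A sketch of the inline treatment, using a hypothetical helper name:
import torch

def _to_unsigned_weight(raw_weight):
    # Project-wide, generated-members=torch.* in .pylintrc is the cleaner
    # fix; inline disables keep the original logic unchanged.
    assert raw_weight.qscheme() == torch.per_tensor_affine  # pylint: disable=no-member
    if raw_weight.dtype == torch.quint8:
        return raw_weight
    assert raw_weight.dtype == torch.qint8
    return torch._make_per_tensor_quantized_tensor(  # pylint: disable=protected-access
        (raw_weight.int_repr().int() + 128).to(torch.uint8),
        scale=raw_weight.q_scale(),
        zero_point=raw_weight.q_zero_point() + 128)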
test/distributed/test_c10d_nccl.py
373 issues
Line: 15
Column: 1
from itertools import product
from unittest import mock
import torch
import torch.distributed as c10d
if not c10d.is_available():
print("c10d not available, skipping tests", file=sys.stderr)
sys.exit(0)
Reported by Pylint.
Line: 16
Column: 1
from unittest import mock
import torch
import torch.distributed as c10d
if not c10d.is_available():
print("c10d not available, skipping tests", file=sys.stderr)
sys.exit(0)
Reported by Pylint.
Line: 23
Column: 1
sys.exit(0)
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
Reported by Pylint.
Line: 24
Column: 1
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
Reported by Pylint.
Line: 25
Column: 1
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
Reported by Pylint.
Line: 26
Column: 1
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
Reported by Pylint.
Line: 27
Column: 1
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
Reported by Pylint.
Line: 29
Column: 1
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
requires_nccl_version,
Reported by Pylint.
Line: 30
Column: 1
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
Reported by Pylint.
Line: 31
Column: 1
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
get_timeout,
Reported by Pylint.
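Note: the Column 1 findings at Lines 23-31 sit on imports placed after the c10d availability guard, which matches Pylint's wrong-import-position. Since the guard must run before the remaining imports can succeed, the usual treatment is a scoped disable rather than reordering:
import sys

import torch.distributed as c10d

if not c10d.is_available():
    print("c10d not available, skipping tests", file=sys.stderr)
    sys.exit(0)

# pylint: disable=wrong-import-position
import test_c10d_common
import torch.distributed as dist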
test/distributed/test_c10d_gloo.py
340 issues
Line: 12
Column: 1
from functools import reduce
from itertools import groupby
import torch
import torch.distributed as c10d
if not c10d.is_available():
print("c10d not available, skipping tests", file=sys.stderr)
sys.exit(0)
Reported by Pylint.
Line: 13
Column: 1
from itertools import groupby
import torch
import torch.distributed as c10d
if not c10d.is_available():
print("c10d not available, skipping tests", file=sys.stderr)
sys.exit(0)
Reported by Pylint.
Line: 20
Column: 1
sys.exit(0)
import test_c10d_common
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import (
LOOPBACK,
gpus_for_rank,
Reported by Pylint.
Line: 21
Column: 1
import test_c10d_common
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import (
LOOPBACK,
gpus_for_rank,
Task,
Reported by Pylint.
Line: 22
Column: 1
import test_c10d_common
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import (
LOOPBACK,
gpus_for_rank,
Task,
ModuleForDdpCommHook,
Reported by Pylint.
Line: 30
Column: 1
ModuleForDdpCommHook,
SparseGradientModule,
)
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_gloo,
skip_if_lt_x_gpu,
Reported by Pylint.
Line: 31
Column: 1
SparseGradientModule,
)
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_gloo,
skip_if_lt_x_gpu,
simple_sparse_reduce_tests,
Reported by Pylint.
Line: 32
Column: 1
)
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_gloo,
skip_if_lt_x_gpu,
simple_sparse_reduce_tests,
skip_if_win32,
Reported by Pylint.
Line: 42
Column: 1
verify_ddp_error_logged,
skip_if_rocm,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
retry_on_connect_failures,
TEST_WITH_TSAN,
sandcastle_skip,
Reported by Pylint.
Line: 1976
Column: 13
"callback must return a torch.futures.Future object, but got",
):
def comm_hook(state: object, bucket: dist.GradBucket):
return 1
model.register_comm_hook(state=None, hook=comm_hook)
# Run forward
Reported by Pylint.
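Note: the marker at Line 1976, Column 13 sits on a comm_hook that deliberately ignores its arguments and returns the wrong type — the surrounding assertion checks that register_comm_hook rejects a hook that does not return a Future. Assuming unused-argument is the check, an inline disable documents the intent:
def comm_hook(state: object, bucket: dist.GradBucket):  # pylint: disable=unused-argument
    # Intentionally not a torch.futures.Future: the enclosing test
    # asserts that this bad return type is rejected.
    return 1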
torch/fx/experimental/fx_acc/acc_ops.py
331 issues
Line: 44
Column: 58
("end_dim", "end_dim", this_arg_is_optional),
],
)
@register_acc_op_mapping(op_and_target=("call_function", torch.flatten))
@register_acc_op
def flatten(*, input, start_dim=0, end_dim=-1):
return torch.flatten(**locals())
Reported by Pylint.
Line: 47
Column: 12
@register_acc_op_mapping(op_and_target=("call_function", torch.flatten))
@register_acc_op
def flatten(*, input, start_dim=0, end_dim=-1):
return torch.flatten(**locals())
@register_acc_op_mapping(
op_and_target=(
"call_method",
Reported by Pylint.
Line: 150
Column: 34
size_node.meta = node.meta.copy()
return size_node
size_node.meta["type"] = torch.Size
getitem_node = node.graph.call_function(
getitem, kwargs={"input": size_node, "idx": node.kwargs["dim"]}
)
getitem_node.meta = node.meta.copy()
return getitem_node
Reported by Pylint.
Line: 165
Column: 58
return input + other
@register_acc_op_mapping(op_and_target=("call_function", torch.unsqueeze))
@register_acc_op
def unsqueeze(*, input, dim):
return torch.unsqueeze(**locals())
Reported by Pylint.
Line: 168
Column: 12
@register_acc_op_mapping(op_and_target=("call_function", torch.unsqueeze))
@register_acc_op
def unsqueeze(*, input, dim):
return torch.unsqueeze(**locals())
@register_custom_acc_mapper_fn(
op_and_target=("call_function", torch.stack),
arg_replacement_tuples=[
Reported by Pylint.
Line: 172
Column: 37
@register_custom_acc_mapper_fn(
op_and_target=("call_function", torch.stack),
arg_replacement_tuples=[
("tensors", "tensors"),
("dim", "dim"),
],
)
Reported by Pylint.
Line: 203
Column: 58
return cat_node
@register_acc_op_mapping(op_and_target=("call_function", torch.clamp))
@register_acc_op_mapping(op_and_target=("call_method", "clamp"))
@register_acc_op
def clamp(*, input, min, max):
return torch.clamp(**locals())
Reported by Pylint.
Line: 207
Column: 12
@register_acc_op_mapping(op_and_target=("call_method", "clamp"))
@register_acc_op
def clamp(*, input, min, max):
return torch.clamp(**locals())
@register_acc_op_mapping(op_and_target=("call_function", torch.cat))
@register_acc_op
def cat(*, tensors, dim):
Reported by Pylint.
Line: 210
Column: 58
return torch.clamp(**locals())
@register_acc_op_mapping(op_and_target=("call_function", torch.cat))
@register_acc_op
def cat(*, tensors, dim):
return torch.cat(**locals())
Reported by Pylint.
Line: 213
Column: 12
@register_acc_op_mapping(op_and_target=("call_function", torch.cat))
@register_acc_op
def cat(*, tensors, dim):
return torch.cat(**locals())
@register_acc_op_mapping(op_and_target=("call_function", torch.transpose))
@register_acc_op_mapping(op_and_target=("call_method", "transpose"))
@register_acc_op
Reported by Pylint.
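Note: the Column 12 findings on the def lines coincide with keyword-only parameters named input, min, and max (Pylint's redefined-builtin), and the Column 58 findings on the decorator lines sit on torch.* attributes Pylint cannot resolve (no-member) — both assumptions, as the messages are truncated. Because acc_ops mirrors the torch operator signatures verbatim, the shadowing names are usually kept and the check silenced per function:
import torch

def clamp(*, input, min, max):  # pylint: disable=redefined-builtin
    # The keyword-only names intentionally match torch.clamp's signature
    # so that **locals() forwards them unchanged.
    return torch.clamp(**locals())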