The following issues were found:
torch/distributions/__init__.py
43 issues
Line: 74
Column: 1
    loss.backward()
"""
from .bernoulli import Bernoulli
from .beta import Beta
from .binomial import Binomial
from .categorical import Categorical
from .cauchy import Cauchy
from .chi2 import Chi2
Reported by Pylint.
Line: 75
Column: 1
"""
from .bernoulli import Bernoulli
from .beta import Beta
from .binomial import Binomial
from .categorical import Categorical
from .cauchy import Cauchy
from .chi2 import Chi2
from .constraint_registry import biject_to, transform_to
Reported by Pylint.
Line: 76
Column: 1
from .bernoulli import Bernoulli
from .beta import Beta
from .binomial import Binomial
from .categorical import Categorical
from .cauchy import Cauchy
from .chi2 import Chi2
from .constraint_registry import biject_to, transform_to
from .continuous_bernoulli import ContinuousBernoulli
Reported by Pylint.
Line: 77
Column: 1
from .bernoulli import Bernoulli
from .beta import Beta
from .binomial import Binomial
from .categorical import Categorical
from .cauchy import Cauchy
from .chi2 import Chi2
from .constraint_registry import biject_to, transform_to
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
Reported by Pylint.
Line: 78
Column: 1
from .beta import Beta
from .binomial import Binomial
from .categorical import Categorical
from .cauchy import Cauchy
from .chi2 import Chi2
from .constraint_registry import biject_to, transform_to
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
from .distribution import Distribution
Reported by Pylint.
Line: 79
Column: 1
from .binomial import Binomial
from .categorical import Categorical
from .cauchy import Cauchy
from .chi2 import Chi2
from .constraint_registry import biject_to, transform_to
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
from .distribution import Distribution
from .exp_family import ExponentialFamily
Reported by Pylint.
Line: 80
Column: 1
from .categorical import Categorical
from .cauchy import Cauchy
from .chi2 import Chi2
from .constraint_registry import biject_to, transform_to
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
from .distribution import Distribution
from .exp_family import ExponentialFamily
from .exponential import Exponential
Reported by Pylint.
Line: 81
Column: 1
from .cauchy import Cauchy
from .chi2 import Chi2
from .constraint_registry import biject_to, transform_to
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
from .distribution import Distribution
from .exp_family import ExponentialFamily
from .exponential import Exponential
from .fishersnedecor import FisherSnedecor
Reported by Pylint.
Line: 82
Column: 1
from .chi2 import Chi2
from .constraint_registry import biject_to, transform_to
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
from .distribution import Distribution
from .exp_family import ExponentialFamily
from .exponential import Exponential
from .fishersnedecor import FisherSnedecor
from .gamma import Gamma
Reported by Pylint.
Line: 83
Column: 1
from .constraint_registry import biject_to, transform_to
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
from .distribution import Distribution
from .exp_family import ExponentialFamily
from .exponential import Exponential
from .fishersnedecor import FisherSnedecor
from .gamma import Gamma
from .geometric import Geometric
Reported by Pylint.
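The ten findings above all land at column 1 of the import block that re-exports the distribution classes, and the report omits the message text. A common Pylint complaint on an __init__.py import block is that the re-exports are implicit; listing them in __all__ documents the public surface and tells Pylint the imports are intentionally used. A hedged sketch with a subset of the names:

from .bernoulli import Bernoulli
from .beta import Beta

__all__ = ['Bernoulli', 'Beta']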
test/test_autocast.py
43 issues
Line: 2
Column: 1
import collections
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.autocast_test_lists import AutocastCPUTestLists

class TestAutocastCPU(TestCase):
    def setUp(self):
        super(TestAutocastCPU, self).setUp()
        self.autocast_lists = AutocastCPUTestLists(torch.device('cpu'))
Reported by Pylint.
Line: 3
Column: 1
import collections
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.autocast_test_lists import AutocastCPUTestLists

class TestAutocastCPU(TestCase):
    def setUp(self):
        super(TestAutocastCPU, self).setUp()
        self.autocast_lists = AutocastCPUTestLists(torch.device('cpu'))
Reported by Pylint.
Line: 4
Column: 1
import collections
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.autocast_test_lists import AutocastCPUTestLists

class TestAutocastCPU(TestCase):
    def setUp(self):
        super(TestAutocastCPU, self).setUp()
        self.autocast_lists = AutocastCPUTestLists(torch.device('cpu'))
Reported by Pylint.
Line: 106
Column: 76
    def test_autocast_nn_bf16(self):
        for op, args in self.autocast_lists.nn_bf16:
            self._run_autocast_outofplace(op, args, torch.bfloat16, module=torch._C._nn)

    def test_autocast_torch_fp32(self):
        for op_with_args in self.autocast_lists.torch_fp32:
            op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
            self._run_autocast_outofplace(op, args, torch.float32, add_kwargs=maybe_kwargs)
Reported by Pylint.
Line: 106
Column: 76
    def test_autocast_nn_bf16(self):
        for op, args in self.autocast_lists.nn_bf16:
            self._run_autocast_outofplace(op, args, torch.bfloat16, module=torch._C._nn)

    def test_autocast_torch_fp32(self):
        for op_with_args in self.autocast_lists.torch_fp32:
            op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
            self._run_autocast_outofplace(op, args, torch.float32, add_kwargs=maybe_kwargs)
Reported by Pylint.
Line: 116
Column: 75
    def test_autocast_nn_fp32(self):
        for op_with_args in self.autocast_lists.nn_fp32:
            op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
            self._run_autocast_outofplace(op, args, torch.float32, module=torch._C._nn, add_kwargs=maybe_kwargs)

    def test_autocast_torch_need_autocast_promote(self):
        for op, args in self.autocast_lists.torch_need_autocast_promote:
            self._run_autocast_outofplace(op, args, torch.float32)
Reported by Pylint.
Line: 116
Column: 75
    def test_autocast_nn_fp32(self):
        for op_with_args in self.autocast_lists.nn_fp32:
            op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
            self._run_autocast_outofplace(op, args, torch.float32, module=torch._C._nn, add_kwargs=maybe_kwargs)

    def test_autocast_torch_need_autocast_promote(self):
        for op, args in self.autocast_lists.torch_need_autocast_promote:
            self._run_autocast_outofplace(op, args, torch.float32)
Reported by Pylint.
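The two pairs of duplicated findings at 106:76 and 116:75 both point into the module=torch._C._nn argument; accessing _C and _nn is what Pylint reports as protected-access (W0212), assuming that is the message here. These tests exercise the internal entry point on purpose, so the usual remedy is an inline suppression; a sketch:

    def test_autocast_nn_bf16(self):
        for op, args in self.autocast_lists.nn_bf16:
            # torch._C._nn is exercised deliberately in these tests
            self._run_autocast_outofplace(
                op, args, torch.bfloat16,
                module=torch._C._nn)  # pylint: disable=protected-access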
Line: 1
Column: 1
import collections
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.autocast_test_lists import AutocastCPUTestLists

class TestAutocastCPU(TestCase):
    def setUp(self):
        super(TestAutocastCPU, self).setUp()
        self.autocast_lists = AutocastCPUTestLists(torch.device('cpu'))
Reported by Pylint.
Line: 6
Column: 1
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.autocast_test_lists import AutocastCPUTestLists

class TestAutocastCPU(TestCase):
    def setUp(self):
        super(TestAutocastCPU, self).setUp()
        self.autocast_lists = AutocastCPUTestLists(torch.device('cpu'))

    def tearDown(self):
Reported by Pylint.
Line: 7
Column: 5
from torch.testing._internal.autocast_test_lists import AutocastCPUTestLists

class TestAutocastCPU(TestCase):
    def setUp(self):
        super(TestAutocastCPU, self).setUp()
        self.autocast_lists = AutocastCPUTestLists(torch.device('cpu'))

    def tearDown(self):
        del self.autocast_lists
Reported by Pylint.
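Whatever message fired at 7:5, the snippet still uses the Python 2 style super(TestAutocastCPU, self) call, which recent Pylint flags as super-with-arguments (R1725). A minimal modernization sketch:

class TestAutocastCPU(TestCase):
    def setUp(self):
        super().setUp()  # zero-argument super(), equivalent on Python 3
        self.autocast_lists = AutocastCPUTestLists(torch.device('cpu'))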
caffe2/contrib/tensorboard/tensorboard_exporter.py
43 issues
Line: 318
Column: 12
        # Note: this will inspect the workspace for better or worse.
        shapes, _ = workspace.InferShapesAndTypes(nets)
        return shapes
    except Exception as e:
        logging.warning('Failed to compute shapes: %s', e)
        return {}

def nets_to_graph_def(nets, shapes=None, **kwargs):
Reported by Pylint.
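The 318:12 finding sits on the except clause; catching bare Exception is what Pylint reports as broad-except (W0703). If only shape-inference failures need to be tolerated, the handler can be narrowed. A sketch, assuming workspace.InferShapesAndTypes signals failure with RuntimeError (verify against your Caffe2 build):

    try:
        # Note: this will inspect the workspace for better or worse.
        shapes, _ = workspace.InferShapesAndTypes(nets)
        return shapes
    except RuntimeError as e:  # assumed failure mode; widen only if needed
        logging.warning('Failed to compute shapes: %s', e)
        return {}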
Line: 1
Column: 1
from builtins import bytes
import copy
import logging
import os
Reported by Pylint.
Line: 30
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
def _make_unique_name(seen, name, min_version=0):
    assert name is not None
    i = min_version
    x = '%s_%d' % (name, i) if i else name
    while x in seen:
        i += 1
        x = '%s_%d' % (name, i)
Reported by Bandit.
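Bandit's B101 fires because assert statements are stripped when Python runs with -O, so they should not enforce runtime invariants. One option is an explicit exception; a minimal sketch of the same check:

def _make_unique_name(seen, name, min_version=0):
    if name is None:  # replaces the assert; survives python -O
        raise ValueError('name must not be None')
    i = min_version
    x = '%s_%d' % (name, i) if i else name
    while x in seen:
        i += 1
        x = '%s_%d' % (name, i)
    seen.add(x)
    return x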
Line: 32
Column: 5
def _make_unique_name(seen, name, min_version=0):
    assert name is not None
    i = min_version
    x = '%s_%d' % (name, i) if i else name
    while x in seen:
        i += 1
        x = '%s_%d' % (name, i)
    seen.add(x)
    return x
Reported by Pylint.
Line: 35
Column: 9
    x = '%s_%d' % (name, i) if i else name
    while x in seen:
        i += 1
        x = '%s_%d' % (name, i)
    seen.add(x)
    return x

def _convert_to_ssa(shapes, track_blob_names, ops):
Reported by Pylint.
Line: 46
Column: 5
    I.e. blobs will be renamed so that each blob is produced only once.
    """
    ir = core.IR(ops)
    seen = set()
    versioned = {}
    shapes2 = {}
    track_blob_names2 = {}
Reported by Pylint.
Line: 53
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
    track_blob_names2 = {}

    def ssa_name(name, versions):
        assert name in versions
        version = versions[name]
        if (name, version) in versioned:
            return versioned[(name, version)]
        # Always setting name2 = `{name}_{version}` would work, but we also try
        # to avoid a trailing `_0`, so we have to be careful not to introduce
Reported by Bandit.
Line: 70
Column: 10
            track_blob_names2[name2] = track_blob_names[name]
        return name2

    for (op, ssa) in zip(ops, ir.ssa):
        assert op is ssa.op
        inputs = list(op.input)
        outputs = list(op.output)
        del op.input[:]
        del op.output[:]
Reported by Pylint.
Line: 71
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
        return name2

    for (op, ssa) in zip(ops, ir.ssa):
        assert op is ssa.op
        inputs = list(op.input)
        outputs = list(op.output)
        del op.input[:]
        del op.output[:]
        op.input.extend(ssa_name(name, ssa.in_versions) for name in inputs)
Reported by Bandit.
Line: 88
Column: 9
def _get_blob_names(ops):
    names = set()
    for op in ops:
        names.update(op.input)
        names.update(op.output)
    return {name: name for name in names}
Reported by Pylint.
caffe2/python/onnx/frontend.py
43 issues
Line: 19
Column: 1
import re
from caffe2.python import core as caffe2_core
from onnx import (checker, helper, numpy_helper, mapping,
                  GraphProto, NodeProto, TensorProto, OperatorSetIdProto)
from onnx.helper import make_tensor_value_info, make_model
import numpy as np
from caffe2.python.onnx.helper import c2_native_run_net
Reported by Pylint.
Line: 21
Column: 1
from caffe2.python import core as caffe2_core
from onnx import (checker, helper, numpy_helper, mapping,
                  GraphProto, NodeProto, TensorProto, OperatorSetIdProto)
from onnx.helper import make_tensor_value_info, make_model
import numpy as np
from caffe2.python.onnx.helper import c2_native_run_net
import caffe2.python._import_c_extension as C
Reported by Pylint.
Line: 75
Column: 19
    _special_operators = {}

    # Dummy name generator
    _dummy_name = C.DummyName()

    @classmethod
    def dummy_name(cls):
        return cls._dummy_name.new_dummy_name()
Reported by Pylint.
Line: 134
Column: 12
    @classmethod
    def caffe2_op_to_onnx_node(cls, op_def, shapes):
        if C.support_onnx_export(op_def.type):
            node_strs, tensor_strs = C.export_to_onnx(cls._dummy_name, op_def.SerializeToString(), shapes)
            nodes = []
            for s in node_strs:
                node = NodeProto()
                node.ParseFromString(s)
Reported by Pylint.
Line: 135
Column: 38
    @classmethod
    def caffe2_op_to_onnx_node(cls, op_def, shapes):
        if C.support_onnx_export(op_def.type):
            node_strs, tensor_strs = C.export_to_onnx(cls._dummy_name, op_def.SerializeToString(), shapes)
            nodes = []
            for s in node_strs:
                node = NodeProto()
                node.ParseFromString(s)
                nodes.append(node)
Reported by Pylint.
Line: 117
Column: 53
        return cls._common_caffe2_arg_to_onnx_attr(op_def, arg)

    @classmethod
    def _common_caffe2_op_to_onnx_node(cls, op_def, shapes):
        node_def = NodeProto()
        node_def.name = op_def.name
        node_def.op_type = cls._renamed_operators.get(op_def.type, op_def.type)
Reported by Pylint.
Line: 268
Column: 17
        redundant_output = set(vi.name for vi in graph_def.output) - all_output
        if redundant_output:
            logger.warning(
                'There are graph output not produced by any node or initializer: {}'
                '! Will drop them.'.format(', '.join(redundant_output)))
        graph_def.output.extend(
            make_tensor_value_info(
                name=name,
                elem_type=value_info[name][0],
Line: 306
Column: 42
        return cls._ssa_rewrite(net, init_net, value_info)

    @classmethod
    def _ssa_rewrite(cls, net, init_net, value_info):
        def ssa_name(name, version, version_cnt=None):
            if version == 0:
                return name
            if version_cnt and len(version_cnt.get(name, {})) <= 1:
                return name
Reported by Pylint.
Line: 22
Column: 1
from onnx import (checker, helper, numpy_helper, mapping,
                  GraphProto, NodeProto, TensorProto, OperatorSetIdProto)
from onnx.helper import make_tensor_value_info, make_model
import numpy as np
from caffe2.python.onnx.helper import c2_native_run_net
import caffe2.python._import_c_extension as C
Reported by Pylint.
Line: 24
Column: 1
from onnx.helper import make_tensor_value_info, make_model
import numpy as np
from caffe2.python.onnx.helper import c2_native_run_net
import caffe2.python._import_c_extension as C
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
Reported by Pylint.
torch/nn/modules/__init__.py
43 issues
Line: 1
Column: 1
from .module import Module
from .linear import Identity, Linear, Bilinear, LazyLinear
from .conv import Conv1d, Conv2d, Conv3d, \
    ConvTranspose1d, ConvTranspose2d, ConvTranspose3d, \
    LazyConv1d, LazyConv2d, LazyConv3d, LazyConvTranspose1d, LazyConvTranspose2d, LazyConvTranspose3d
from .activation import Threshold, ReLU, Hardtanh, ReLU6, Sigmoid, Tanh, \
    Softmax, Softmax2d, LogSoftmax, ELU, SELU, CELU, GELU, Hardshrink, LeakyReLU, LogSigmoid, \
    Softplus, Softshrink, MultiheadAttention, PReLU, Softsign, Softmin, Tanhshrink, RReLU, GLU, \
    Hardsigmoid, Hardswish, SiLU, Mish
Reported by Pylint.
Line: 2
Column: 1
from .module import Module
from .linear import Identity, Linear, Bilinear, LazyLinear
from .conv import Conv1d, Conv2d, Conv3d, \
    ConvTranspose1d, ConvTranspose2d, ConvTranspose3d, \
    LazyConv1d, LazyConv2d, LazyConv3d, LazyConvTranspose1d, LazyConvTranspose2d, LazyConvTranspose3d
from .activation import Threshold, ReLU, Hardtanh, ReLU6, Sigmoid, Tanh, \
    Softmax, Softmax2d, LogSoftmax, ELU, SELU, CELU, GELU, Hardshrink, LeakyReLU, LogSigmoid, \
    Softplus, Softshrink, MultiheadAttention, PReLU, Softsign, Softmin, Tanhshrink, RReLU, GLU, \
    Hardsigmoid, Hardswish, SiLU, Mish
Reported by Pylint.
Line: 3
Column: 1
from .module import Module
from .linear import Identity, Linear, Bilinear, LazyLinear
from .conv import Conv1d, Conv2d, Conv3d, \
    ConvTranspose1d, ConvTranspose2d, ConvTranspose3d, \
    LazyConv1d, LazyConv2d, LazyConv3d, LazyConvTranspose1d, LazyConvTranspose2d, LazyConvTranspose3d
from .activation import Threshold, ReLU, Hardtanh, ReLU6, Sigmoid, Tanh, \
    Softmax, Softmax2d, LogSoftmax, ELU, SELU, CELU, GELU, Hardshrink, LeakyReLU, LogSigmoid, \
    Softplus, Softshrink, MultiheadAttention, PReLU, Softsign, Softmin, Tanhshrink, RReLU, GLU, \
    Hardsigmoid, Hardswish, SiLU, Mish
Reported by Pylint.
Line: 6
Column: 1
from .conv import Conv1d, Conv2d, Conv3d, \
    ConvTranspose1d, ConvTranspose2d, ConvTranspose3d, \
    LazyConv1d, LazyConv2d, LazyConv3d, LazyConvTranspose1d, LazyConvTranspose2d, LazyConvTranspose3d
from .activation import Threshold, ReLU, Hardtanh, ReLU6, Sigmoid, Tanh, \
    Softmax, Softmax2d, LogSoftmax, ELU, SELU, CELU, GELU, Hardshrink, LeakyReLU, LogSigmoid, \
    Softplus, Softshrink, MultiheadAttention, PReLU, Softsign, Softmin, Tanhshrink, RReLU, GLU, \
    Hardsigmoid, Hardswish, SiLU, Mish
from .loss import L1Loss, NLLLoss, KLDivLoss, MSELoss, BCELoss, BCEWithLogitsLoss, NLLLoss2d, \
    CosineEmbeddingLoss, CTCLoss, HingeEmbeddingLoss, MarginRankingLoss, \
Reported by Pylint.
Line: 10
Column: 1
    Softmax, Softmax2d, LogSoftmax, ELU, SELU, CELU, GELU, Hardshrink, LeakyReLU, LogSigmoid, \
    Softplus, Softshrink, MultiheadAttention, PReLU, Softsign, Softmin, Tanhshrink, RReLU, GLU, \
    Hardsigmoid, Hardswish, SiLU, Mish
from .loss import L1Loss, NLLLoss, KLDivLoss, MSELoss, BCELoss, BCEWithLogitsLoss, NLLLoss2d, \
    CosineEmbeddingLoss, CTCLoss, HingeEmbeddingLoss, MarginRankingLoss, \
    MultiLabelMarginLoss, MultiLabelSoftMarginLoss, MultiMarginLoss, SmoothL1Loss, HuberLoss, \
    SoftMarginLoss, CrossEntropyLoss, TripletMarginLoss, TripletMarginWithDistanceLoss, PoissonNLLLoss, GaussianNLLLoss
from .container import Container, Sequential, ModuleList, ModuleDict, ParameterList, ParameterDict
from .pooling import AvgPool1d, AvgPool2d, AvgPool3d, MaxPool1d, MaxPool2d, MaxPool3d, \
Reported by Pylint.
Line: 14
Column: 1
    CosineEmbeddingLoss, CTCLoss, HingeEmbeddingLoss, MarginRankingLoss, \
    MultiLabelMarginLoss, MultiLabelSoftMarginLoss, MultiMarginLoss, SmoothL1Loss, HuberLoss, \
    SoftMarginLoss, CrossEntropyLoss, TripletMarginLoss, TripletMarginWithDistanceLoss, PoissonNLLLoss, GaussianNLLLoss
from .container import Container, Sequential, ModuleList, ModuleDict, ParameterList, ParameterDict
from .pooling import AvgPool1d, AvgPool2d, AvgPool3d, MaxPool1d, MaxPool2d, MaxPool3d, \
    MaxUnpool1d, MaxUnpool2d, MaxUnpool3d, FractionalMaxPool2d, FractionalMaxPool3d, LPPool1d, LPPool2d, \
    AdaptiveMaxPool1d, AdaptiveMaxPool2d, AdaptiveMaxPool3d, AdaptiveAvgPool1d, AdaptiveAvgPool2d, AdaptiveAvgPool3d
from .batchnorm import BatchNorm1d, BatchNorm2d, BatchNorm3d, SyncBatchNorm, \
    LazyBatchNorm1d, LazyBatchNorm2d, LazyBatchNorm3d
Reported by Pylint.
Line: 15
Column: 1
    MultiLabelMarginLoss, MultiLabelSoftMarginLoss, MultiMarginLoss, SmoothL1Loss, HuberLoss, \
    SoftMarginLoss, CrossEntropyLoss, TripletMarginLoss, TripletMarginWithDistanceLoss, PoissonNLLLoss, GaussianNLLLoss
from .container import Container, Sequential, ModuleList, ModuleDict, ParameterList, ParameterDict
from .pooling import AvgPool1d, AvgPool2d, AvgPool3d, MaxPool1d, MaxPool2d, MaxPool3d, \
    MaxUnpool1d, MaxUnpool2d, MaxUnpool3d, FractionalMaxPool2d, FractionalMaxPool3d, LPPool1d, LPPool2d, \
    AdaptiveMaxPool1d, AdaptiveMaxPool2d, AdaptiveMaxPool3d, AdaptiveAvgPool1d, AdaptiveAvgPool2d, AdaptiveAvgPool3d
from .batchnorm import BatchNorm1d, BatchNorm2d, BatchNorm3d, SyncBatchNorm, \
    LazyBatchNorm1d, LazyBatchNorm2d, LazyBatchNorm3d
from .instancenorm import InstanceNorm1d, InstanceNorm2d, InstanceNorm3d, \
Reported by Pylint.
Line: 18
Column: 1
from .pooling import AvgPool1d, AvgPool2d, AvgPool3d, MaxPool1d, MaxPool2d, MaxPool3d, \
    MaxUnpool1d, MaxUnpool2d, MaxUnpool3d, FractionalMaxPool2d, FractionalMaxPool3d, LPPool1d, LPPool2d, \
    AdaptiveMaxPool1d, AdaptiveMaxPool2d, AdaptiveMaxPool3d, AdaptiveAvgPool1d, AdaptiveAvgPool2d, AdaptiveAvgPool3d
from .batchnorm import BatchNorm1d, BatchNorm2d, BatchNorm3d, SyncBatchNorm, \
    LazyBatchNorm1d, LazyBatchNorm2d, LazyBatchNorm3d
from .instancenorm import InstanceNorm1d, InstanceNorm2d, InstanceNorm3d, \
    LazyInstanceNorm1d, LazyInstanceNorm2d, LazyInstanceNorm3d
from .normalization import LocalResponseNorm, CrossMapLRN2d, LayerNorm, GroupNorm
from .dropout import Dropout, Dropout2d, Dropout3d, AlphaDropout, FeatureAlphaDropout
Reported by Pylint.
Line: 20
Column: 1
    AdaptiveMaxPool1d, AdaptiveMaxPool2d, AdaptiveMaxPool3d, AdaptiveAvgPool1d, AdaptiveAvgPool2d, AdaptiveAvgPool3d
from .batchnorm import BatchNorm1d, BatchNorm2d, BatchNorm3d, SyncBatchNorm, \
    LazyBatchNorm1d, LazyBatchNorm2d, LazyBatchNorm3d
from .instancenorm import InstanceNorm1d, InstanceNorm2d, InstanceNorm3d, \
    LazyInstanceNorm1d, LazyInstanceNorm2d, LazyInstanceNorm3d
from .normalization import LocalResponseNorm, CrossMapLRN2d, LayerNorm, GroupNorm
from .dropout import Dropout, Dropout2d, Dropout3d, AlphaDropout, FeatureAlphaDropout
from .padding import ReflectionPad1d, ReflectionPad2d, ReflectionPad3d, ReplicationPad1d, ReplicationPad2d, \
    ReplicationPad3d, ZeroPad2d, ConstantPad1d, ConstantPad2d, ConstantPad3d
Reported by Pylint.
Line: 22
Column: 1
    LazyBatchNorm1d, LazyBatchNorm2d, LazyBatchNorm3d
from .instancenorm import InstanceNorm1d, InstanceNorm2d, InstanceNorm3d, \
    LazyInstanceNorm1d, LazyInstanceNorm2d, LazyInstanceNorm3d
from .normalization import LocalResponseNorm, CrossMapLRN2d, LayerNorm, GroupNorm
from .dropout import Dropout, Dropout2d, Dropout3d, AlphaDropout, FeatureAlphaDropout
from .padding import ReflectionPad1d, ReflectionPad2d, ReflectionPad3d, ReplicationPad1d, ReplicationPad2d, \
    ReplicationPad3d, ZeroPad2d, ConstantPad1d, ConstantPad2d, ConstantPad3d
from .sparse import Embedding, EmbeddingBag
from .rnn import RNNBase, RNN, LSTM, GRU, \
Reported by Pylint.
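Most of the findings above sit on the long, backslash-continued import lines. Whatever the exact messages, PEP 8 prefers parenthesized continuations over backslashes, and they keep long re-export lists easy to extend. A sketch of the first import in that style:

from .conv import (
    Conv1d, Conv2d, Conv3d,
    ConvTranspose1d, ConvTranspose2d, ConvTranspose3d,
    LazyConv1d, LazyConv2d, LazyConv3d,
    LazyConvTranspose1d, LazyConvTranspose2d, LazyConvTranspose3d,
)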
test/distributed/pipeline/sync/skip/test_leak.py
42 issues
Line: 7
Column: 1
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch import nn
from torch.distributed.pipeline.sync import Pipe, is_checkpointing, is_recomputing
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
Reported by Pylint.
Line: 8
Column: 1
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch import nn
from torch.distributed.pipeline.sync import Pipe, is_checkpointing, is_recomputing
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.tracker import current_skip_tracker
Reported by Pylint.
Line: 9
Column: 1
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch import nn
from torch.distributed.pipeline.sync import Pipe, is_checkpointing, is_recomputing
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.tracker import current_skip_tracker
Reported by Pylint.
Line: 11
Column: 1
import torch
from torch import nn
from torch.distributed.pipeline.sync import Pipe, is_checkpointing, is_recomputing
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.tracker import current_skip_tracker
@skippable(stash=["skip"])
Reported by Pylint.
Line: 12
Column: 1
from torch import nn
from torch.distributed.pipeline.sync import Pipe, is_checkpointing, is_recomputing
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.tracker import current_skip_tracker
@skippable(stash=["skip"])
class Stash(nn.Module):
Reported by Pylint.
Line: 13
Column: 1
from torch.distributed.pipeline.sync import Pipe, is_checkpointing, is_recomputing
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.tracker import current_skip_tracker
@skippable(stash=["skip"])
class Stash(nn.Module):
    def forward(self, input):
Reported by Pylint.
Line: 18
Column: 23
@skippable(stash=["skip"])
class Stash(nn.Module):
    def forward(self, input):
        yield stash("skip", input)
        return input  # noqa: B901
@skippable(pop=["skip"])
Reported by Pylint.
Line: 25
Column: 23
@skippable(pop=["skip"])
class Pop(nn.Module):
    def forward(self, input):
        skip = yield pop("skip")
        return input + skip
@pytest.mark.parametrize("train", [True, False], ids=["train", "eval"])
Reported by Pylint.
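Column 23 of def forward(self, input) points at the input parameter, which shadows the built-in of the same name; Pylint reports this as redefined-builtin (W0622). PyTorch keeps input in forward signatures for API compatibility, so the warning is usually suppressed in place rather than fixed by renaming. A sketch:

@skippable(pop=["skip"])
class Pop(nn.Module):
    def forward(self, input):  # pylint: disable=redefined-builtin
        skip = yield pop("skip")
        return input + skip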
Line: 32
Column: 50
@pytest.mark.parametrize("train", [True, False], ids=["train", "eval"])
@pytest.mark.parametrize("checkpoint", ["always", "except_last", "never"])
def test_delete_portal_tensor(train, checkpoint, setup_rpc):
    # Without checkpointing:
    #     +- Stash --+  +--- Pop ----+ - - - layers
    #     | 2,blue,1 |--| 1,orange,0 | - - - tensor_life and portal function
    #     +----------+  +------------+
    #
Reported by Pylint.
Line: 59
Column: 5
    stash_ = Stash()

    @stash_.register_forward_hook
    def check_portal_tensor_after_stash(*_):
        if is_checkpointing():
            assert portal_tensor_life_is(2)
        elif is_recomputing():
            assert portal_tensor_life_is(0)
        else:
Reported by Pylint.
test/jit/test_warn.py
42 issues
Line: 5
Column: 1
import sys
import io
import torch
import warnings
from contextlib import redirect_stderr
from torch.testing import FileCheck
# Make the helper files in test/ importable
Reported by Pylint.
Line: 8
Column: 1
import torch
import warnings
from contextlib import redirect_stderr
from torch.testing import FileCheck
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
Reported by Pylint.
Line: 13
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
    raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
                       "\tpython test/test_jit.py TESTNAME\n\n"
                       "instead.")
Reported by Pylint.
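The 13:1 finding is on the JitTestCase import, which must come after the sys.path.append that makes it resolvable; an import below executable statements is what Pylint reports as wrong-import-position (C0413). Since reordering would break the import, the usual fix is an inline suppression. A sketch:

pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase  # pylint: disable=wrong-import-position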
Line: 1
Column: 1
import os
import sys
import io
import torch
import warnings
from contextlib import redirect_stderr
from torch.testing import FileCheck
Reported by Pylint.
Line: 6
Column: 1
import io
import torch
import warnings
from contextlib import redirect_stderr
from torch.testing import FileCheck
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
Reported by Pylint.
Line: 7
Column: 1
import torch
import warnings
from contextlib import redirect_stderr
from torch.testing import FileCheck
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
Reported by Pylint.
Line: 13
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
    raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
                       "\tpython test/test_jit.py TESTNAME\n\n"
                       "instead.")
Reported by Pylint.
Line: 21
Column: 1
                       "instead.")

class TestWarn(JitTestCase):
    def test_warn(self):
        @torch.jit.script
        def fn():
            warnings.warn("I am warning you")
Reported by Pylint.
Line: 22
Column: 5
class TestWarn(JitTestCase):
    def test_warn(self):
        @torch.jit.script
        def fn():
            warnings.warn("I am warning you")

        f = io.StringIO()
Reported by Pylint.
Line: 22
Column: 5
class TestWarn(JitTestCase):
    def test_warn(self):
        @torch.jit.script
        def fn():
            warnings.warn("I am warning you")

        f = io.StringIO()
Reported by Pylint.
caffe2/python/gradient_checker.py
42 issues
Line: 125
Column: 9
              input_to_check, step_size=0.0001,
              threshold=0.05, print_net=True):
        net_results, net_grads, full_net = _get_grad(
            net, [], outputs_with_grad, input_values, [input_to_check])
        analytic_grad = net_grads[input_to_check]

        def GetLoss(new_value):
            workspace.blobs[input_to_check] = new_value
Reported by Pylint.
Line: 267
Column: 3
        op.device_option.CopyFrom(self._device_option)
        if grad_ops is None:
            # TODO(jiayq): use the gradient registration instead of the old
            # hack.
            grad_ops, g_input = getGradientForOp(op)

        _input_device_options = input_device_options or \
Reported by Pylint.
Line: 283
Column: 9
        # Get the loss and gradient for the original.
        grad_name = g_input[input_to_check]
        loss, grad = self.GetLossAndGrad(
            op, grad_ops, inputs, op.input, input_to_check, grad_name,
            outputs_with_grads,
        )
        grad_estimate = np.zeros_like(inputs[input_to_check])
        if grad_estimate.shape != grad.shape:
Reported by Pylint.
Line: 1
Column: 1
## @package gradient_checker
# Module caffe2.python.gradient_checker
import os
import numpy as np
Reported by Pylint.
Line: 15
Column: 1
from caffe2.proto import caffe2_pb2

def getGradientForOp(op):
    return core.GradientRegistry.GetGradientForOp(
        op, [s + '_grad' for s in op.output])

def _get_grad_blob(grad_map, input_to_check):
Reported by Pylint.
Line: 15
Column: 1
from caffe2.proto import caffe2_pb2

def getGradientForOp(op):
    return core.GradientRegistry.GetGradientForOp(
        op, [s + '_grad' for s in op.output])

def _get_grad_blob(grad_map, input_to_check):
Reported by Pylint.
Line: 15
Column: 1
from caffe2.proto import caffe2_pb2

def getGradientForOp(op):
    return core.GradientRegistry.GetGradientForOp(
        op, [s + '_grad' for s in op.output])

def _get_grad_blob(grad_map, input_to_check):
Reported by Pylint.
Line: 29
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
    # If grad_blob is not a single blob, it should be a gradient slice.
    # To make it comparable with the estimated gradient which is dense,
    # we need to first convert grad_blob to dense gradient.
    assert isinstance(grad_blob, core.GradientSlice)
    dense_grad = 'tmp_dense_grad'
    sparse_to_dense_op = core.CreateOperator(
        'SparseToDense',
        [grad_blob.indices, grad_blob.values, input_to_check],
        dense_grad,
Reported by Bandit.
Line: 48
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
        workspace.blobs[name] = value

    for input_to_check in inputs_with_grads:
        assert input_to_check in grad_map, (
            '{} has no gradient, cannot check net gradient.'.format(
                input_to_check))
        assert str(input_to_check) in workspace.blobs

    workspace.RunNetOnce(grad_net)
Reported by Bandit.
Line: 51
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
        assert input_to_check in grad_map, (
            '{} has no gradient, cannot check net gradient.'.format(
                input_to_check))
        assert str(input_to_check) in workspace.blobs

    workspace.RunNetOnce(grad_net)
    forward_results = [(output, workspace.blobs[output]) for output in outputs]
    grads = {input_to_check: _get_grad_blob(grad_map, input_to_check)
             for input_to_check in inputs_with_grads}
Reported by Bandit.
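All three gradient_checker findings above are B101 again. The asserts at source lines 48-51 carry user-facing messages, so converting them to explicit exceptions preserves the diagnostics under python -O; a sketch (the second message is illustrative, the original assert has none):

    for input_to_check in inputs_with_grads:
        if input_to_check not in grad_map:
            raise AssertionError(
                '{} has no gradient, cannot check net gradient.'.format(
                    input_to_check))
        if str(input_to_check) not in workspace.blobs:
            raise AssertionError('{} is not in the workspace.'.format(input_to_check))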
test/cpp_api_parity/utils.py
42 issues
Line: 7
Column: 1
import warnings
import shutil
import torch
import torch.utils.cpp_extension
import torch.testing._internal.common_nn as common_nn
from torch.testing._internal.common_cuda import TEST_CUDA
# Note that this namedtuple is for C++ parity test mechanism's internal use.
Reported by Pylint.
Line: 8
Column: 1
import shutil
import torch
import torch.utils.cpp_extension
import torch.testing._internal.common_nn as common_nn
from torch.testing._internal.common_cuda import TEST_CUDA
# Note that this namedtuple is for C++ parity test mechanism's internal use.
# For guidance on how to add a new C++ parity test, please see
Reported by Pylint.
Line: 9
Column: 1
import torch
import torch.utils.cpp_extension
import torch.testing._internal.common_nn as common_nn
from torch.testing._internal.common_cuda import TEST_CUDA
# Note that this namedtuple is for C++ parity test mechanism's internal use.
# For guidance on how to add a new C++ parity test, please see
# NOTE [How to check NN module / functional API parity between Python and C++ frontends]
Reported by Pylint.
Line: 10
Column: 1
import torch
import torch.utils.cpp_extension
import torch.testing._internal.common_nn as common_nn
from torch.testing._internal.common_cuda import TEST_CUDA
# Note that this namedtuple is for C++ parity test mechanism's internal use.
# For guidance on how to add a new C++ parity test, please see
# NOTE [How to check NN module / functional API parity between Python and C++ frontends]
TorchNNModuleTestParams = namedtuple(
Reported by Pylint.
Line: 212
Column: 31
    def add_cpp_forward_args(args):
        args_stmts = []
        for arg_name, _ in args:
            args_stmts.append('auto {} = arg_dict.at("{}")'.format(arg_name, arg_name))
            cpp_forward_args_symbols.append(arg_name)
        return args_stmts

    cpp_forward_input_args_stmts = set_cpp_tensors_requires_grad(move_cpp_tensors_to_device(
        add_cpp_forward_args(test_params.arg_dict['input']), device), test_params.arg_dict['input'])
Reported by Pylint.
Line: 226
Column: 37
    # Build the list of other arguments needed
    cpp_other_args_stmts = []
    for arg_name, _ in test_params.arg_dict['other']:
        cpp_other_args_stmts.append('auto {} = arg_dict.at("{}")'.format(arg_name, arg_name))
    cpp_other_args_stmts = move_cpp_tensors_to_device(cpp_other_args_stmts, device)

    cpp_args_construction_stmts = cpp_forward_input_args_stmts + cpp_forward_target_args_stmts + \
        cpp_forward_extra_args_stmts + cpp_other_args_stmts
Reported by Pylint.
Line: 285
Column: 58
        for i, arg in enumerate(args):
            arg_dict[arg_type].append(CppArg(name=arg_type_prefix + str(i), value=arg))

    put_args_into_arg_dict('input', 'i', convert_to_list(test_instance._get_input()))
    if is_criterion_test(test_instance):
        put_args_into_arg_dict('target', 't', convert_to_list(test_instance._get_target()))
    if test_instance.extra_args:
        put_args_into_arg_dict('extra_args', 'e', convert_to_list(test_instance.extra_args))
Reported by Pylint.
Line: 287
Column: 63
    put_args_into_arg_dict('input', 'i', convert_to_list(test_instance._get_input()))
    if is_criterion_test(test_instance):
        put_args_into_arg_dict('target', 't', convert_to_list(test_instance._get_target()))
    if test_instance.extra_args:
        put_args_into_arg_dict('extra_args', 'e', convert_to_list(test_instance.extra_args))

    cpp_var_map = test_params_dict.get('cpp_var_map', {})
    for arg_name, arg_value in cpp_var_map.items():
Reported by Pylint.
Line: 295
Column: 70
    for arg_name, arg_value in cpp_var_map.items():
        if isinstance(arg_value, str):
            if arg_value == '_get_input()':
                arg_dict['other'].append(CppArg(name=arg_name, value=test_instance._get_input()))
            else:
                raise RuntimeError("`{}` has unsupported string value: {}".format(arg_name, arg_value))
        elif isinstance(arg_value, torch.Tensor):
            arg_dict['other'].append(CppArg(name=arg_name, value=arg_value))
        else:
Reported by Pylint.
Line: 355
Column: 16
    # Don't block the process if this fails, but show the error message as warning.
    try:
        shutil.rmtree(folder_path)
    except Exception as e:
        warnings.warn("Non-blocking folder removal fails with the following error:\n{}".format(str(e)))
Reported by Pylint.
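The 355:16 finding is on the except clause around shutil.rmtree. Since rmtree reports failures as OSError (and its subclasses), the handler can usually be narrowed without losing the non-blocking behavior the comment asks for. A sketch:

    # Don't block the process if this fails, but show the error message as warning.
    try:
        shutil.rmtree(folder_path)
    except OSError as e:  # shutil.rmtree raises OSError on failure
        warnings.warn("Non-blocking folder removal fails with the following error:\n{}".format(str(e)))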
torch/csrc/deploy/example/examples.py
42 issues
Line: 3
Column: 1
from typing import Tuple, List, Dict
import torch
import torch.nn as nn
from torch import Tensor

class Simple(torch.nn.Module):
    def __init__(self, N, M):
Reported by Pylint.
Line: 4
Column: 1
from typing import Tuple, List, Dict
import torch
import torch.nn as nn
from torch import Tensor

class Simple(torch.nn.Module):
    def __init__(self, N, M):
Reported by Pylint.
Line: 5
Column: 1
import torch
import torch.nn as nn
from torch import Tensor

class Simple(torch.nn.Module):
    def __init__(self, N, M):
        super().__init__()
Reported by Pylint.
Line: 13
Column: 23
        super().__init__()
        self.weight = torch.nn.Parameter(torch.rand(N, M))

    def forward(self, input):
        output = self.weight + input
        return output

def load_library():
Reported by Pylint.
Line: 131
Column: 15
        return (input1 * -1, input2 * -1)

    def make_prediction(
        self, input: List[Tuple[Tensor, Tensor]]
    ) -> List[Tuple[Tensor, Tensor]]:
        return [self.forward(i[0], i[1]) for i in input]

    def make_batch(
        self, mega_batch: List[Tuple[Tensor, Tensor, int]], goals: Dict[str, str]
Reported by Pylint.
Line: 146
Column: 5
class MultiReturn(torch.nn.Module):
    def __init__(self):
        super(MultiReturn, self).__init__()

    def forward(self, t):
        # type: (Tuple[Tensor, Tensor]) -> Tuple[Tuple[Tensor, Tensor], Tuple[Tensor, Tensor]]
        a, b = t
Reported by Pylint.
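The 146:5 finding sits on a forward that still carries a Python 2 style type comment. TorchScript also accepts ordinary annotations for this signature, which modern linters prefer; a hedged sketch (the body below the unpacking is truncated in this report and left untouched):

class MultiReturn(torch.nn.Module):
    def forward(self, t: Tuple[Tensor, Tensor]) -> Tuple[Tuple[Tensor, Tensor], Tuple[Tensor, Tensor]]:
        a, b = t
        ...  # remainder of the original body, unchanged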
Line: 1
Column: 1
from typing import Tuple, List, Dict
import torch
import torch.nn as nn
from torch import Tensor

class Simple(torch.nn.Module):
    def __init__(self, N, M):
Reported by Pylint.
Line: 8
Column: 1
from torch import Tensor

class Simple(torch.nn.Module):
    def __init__(self, N, M):
        super().__init__()
        self.weight = torch.nn.Parameter(torch.rand(N, M))

    def forward(self, input):
Reported by Pylint.
Line: 8
Column: 1
from torch import Tensor

class Simple(torch.nn.Module):
    def __init__(self, N, M):
        super().__init__()
        self.weight = torch.nn.Parameter(torch.rand(N, M))

    def forward(self, input):
Reported by Pylint.
Line: 9
Column: 5
class Simple(torch.nn.Module):
    def __init__(self, N, M):
        super().__init__()
        self.weight = torch.nn.Parameter(torch.rand(N, M))

    def forward(self, input):
        output = self.weight + input
Reported by Pylint.