The following issues were found:
caffe2/python/mkl/rewrite_graph.py
24 issues
Line: 19
Column: 5
for (i, op) in reversed(list(enumerate(ops))):
if blob in op.output:
return i
raise ValueError("Failed to find last producer of blob, %s", blob)
def fix_BoxWithNMSLimit(net):
outputs = set()
for op in net.op:
Reported by Pylint.
Line: 1
Column: 1
import copy
from caffe2.proto import caffe2_pb2
from caffe2.python import core
Reported by Pylint.
Line: 11
Column: 1
from caffe2.python import core
def rewrite_init_net_simple(net):
for op in net.op:
op.device_option.device_type = caffe2_pb2.IDEEP
def last_producer(ops, blob):
for (i, op) in reversed(list(enumerate(ops))):
Reported by Pylint.
Line: 12
Column: 9
def rewrite_init_net_simple(net):
for op in net.op:
op.device_option.device_type = caffe2_pb2.IDEEP
def last_producer(ops, blob):
for (i, op) in reversed(list(enumerate(ops))):
if blob in op.output:
Reported by Pylint.
Line: 15
Column: 1
for op in net.op:
op.device_option.device_type = caffe2_pb2.IDEEP
def last_producer(ops, blob):
for (i, op) in reversed(list(enumerate(ops))):
if blob in op.output:
return i
raise ValueError("Failed to find last producer of blob, %s", blob)
Reported by Pylint.
Line: 16
Column: 13
op.device_option.device_type = caffe2_pb2.IDEEP
def last_producer(ops, blob):
for (i, op) in reversed(list(enumerate(ops))):
if blob in op.output:
return i
raise ValueError("Failed to find last producer of blob, %s", blob)
Reported by Pylint.
Line: 22
Column: 1
raise ValueError("Failed to find last producer of blob, %s", blob)
def fix_BoxWithNMSLimit(net):
outputs = set()
for op in net.op:
if op.type == 'BoxWithNMSLimit':
outputs.add(op.output[0])
outputs.add(op.output[1])
Reported by Pylint.
Line: 22
Column: 1
raise ValueError("Failed to find last producer of blob, %s", blob)
def fix_BoxWithNMSLimit(net):
outputs = set()
for op in net.op:
if op.type == 'BoxWithNMSLimit':
outputs.add(op.output[0])
outputs.add(op.output[1])
Reported by Pylint.
Line: 24
Column: 9
def fix_BoxWithNMSLimit(net):
outputs = set()
for op in net.op:
if op.type == 'BoxWithNMSLimit':
outputs.add(op.output[0])
outputs.add(op.output[1])
outputs.add(op.output[2])
for op in net.op:
Reported by Pylint.
Line: 29
Column: 9
outputs.add(op.output[0])
outputs.add(op.output[1])
outputs.add(op.output[2])
for op in net.op:
if op.type == 'CopyIDEEPToCPU':
if op.input[0] in outputs:
print("Chaning CopyIDEEPToCPU to Copy for {}".format(op.input[0]))
op.type = 'Copy'
op.device_option.device_type = caffe2_pb2.CPU
Reported by Pylint.
tools/coverage_plugins_package/src/coverage_plugins/jit_plugin.py
24 issues
Line: 11
Column: 1
marked as covered.
'''
from coverage import CoveragePlugin, CoverageData # type: ignore[import]
from inspect import ismodule, isclass, ismethod, isfunction, iscode, getsourcefile, getsourcelines
from time import time
from typing import Any
# All coverage stats resulting from this plug-in will be in a separate .coverage file that should be merged later with
Reported by Pylint.
Line: 45
Column: 3
filename = getsourcefile(obj)
# We don't want to report for filename = None
if filename:
# TODO: Because torch.jit._IgnoreContextManager relies on Python's `exec` method
# which doesn't generate source codelines, getsourcelines(obj) fails. For now,
# we just ignore the exception until we figure out a better way to
# implement torch.jit._IgnoreContextManager.
try:
sourcelines, starting_lineno = getsourcelines(obj)
Reported by Pylint.
Line: 58
Column: 29
cov_data.add_lines(line_data)
super().dynamic_context(frame)
def coverage_init(reg: Any, options: Any) -> None:
reg.add_dynamic_context(JitPlugin())
Reported by Pylint.
Line: 2
Column: 1
'''
This coverage plug-in attempts to cover JIT'd functions and methods that were previously missed in code coverage. Any
function and method that was passed through/decorated with torch.jit.script or torch.jit.script_method should now be
marked covered when coverage is run with this plug-in.
DISCLAIMER: note that this will mark the entire JIT'd function/method as covered without seeking proof that the
compiled code has been executed. This means that even if the code chunk is merely compiled and not run, it will get
marked as covered.
'''
Reported by Pylint.
Line: 3
Column: 1
'''
This coverage plug-in attempts to cover JIT'd functions and methods that were previously missed in code coverage. Any
function and method that was passed through/decorated with torch.jit.script or torch.jit.script_method should now be
marked covered when coverage is run with this plug-in.
DISCLAIMER: note that this will mark the entire JIT'd function/method as covered without seeking proof that the
compiled code has been executed. This means that even if the code chunk is merely compiled and not run, it will get
marked as covered.
'''
Reported by Pylint.
Line: 6
Column: 1
function and method that was passed through/decorated with torch.jit.script or torch.jit.script_method should now be
marked covered when coverage is run with this plug-in.
DISCLAIMER: note that this will mark the entire JIT'd function/method as covered without seeking proof that the
compiled code has been executed. This means that even if the code chunk is merely compiled and not run, it will get
marked as covered.
'''
from coverage import CoveragePlugin, CoverageData # type: ignore[import]
Reported by Pylint.
Line: 7
Column: 1
marked covered when coverage is run with this plug-in.
DISCLAIMER: note that this will mark the entire JIT'd function/method as covered without seeking proof that the
compiled code has been executed. This means that even if the code chunk is merely compiled and not run, it will get
marked as covered.
'''
from coverage import CoveragePlugin, CoverageData # type: ignore[import]
from inspect import ismodule, isclass, ismethod, isfunction, iscode, getsourcefile, getsourcelines
Reported by Pylint.
Line: 12
Column: 1
'''
from coverage import CoveragePlugin, CoverageData # type: ignore[import]
from inspect import ismodule, isclass, ismethod, isfunction, iscode, getsourcefile, getsourcelines
from time import time
from typing import Any
# All coverage stats resulting from this plug-in will be in a separate .coverage file that should be merged later with
# `coverage combine`. The convention seems to be .coverage.dotted.suffix based on the following link:
Reported by Pylint.
Line: 13
Column: 1
from coverage import CoveragePlugin, CoverageData # type: ignore[import]
from inspect import ismodule, isclass, ismethod, isfunction, iscode, getsourcefile, getsourcelines
from time import time
from typing import Any
# All coverage stats resulting from this plug-in will be in a separate .coverage file that should be merged later with
# `coverage combine`. The convention seems to be .coverage.dotted.suffix based on the following link:
# https://coverage.readthedocs.io/en/coverage-5.5/cmd.html#combining-data-files-coverage-combine
Reported by Pylint.
Line: 14
Column: 1
from coverage import CoveragePlugin, CoverageData # type: ignore[import]
from inspect import ismodule, isclass, ismethod, isfunction, iscode, getsourcefile, getsourcelines
from time import time
from typing import Any
# All coverage stats resulting from this plug-in will be in a separate .coverage file that should be merged later with
# `coverage combine`. The convention seems to be .coverage.dotted.suffix based on the following link:
# https://coverage.readthedocs.io/en/coverage-5.5/cmd.html#combining-data-files-coverage-combine
cov_data = CoverageData(basename=f'.coverage.jit.{time()}')
Reported by Pylint.
torch/quantization/fuser_method_mappings.py
24 issues
Line: 6
Column: 1
from typing import Union, Callable, Tuple, Dict, Optional, Type
from .utils import get_combined_dict
def fuse_conv_bn(conv, bn):
r"""Given the conv and bn modules, fuses them and returns the fused module
Args:
Reported by Pylint.
Line: 1
Column: 1
import torch.nn as nn
import torch.nn.intrinsic as nni
from typing import Union, Callable, Tuple, Dict, Optional, Type
from .utils import get_combined_dict
def fuse_conv_bn(conv, bn):
r"""Given the conv and bn modules, fuses them and returns the fused module
Reported by Pylint.
Line: 4
Column: 1
import torch.nn as nn
import torch.nn.intrinsic as nni
from typing import Union, Callable, Tuple, Dict, Optional, Type
from .utils import get_combined_dict
def fuse_conv_bn(conv, bn):
r"""Given the conv and bn modules, fuses them and returns the fused module
Reported by Pylint.
Line: 8
Column: 1
from .utils import get_combined_dict
def fuse_conv_bn(conv, bn):
r"""Given the conv and bn modules, fuses them and returns the fused module
Args:
conv: Module instance of type conv2d/conv3d
bn: Spatial BN instance that needs to be fused with the conv
Reported by Pylint.
Line: 21
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
>>> b1 = nn.BatchNorm2d(20)
>>> m2 = fuse_conv_bn(m1, b1)
"""
assert(conv.training == bn.training),\
"Conv and BN both must be in the same mode (train or eval)."
fused_module_class_map = {
nn.Conv1d: nni.ConvBn1d,
nn.Conv2d: nni.ConvBn2d,
Reported by Bandit.
Line: 31
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
}
if conv.training:
assert bn.num_features == conv.out_channels, 'Output channel of Conv2d must match num_features of BatchNorm2d'
assert bn.affine, 'Only support fusing BatchNorm2d with affine set to True'
assert bn.track_running_stats, 'Only support fusing BatchNorm2d with tracking_running_stats set to True'
fused_module_class = fused_module_class_map.get((type(conv)), None)
if fused_module_class is not None:
return fused_module_class(conv, bn)
Reported by Bandit.
Line: 31
Column: 1
}
if conv.training:
assert bn.num_features == conv.out_channels, 'Output channel of Conv2d must match num_features of BatchNorm2d'
assert bn.affine, 'Only support fusing BatchNorm2d with affine set to True'
assert bn.track_running_stats, 'Only support fusing BatchNorm2d with tracking_running_stats set to True'
fused_module_class = fused_module_class_map.get((type(conv)), None)
if fused_module_class is not None:
return fused_module_class(conv, bn)
Reported by Pylint.
Line: 32
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
if conv.training:
assert bn.num_features == conv.out_channels, 'Output channel of Conv2d must match num_features of BatchNorm2d'
assert bn.affine, 'Only support fusing BatchNorm2d with affine set to True'
assert bn.track_running_stats, 'Only support fusing BatchNorm2d with tracking_running_stats set to True'
fused_module_class = fused_module_class_map.get((type(conv)), None)
if fused_module_class is not None:
return fused_module_class(conv, bn)
else:
Reported by Bandit.
Line: 33
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
if conv.training:
assert bn.num_features == conv.out_channels, 'Output channel of Conv2d must match num_features of BatchNorm2d'
assert bn.affine, 'Only support fusing BatchNorm2d with affine set to True'
assert bn.track_running_stats, 'Only support fusing BatchNorm2d with tracking_running_stats set to True'
fused_module_class = fused_module_class_map.get((type(conv)), None)
if fused_module_class is not None:
return fused_module_class(conv, bn)
else:
raise NotImplementedError("Cannot fuse train modules: {}".format((conv, bn)))
Reported by Bandit.
Line: 33
Column: 1
if conv.training:
assert bn.num_features == conv.out_channels, 'Output channel of Conv2d must match num_features of BatchNorm2d'
assert bn.affine, 'Only support fusing BatchNorm2d with affine set to True'
assert bn.track_running_stats, 'Only support fusing BatchNorm2d with tracking_running_stats set to True'
fused_module_class = fused_module_class_map.get((type(conv)), None)
if fused_module_class is not None:
return fused_module_class(conv, bn)
else:
raise NotImplementedError("Cannot fuse train modules: {}".format((conv, bn)))
Reported by Pylint.
.circleci/ecr_gc_docker/gc.py
24 issues
Line: 4
Column: 1
#!/usr/bin/env python3
import argparse
import boto3
import datetime
import pytz
import re
import sys
Reported by Pylint.
Line: 6
Column: 1
import argparse
import boto3
import datetime
import pytz
import re
import sys
def save_to_s3(project, data):
Reported by Pylint.
Line: 13
Column: 5
def save_to_s3(project, data):
table_content = ""
client = boto3.client("s3")
for repo, tag, window, age, pushed in data:
table_content += "<tr><td>{repo}</td><td>{tag}</td><td>{window}</td><td>{age}</td><td>{pushed}</td></tr>".format(
repo=repo, tag=tag, window=window, age=age, pushed=pushed
)
html_body = """
Reported by Pylint.
Line: 14
Column: 9
def save_to_s3(project, data):
table_content = ""
client = boto3.client("s3")
for repo, tag, window, age, pushed in data:
table_content += "<tr><td>{repo}</td><td>{tag}</td><td>{window}</td><td>{age}</td><td>{pushed}</td></tr>".format(
repo=repo, tag=tag, window=window, age=age, pushed=pushed
)
html_body = """
<html>
Reported by Pylint.
Line: 14
Column: 15
def save_to_s3(project, data):
table_content = ""
client = boto3.client("s3")
for repo, tag, window, age, pushed in data:
table_content += "<tr><td>{repo}</td><td>{tag}</td><td>{window}</td><td>{age}</td><td>{pushed}</td></tr>".format(
repo=repo, tag=tag, window=window, age=age, pushed=pushed
)
html_body = """
<html>
Reported by Pylint.
Line: 14
Column: 28
def save_to_s3(project, data):
table_content = ""
client = boto3.client("s3")
for repo, tag, window, age, pushed in data:
table_content += "<tr><td>{repo}</td><td>{tag}</td><td>{window}</td><td>{age}</td><td>{pushed}</td></tr>".format(
repo=repo, tag=tag, window=window, age=age, pushed=pushed
)
html_body = """
<html>
Reported by Pylint.
Line: 14
Column: 20
def save_to_s3(project, data):
table_content = ""
client = boto3.client("s3")
for repo, tag, window, age, pushed in data:
table_content += "<tr><td>{repo}</td><td>{tag}</td><td>{window}</td><td>{age}</td><td>{pushed}</td></tr>".format(
repo=repo, tag=tag, window=window, age=age, pushed=pushed
)
html_body = """
<html>
Reported by Pylint.
Line: 69
Column: 11
)
def repos(client):
paginator = client.get_paginator("describe_repositories")
pages = paginator.paginate(registryId="308535385114")
for page in pages:
for repo in page["repositories"]:
yield repo
Reported by Pylint.
Line: 73
Column: 13
paginator = client.get_paginator("describe_repositories")
pages = paginator.paginate(registryId="308535385114")
for page in pages:
for repo in page["repositories"]:
yield repo
def images(client, repository):
paginator = client.get_paginator("describe_images")
Reported by Pylint.
Line: 77
Column: 12
yield repo
def images(client, repository):
paginator = client.get_paginator("describe_images")
pages = paginator.paginate(
registryId="308535385114", repositoryName=repository["repositoryName"]
)
for page in pages:
Reported by Pylint.
test/package/test_digraph.py
24 issues
Line: 1
Column: 1
from torch.package._digraph import DiGraph
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
from common import PackageTestCase
Reported by Pylint.
Line: 2
Column: 1
from torch.package._digraph import DiGraph
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
from common import PackageTestCase
Reported by Pylint.
Line: 1
Column: 1
from torch.package._digraph import DiGraph
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
from common import PackageTestCase
Reported by Pylint.
Line: 14
Column: 5
class TestDiGraph(PackageTestCase):
"""Test the DiGraph structure we use to represent dependencies in PackageExporter"""
def test_successors(self):
g = DiGraph()
g.add_edge("foo", "bar")
g.add_edge("foo", "baz")
g.add_node("qux")
Reported by Pylint.
Line: 15
Column: 9
"""Test the DiGraph structure we use to represent dependencies in PackageExporter"""
def test_successors(self):
g = DiGraph()
g.add_edge("foo", "bar")
g.add_edge("foo", "baz")
g.add_node("qux")
self.assertIn("bar", list(g.successors("foo")))
Reported by Pylint.
Line: 24
Column: 5
self.assertIn("baz", list(g.successors("foo")))
self.assertEqual(len(list(g.successors("qux"))), 0)
def test_predecessors(self):
g = DiGraph()
g.add_edge("foo", "bar")
g.add_edge("foo", "baz")
g.add_node("qux")
Reported by Pylint.
Line: 25
Column: 9
self.assertEqual(len(list(g.successors("qux"))), 0)
def test_predecessors(self):
g = DiGraph()
g.add_edge("foo", "bar")
g.add_edge("foo", "baz")
g.add_node("qux")
self.assertIn("foo", list(g.predecessors("bar")))
Reported by Pylint.
Line: 34
Column: 5
self.assertIn("foo", list(g.predecessors("baz")))
self.assertEqual(len(list(g.predecessors("qux"))), 0)
def test_successor_not_in_graph(self):
g = DiGraph()
with self.assertRaises(ValueError):
g.successors("not in graph")
def test_predecessor_not_in_graph(self):
Reported by Pylint.
Line: 35
Column: 9
self.assertEqual(len(list(g.predecessors("qux"))), 0)
def test_successor_not_in_graph(self):
g = DiGraph()
with self.assertRaises(ValueError):
g.successors("not in graph")
def test_predecessor_not_in_graph(self):
g = DiGraph()
Reported by Pylint.
Line: 39
Column: 5
with self.assertRaises(ValueError):
g.successors("not in graph")
def test_predecessor_not_in_graph(self):
g = DiGraph()
with self.assertRaises(ValueError):
g.predecessors("not in graph")
def test_node_attrs(self):
Reported by Pylint.
torch/fx/passes/split_utils.py
24 issues
Line: 7
Column: 1
import torch.fx
import torch.nn as nn
from torch.fx.graph import map_arg
from .tools_common import NodeList, NodeSet
@dataclass
class Component:
"""
Reported by Pylint.
Line: 35
Column: 1
gm: Optional[torch.fx.GraphModule] = None
class HolderModule(nn.Module):
"""
HolderModule is used to copy all the attributes from original module to submodules
that uses the attributes
"""
Reported by Pylint.
Line: 1
Column: 1
from dataclasses import dataclass, field
from typing import List, Optional, Dict
import torch.fx
import torch.nn as nn
from torch.fx.graph import map_arg
from .tools_common import NodeList, NodeSet
Reported by Pylint.
Line: 11
Column: 1
@dataclass
class Component:
"""
A component serves as a container for a subgraph we want to create afterwards.
"""
graph: torch.fx.Graph
Reported by Pylint.
Line: 32
Column: 5
# Mapping from get_attr node in original graph to get_attr node in `graph`.
getattr_maps: Dict[torch.fx.Node, torch.fx.Node] = field(default_factory=dict)
constructor_args: List[str] = field(default_factory=list)
gm: Optional[torch.fx.GraphModule] = None
class HolderModule(nn.Module):
"""
HolderModule is used to copy all the attributes from original module to submodules
Reported by Pylint.
Line: 43
Column: 16
def __init__(self, d):
super().__init__()
for k, v in d.items():
self.add_module(k, v)
def split_by_tags(gm: torch.fx.GraphModule, tags: List[str]) -> torch.fx.GraphModule:
"""
Reported by Pylint.
Line: 47
Column: 1
self.add_module(k, v)
def split_by_tags(gm: torch.fx.GraphModule, tags: List[str]) -> torch.fx.GraphModule:
"""
Splits a GraphModule using tags on its graph nodes. We honor the order of
tags. For example, we have tags = ["a", "b", "c"], the function will create
the initial submodules in the order of "a_0", "b_1", "c_2".
Reported by Pylint.
Line: 47
Column: 1
self.add_module(k, v)
def split_by_tags(gm: torch.fx.GraphModule, tags: List[str]) -> torch.fx.GraphModule:
"""
Splits a GraphModule using tags on its graph nodes. We honor the order of
tags. For example, we have tags = ["a", "b", "c"], the function will create
the initial submodules in the order of "a_0", "b_1", "c_2".
Reported by Pylint.
Line: 47
Column: 1
self.add_module(k, v)
def split_by_tags(gm: torch.fx.GraphModule, tags: List[str]) -> torch.fx.GraphModule:
"""
Splits a GraphModule using tags on its graph nodes. We honor the order of
tags. For example, we have tags = ["a", "b", "c"], the function will create
the initial submodules in the order of "a_0", "b_1", "c_2".
Reported by Pylint.
Line: 47
Column: 1
self.add_module(k, v)
def split_by_tags(gm: torch.fx.GraphModule, tags: List[str]) -> torch.fx.GraphModule:
"""
Splits a GraphModule using tags on its graph nodes. We honor the order of
tags. For example, we have tags = ["a", "b", "c"], the function will create
the initial submodules in the order of "a_0", "b_1", "c_2".
Reported by Pylint.
torch/cuda/amp/autocast_mode.py
24 issues
Line: 16
Column: 49
See :class:`torch.autocast`.
``torch.cuda.amp.autocast(args...)`` is equivalent to ``torch.autocast("cuda", args...)``
"""
def __init__(self, enabled=True, fast_dtype=torch.float16):
super().__init__("cuda", enabled=enabled, fast_dtype=fast_dtype)
# Casts Tensors and containers of Tensors. Special-cases passthroughs for strings and np.ndarrays, which
# may be falsely detected as "Iterables."
Reported by Pylint.
Line: 24
Column: 92
# may be falsely detected as "Iterables."
def _cast(value, dtype):
if isinstance(value, torch.Tensor):
is_eligible = (value.is_floating_point() and value.is_cuda and (value.dtype is not torch.float64))
return value.to(dtype) if is_eligible else value
elif isinstance(value, string_classes):
return value
elif HAS_NUMPY and isinstance(value, np.ndarray):
return value
Reported by Pylint.
Line: 85
Column: 42
@functools.wraps(fwd)
def decorate_fwd(*args, **kwargs):
if cast_inputs is None:
args[0]._fwd_used_autocast = torch.is_autocast_enabled()
return fwd(*args, **kwargs)
else:
autocast_context = torch.is_autocast_enabled()
args[0]._fwd_used_autocast = False
if autocast_context:
Reported by Pylint.
Line: 88
Column: 32
args[0]._fwd_used_autocast = torch.is_autocast_enabled()
return fwd(*args, **kwargs)
else:
autocast_context = torch.is_autocast_enabled()
args[0]._fwd_used_autocast = False
if autocast_context:
with autocast(enabled=False):
return fwd(*_cast(args, cast_inputs), **_cast(kwargs, cast_inputs))
else:
Reported by Pylint.
Line: 50
Column: 3
# this also works:
# @custom_fwd(cast_inputs=torch.float)
# def forward(...):
# TODO: when python 2 support is dropped, change the signature to
# def custom_fwd(fwd=None, *, cast_inputs=None) with internal changes following the link above.
def custom_fwd(fwd=None, **kwargs):
"""
Helper decorator for ``forward`` methods of custom autograd functions (subclasses of
:class:`torch.autograd.Function`). See the :ref:`example page<amp-custom-examples>` for more detail.
Reported by Pylint.
Line: 85
Column: 13
@functools.wraps(fwd)
def decorate_fwd(*args, **kwargs):
if cast_inputs is None:
args[0]._fwd_used_autocast = torch.is_autocast_enabled()
return fwd(*args, **kwargs)
else:
autocast_context = torch.is_autocast_enabled()
args[0]._fwd_used_autocast = False
if autocast_context:
Reported by Pylint.
Line: 89
Column: 13
return fwd(*args, **kwargs)
else:
autocast_context = torch.is_autocast_enabled()
args[0]._fwd_used_autocast = False
if autocast_context:
with autocast(enabled=False):
return fwd(*_cast(args, cast_inputs), **_cast(kwargs, cast_inputs))
else:
return fwd(*args, **kwargs)
Reported by Pylint.
Line: 110
Column: 23
"""
@functools.wraps(bwd)
def decorate_bwd(*args, **kwargs):
with autocast(args[0]._fwd_used_autocast):
return bwd(*args, **kwargs)
return decorate_bwd
Reported by Pylint.
Line: 1
Column: 1
import torch
import functools
import collections
try:
import numpy as np
HAS_NUMPY = True
except ModuleNotFoundError:
HAS_NUMPY = False
from torch._six import string_classes
Reported by Pylint.
Line: 2
Column: 1
import torch
import functools
import collections
try:
import numpy as np
HAS_NUMPY = True
except ModuleNotFoundError:
HAS_NUMPY = False
from torch._six import string_classes
Reported by Pylint.
torch/distributed/pipeline/sync/skip/portal.py
24 issues
Line: 21
Column: 1
import torch
from torch import Tensor
from ..copy import Context as CopyContext
from ..copy import Copy
from ..phony import get_phony
from ..stream import AbstractStream, get_device
__all__: List[str] = []
Reported by Pylint.
Line: 22
Column: 1
from torch import Tensor
from ..copy import Context as CopyContext
from ..copy import Copy
from ..phony import get_phony
from ..stream import AbstractStream, get_device
__all__: List[str] = []
Reported by Pylint.
Line: 23
Column: 1
from ..copy import Context as CopyContext
from ..copy import Copy
from ..phony import get_phony
from ..stream import AbstractStream, get_device
__all__: List[str] = []
Reported by Pylint.
Line: 24
Column: 1
from ..copy import Context as CopyContext
from ..copy import Copy
from ..phony import get_phony
from ..stream import AbstractStream, get_device
__all__: List[str] = []
class Portal:
Reported by Pylint.
Line: 51
Column: 30
tensor = self.use_tensor()
if tensor is None:
return get_phony(torch.device("cpu"), requires_grad=False)
return PortalBlue.apply(self, tensor)
def orange(self, phony: Tensor) -> Optional[Tensor]:
"""Creates a :class:`PortalOrange` which retrieves the hidden tensor
Reported by Pylint.
Line: 84
Column: 30
"""
if self.tensor is None:
return get_phony(torch.device("cpu"), requires_grad=False)
return PortalCopy.apply(self, prev_stream, next_stream, phony)
def check_tensor_life(self) -> None:
if self.tensor_life <= 0:
Reported by Pylint.
Line: 136
Column: 13
self.tensor_life -= 1
if self.tensor_life <= 0:
self.tensor = None
return tensor
def put_grad(self, grad: Tensor) -> None:
"""Stores a gradient into this portal."""
Reported by Pylint.
Line: 166
Column: 5
"""Hides a tensor from the autograd engine by a :class:`Portal`."""
@staticmethod
# type: ignore[override]
def forward(
ctx: Context,
portal: Portal,
# This tensor must be retrieved by portal.use_tensor().
tensor: Tensor,
Reported by Pylint.
Line: 179
Column: 5
return phony.detach()
@staticmethod
# type: ignore[override]
def backward(ctx: Context, grad_phony: Tensor,) -> Tuple[None, Tensor]:
# The paired PortalOrange should keep the gradient.
grad = ctx.portal.use_grad()
return None, grad
Reported by Pylint.
Line: 180
Column: 32
@staticmethod
# type: ignore[override]
def backward(ctx: Context, grad_phony: Tensor,) -> Tuple[None, Tensor]:
# The paired PortalOrange should keep the gradient.
grad = ctx.portal.use_grad()
return None, grad
Reported by Pylint.
torch/distributions/negative_binomial.py
24 issues
Line: 43
Column: 23
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(NegativeBinomial, _instance)
batch_shape = torch.Size(batch_shape)
new.total_count = self.total_count.expand(batch_shape)
if 'probs' in self.__dict__:
new.probs = self.probs.expand(batch_shape)
new._param = new.probs
if 'logits' in self.__dict__:
Reported by Pylint.
Line: 60
Column: 35
@property
def mean(self):
return self.total_count * torch.exp(self.logits)
@property
def variance(self):
return self.mean / torch.sigmoid(-self.logits)
Reported by Pylint.
Line: 64
Column: 28
@property
def variance(self):
return self.mean / torch.sigmoid(-self.logits)
@lazy_property
def logits(self):
return probs_to_logits(self.probs, is_binary=True)
Reported by Pylint.
Line: 67
Column: 5
return self.mean / torch.sigmoid(-self.logits)
@lazy_property
def logits(self):
return probs_to_logits(self.probs, is_binary=True)
@lazy_property
def probs(self):
return logits_to_probs(self.logits, is_binary=True)
Reported by Pylint.
Line: 71
Column: 5
return probs_to_logits(self.probs, is_binary=True)
@lazy_property
def probs(self):
return logits_to_probs(self.logits, is_binary=True)
@property
def param_shape(self):
return self._param.size()
Reported by Pylint.
Line: 82
Column: 47
def _gamma(self):
# Note we avoid validating because self.total_count can be zero.
return torch.distributions.Gamma(concentration=self.total_count,
rate=torch.exp(-self.logits),
validate_args=False)
def sample(self, sample_shape=torch.Size()):
with torch.no_grad():
rate = self._gamma.sample(sample_shape=sample_shape)
Reported by Pylint.
Line: 85
Column: 35
rate=torch.exp(-self.logits),
validate_args=False)
def sample(self, sample_shape=torch.Size()):
with torch.no_grad():
rate = self._gamma.sample(sample_shape=sample_shape)
return torch.poisson(rate)
def log_prob(self, value):
Reported by Pylint.
Line: 88
Column: 20
def sample(self, sample_shape=torch.Size()):
with torch.no_grad():
rate = self._gamma.sample(sample_shape=sample_shape)
return torch.poisson(rate)
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
Reported by Pylint.
Line: 97
Column: 31
log_unnormalized_prob = (self.total_count * F.logsigmoid(-self.logits) +
value * F.logsigmoid(self.logits))
log_normalization = (-torch.lgamma(self.total_count + value) + torch.lgamma(1. + value) +
torch.lgamma(self.total_count))
return log_unnormalized_prob - log_normalization
Reported by Pylint.
Line: 97
Column: 72
log_unnormalized_prob = (self.total_count * F.logsigmoid(-self.logits) +
value * F.logsigmoid(self.logits))
log_normalization = (-torch.lgamma(self.total_count + value) + torch.lgamma(1. + value) +
torch.lgamma(self.total_count))
return log_unnormalized_prob - log_normalization
Reported by Pylint.
caffe2/python/operator_test/jsd_ops_test.py
24 issues
Line: 9
Column: 1
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
def entropy(p):
q = 1. - p
Reported by Pylint.
Line: 22
Column: 18
return [entropy(p / 2. + q / 2.) - entropy(p) / 2. - entropy(q) / 2.]
def jsd_grad(go, o, pq_list):
p, q = pq_list
m = (p + q) / 2.
return [np.log(p * (1 - m) / (1 - p) / m) / 2. * go, None]
Reported by Pylint.
Line: 30
Column: 41
class TestJSDOps(serial.SerializedTestCase):
@serial.given(n=st.integers(10, 100), **hu.gcs_cpu_only)
def test_bernoulli_jsd(self, n, gc, dc):
p = np.random.rand(n).astype(np.float32)
q = np.random.rand(n).astype(np.float32)
op = core.CreateOperator("BernoulliJSD", ["p", "q"], ["l"])
self.assertReferenceChecks(
device_option=gc,
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
Reported by Pylint.
Line: 13
Column: 1
import numpy as np
def entropy(p):
q = 1. - p
return -p * np.log(p) - q * np.log(q)
def jsd(p, q):
Reported by Pylint.
Line: 13
Column: 1
import numpy as np
def entropy(p):
q = 1. - p
return -p * np.log(p) - q * np.log(q)
def jsd(p, q):
Reported by Pylint.
Line: 14
Column: 5
def entropy(p):
q = 1. - p
return -p * np.log(p) - q * np.log(q)
def jsd(p, q):
return [entropy(p / 2. + q / 2.) - entropy(p) / 2. - entropy(q) / 2.]
Reported by Pylint.
Line: 18
Column: 1
return -p * np.log(p) - q * np.log(q)
def jsd(p, q):
return [entropy(p / 2. + q / 2.) - entropy(p) / 2. - entropy(q) / 2.]
def jsd_grad(go, o, pq_list):
p, q = pq_list
Reported by Pylint.
Line: 18
Column: 1
return -p * np.log(p) - q * np.log(q)
def jsd(p, q):
return [entropy(p / 2. + q / 2.) - entropy(p) / 2. - entropy(q) / 2.]
def jsd_grad(go, o, pq_list):
p, q = pq_list
Reported by Pylint.
Line: 18
Column: 1
return -p * np.log(p) - q * np.log(q)
def jsd(p, q):
return [entropy(p / 2. + q / 2.) - entropy(p) / 2. - entropy(q) / 2.]
def jsd_grad(go, o, pq_list):
p, q = pq_list
Reported by Pylint.