The following issues were found:
test/package/package_b/subpackage_1.py
7 issues
Line: 1
Column: 1
result = "subpackage_1"
class PackageBSubpackage1Object_0:
__slots__ = ["obj"]
def __init__(self, obj):
self.obj = obj
Reported by Pylint.
Line: 1
Column: 1
result = "subpackage_1"
class PackageBSubpackage1Object_0:
__slots__ = ["obj"]
def __init__(self, obj):
self.obj = obj
Reported by Pylint.
Line: 4
Column: 1
result = "subpackage_1"
class PackageBSubpackage1Object_0:
__slots__ = ["obj"]
def __init__(self, obj):
self.obj = obj
Reported by Pylint.
Line: 4
Column: 1
result = "subpackage_1"
class PackageBSubpackage1Object_0:
__slots__ = ["obj"]
def __init__(self, obj):
self.obj = obj
Reported by Pylint.
Line: 4
Column: 1
result = "subpackage_1"
class PackageBSubpackage1Object_0:
__slots__ = ["obj"]
def __init__(self, obj):
self.obj = obj
Reported by Pylint.
Line: 10
Column: 5
def __init__(self, obj):
self.obj = obj
def return_result(self):
return result
Reported by Pylint.
Line: 10
Column: 5
def __init__(self, obj):
self.obj = obj
def return_result(self):
return result
Reported by Pylint.
test/test_kernel_launch_checks.py
7 issues
Line: 1
Column: 1
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._check_kernel_launches import check_cuda_kernel_launches, check_code_for_cuda_kernel_launches
class AlwaysCheckCudaLaunchTest(TestCase):
def test_check_code(self):
"""Verifies that the regex works for a few different situations"""
# Try some different spacings
Reported by Pylint.
Line: 2
Column: 1
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._check_kernel_launches import check_cuda_kernel_launches, check_code_for_cuda_kernel_launches
class AlwaysCheckCudaLaunchTest(TestCase):
def test_check_code(self):
"""Verifies that the regex works for a few different situations"""
# Try some different spacings
Reported by Pylint.
Line: 1
Column: 1
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._check_kernel_launches import check_cuda_kernel_launches, check_code_for_cuda_kernel_launches
class AlwaysCheckCudaLaunchTest(TestCase):
def test_check_code(self):
"""Verifies that the regex works for a few different situations"""
# Try some different spacings
Reported by Pylint.
Line: 2
Column: 1
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._check_kernel_launches import check_cuda_kernel_launches, check_code_for_cuda_kernel_launches
class AlwaysCheckCudaLaunchTest(TestCase):
def test_check_code(self):
"""Verifies that the regex works for a few different situations"""
# Try some different spacings
Reported by Pylint.
Line: 5
Column: 1
from torch.testing._check_kernel_launches import check_cuda_kernel_launches, check_code_for_cuda_kernel_launches
class AlwaysCheckCudaLaunchTest(TestCase):
def test_check_code(self):
"""Verifies that the regex works for a few different situations"""
# Try some different spacings
self.assertEqual(2, check_code_for_cuda_kernel_launches("""
Reported by Pylint.
Line: 71
Column: 5
C10_CUDA_KERNEL_LAUNCH_CHECK();
"""))
def test_check_cuda_launches(self):
unsafeLaunchesCount = check_cuda_kernel_launches()
self.assertTrue(unsafeLaunchesCount == 0)
if __name__ == '__main__':
Reported by Pylint.
Line: 72
Column: 9
"""))
def test_check_cuda_launches(self):
unsafeLaunchesCount = check_cuda_kernel_launches()
self.assertTrue(unsafeLaunchesCount == 0)
if __name__ == '__main__':
run_tests()
Reported by Pylint.
torch/distributions/exp_family.py
7 issues
Line: 1
Column: 1
import torch
from torch.distributions.distribution import Distribution
class ExponentialFamily(Distribution):
r"""
ExponentialFamily is the abstract base class for probability distributions belonging to an
exponential family, whose probability mass/density function has the form defined below
Reported by Pylint.
Line: 14
Column: 1
p_{F}(x; \theta) = \exp(\langle t(x), \theta\rangle - F(\theta) + k(x))
where :math:`\theta` denotes the natural parameters, :math:`t(x)` denotes the sufficient statistic,
:math:`F(\theta)` is the log normalizer function for a given family and :math:`k(x)` is the carrier
measure.
Note:
This class is an intermediary between the `Distribution` class and distributions which belong
Reported by Pylint.
Line: 15
Column: 1
p_{F}(x; \theta) = \exp(\langle t(x), \theta\rangle - F(\theta) + k(x))
where :math:`\theta` denotes the natural parameters, :math:`t(x)` denotes the sufficient statistic,
:math:`F(\theta)` is the log normalizer function for a given family and :math:`k(x)` is the carrier
measure.
Note:
This class is an intermediary between the `Distribution` class and distributions which belong
to an exponential family mainly to check the correctness of the `.entropy()` and analytic KL
Reported by Pylint.
Line: 19
Column: 1
measure.
Note:
This class is an intermediary between the `Distribution` class and distributions which belong
to an exponential family mainly to check the correctness of the `.entropy()` and analytic KL
divergence methods. We use this class to compute the entropy and KL divergence using the AD
framework and Bregman divergences (courtesy of: Frank Nielsen and Richard Nock, Entropies and
Cross-entropies of Exponential Families).
"""
Reported by Pylint.
Line: 22
Column: 1
This class is an intermediary between the `Distribution` class and distributions which belong
to an exponential family mainly to check the correctness of the `.entropy()` and analytic KL
divergence methods. We use this class to compute the entropy and KL divergence using the AD
framework and Bregman divergences (courtesy of: Frank Nielsen and Richard Nock, Entropies and
Cross-entropies of Exponential Families).
"""
@property
def _natural_params(self):
Reported by Pylint.
Line: 58
Column: 13
lg_normal = self._log_normalizer(*nparams)
gradients = torch.autograd.grad(lg_normal.sum(), nparams, create_graph=True)
result += lg_normal
for np, g in zip(nparams, gradients):
result -= np * g
return result
Reported by Pylint.
Line: 58
Column: 17
lg_normal = self._log_normalizer(*nparams)
gradients = torch.autograd.grad(lg_normal.sum(), nparams, create_graph=True)
result += lg_normal
for np, g in zip(nparams, gradients):
result -= np * g
return result
Reported by Pylint.
torch/distributions/pareto.py
7 issues
Line: 8
Column: 1
from torch.distributions.utils import broadcast_all
class Pareto(TransformedDistribution):
r"""
Samples from a Pareto Type 1 distribution.
Example::
Reported by Pylint.
Line: 1
Column: 1
from torch.distributions import constraints
from torch.distributions.exponential import Exponential
from torch.distributions.transformed_distribution import TransformedDistribution
from torch.distributions.transforms import AffineTransform, ExpTransform
from torch.distributions.utils import broadcast_all
class Pareto(TransformedDistribution):
r"""
Reported by Pylint.
Line: 28
Column: 9
self.scale, self.alpha = broadcast_all(scale, alpha)
base_dist = Exponential(self.alpha, validate_args=validate_args)
transforms = [ExpTransform(), AffineTransform(loc=0, scale=self.scale)]
super(Pareto, self).__init__(base_dist, transforms, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Pareto, _instance)
new.scale = self.scale.expand(batch_shape)
new.alpha = self.alpha.expand(batch_shape)
Reported by Pylint.
Line: 34
Column: 16
new = self._get_checked_instance(Pareto, _instance)
new.scale = self.scale.expand(batch_shape)
new.alpha = self.alpha.expand(batch_shape)
return super(Pareto, self).expand(batch_shape, _instance=new)
@property
def mean(self):
# mean is inf for alpha <= 1
a = self.alpha.clamp(min=1)
Reported by Pylint.
Line: 39
Column: 9
@property
def mean(self):
# mean is inf for alpha <= 1
a = self.alpha.clamp(min=1)
return a * self.scale / (a - 1)
@property
def variance(self):
# var is inf for alpha <= 2
Reported by Pylint.
Line: 45
Column: 9
@property
def variance(self):
# var is inf for alpha <= 2
a = self.alpha.clamp(min=2)
return self.scale.pow(2) * a / ((a - 1).pow(2) * (a - 2))
@constraints.dependent_property(is_discrete=False, event_dim=0)
def support(self):
return constraints.greater_than(self.scale)
Reported by Pylint.
Line: 53
Column: 1
return constraints.greater_than(self.scale)
def entropy(self):
return ((self.scale / self.alpha).log() + (1 + self.alpha.reciprocal()))
Reported by Pylint.
tools/stats/scribe.py
7 issues
Line: 20
Column: 5
def _send_to_scribe_via_boto3(logs: str) -> str:
# lazy import so that we don't need to introduce extra dependencies
import boto3 # type: ignore[import]
print("Scribe access token not provided, sending report via boto3...")
event = {"base64_bz2_logs": base64.b64encode(bz2.compress(logs.encode())).decode()}
client = boto3.client("lambda")
res = client.invoke(FunctionName='gh-ci-scribe-proxy', Payload=json.dumps(event).encode())
Reported by Pylint.
Line: 1
Column: 1
import base64
import bz2
import os
import json
def send_to_scribe(logs: str) -> str:
access_token = os.environ.get("SCRIBE_GRAPHQL_ACCESS_TOKEN", "")
Reported by Pylint.
Line: 7
Column: 1
import json
def send_to_scribe(logs: str) -> str:
access_token = os.environ.get("SCRIBE_GRAPHQL_ACCESS_TOKEN", "")
# boto3 can be used when the runner has IAM roles setup
# currently it's used as a fallback when SCRIBE_GRAPHQL_ACCESS_TOKEN is empty
if access_token == "":
Reported by Pylint.
Line: 12
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b105_hardcoded_password_string.html
# boto3 can be used when the runner has IAM roles setup
# currently it's used as a fallback when SCRIBE_GRAPHQL_ACCESS_TOKEN is empty
if access_token == "":
return _send_to_scribe_via_boto3(logs)
return _send_to_scribe_via_http(access_token, logs)
Reported by Bandit.
Line: 20
Column: 5
def _send_to_scribe_via_boto3(logs: str) -> str:
# lazy import so that we don't need to introduce extra dependencies
import boto3 # type: ignore[import]
print("Scribe access token not provided, sending report via boto3...")
event = {"base64_bz2_logs": base64.b64encode(bz2.compress(logs.encode())).decode()}
client = boto3.client("lambda")
res = client.invoke(FunctionName='gh-ci-scribe-proxy', Payload=json.dumps(event).encode())
Reported by Pylint.
Line: 34
Column: 5
def _send_to_scribe_via_http(access_token: str, logs: str) -> str:
# lazy import so that we don't need to introduce extra dependencies
import requests # type: ignore[import]
print("Scribe access token provided, sending report via http...")
r = requests.post(
"https://graph.facebook.com/scribe_logs",
data={"access_token": access_token, "logs": logs},
Reported by Pylint.
Line: 37
Column: 5
import requests # type: ignore[import]
print("Scribe access token provided, sending report via http...")
r = requests.post(
"https://graph.facebook.com/scribe_logs",
data={"access_token": access_token, "logs": logs},
)
r.raise_for_status()
return str(r.text)
Reported by Pylint.
torch/distributed/elastic/events/__init__.py
7 issues
Line: 32
Column: 1
from torch.distributed.elastic.events.handlers import get_logging_handler
from .api import ( # noqa: F401
Event,
EventMetadataValue,
EventSource,
NodeState,
RdzvEvent,
Reported by Pylint.
Line: 53
Column: 5
destination: The string representation of the event handler.
Available handlers found in ``handlers`` module
"""
global _events_loggers
if destination not in _events_loggers:
_events_logger = logging.getLogger(f"torchelastic-events-{destination}")
_events_logger.setLevel(os.environ.get("LOGLEVEL", "INFO"))
# Do not propagate message to the root logger
Reported by Pylint.
Line: 53
Column: 5
destination: The string representation of the event handler.
Available handlers found in ``handlers`` module
"""
global _events_loggers
if destination not in _events_loggers:
_events_logger = logging.getLogger(f"torchelastic-events-{destination}")
_events_logger.setLevel(os.environ.get("LOGLEVEL", "INFO"))
# Do not propagate message to the root logger
Reported by Pylint.
Line: 70
Column: 1
return _events_loggers[destination]
def record(event: Event, destination: str = "null") -> None:
_get_or_create_logger(destination).info(event.serialize())
def record_rdzv_event(event: RdzvEvent) -> None:
_get_or_create_logger("dynamic_rendezvous").info(event.serialize())
Reported by Pylint.
Line: 73
Column: 1
def record(event: Event, destination: str = "null") -> None:
_get_or_create_logger(destination).info(event.serialize())
def record_rdzv_event(event: RdzvEvent) -> None:
_get_or_create_logger("dynamic_rendezvous").info(event.serialize())
def construct_and_record_rdzv_event(
run_id: str,
Reported by Pylint.
Line: 77
Column: 1
_get_or_create_logger("dynamic_rendezvous").info(event.serialize())
def construct_and_record_rdzv_event(
run_id: str,
message: str,
node_state: NodeState,
name: str = "",
hostname: str = "",
Reported by Pylint.
Line: 77
Column: 1
_get_or_create_logger("dynamic_rendezvous").info(event.serialize())
def construct_and_record_rdzv_event(
run_id: str,
message: str,
node_state: NodeState,
name: str = "",
hostname: str = "",
Reported by Pylint.
tools/code_analyzer/op_deps_processor.py
7 issues
Line: 16
Column: 1
import yaml
from typing import Any, List
from tools.codegen.code_template import CodeTemplate
BAZEL_OUTPUT = CodeTemplate("""\
TORCH_DEPS = {
${ops}
}
Reported by Pylint.
Line: 14
Column: 1
import argparse
import yaml
from typing import Any, List
from tools.codegen.code_template import CodeTemplate
BAZEL_OUTPUT = CodeTemplate("""\
TORCH_DEPS = {
Reported by Pylint.
Line: 50
Column: 1
""")
def load_op_deps(fname: str) -> Any:
with open(fname, 'r') as stream:
return yaml.safe_load(stream)
def process_base_ops(graph: Any, base_ops: List[str]) -> None:
Reported by Pylint.
Line: 55
Column: 1
return yaml.safe_load(stream)
def process_base_ops(graph: Any, base_ops: List[str]) -> None:
# remove base ops from all `depends` lists to compress the output graph
for op in graph:
op['depends'] = [
dep for dep in op.get('depends', []) if dep['name'] not in base_ops
]
Reported by Pylint.
Line: 57
Column: 9
def process_base_ops(graph: Any, base_ops: List[str]) -> None:
# remove base ops from all `depends` lists to compress the output graph
for op in graph:
op['depends'] = [
dep for dep in op.get('depends', []) if dep['name'] not in base_ops
]
# add base ops section at the beginning
Reported by Pylint.
Line: 68
Column: 1
'depends': [{'name': name} for name in base_ops]})
def convert(
fname: str,
graph: Any,
output_template: CodeTemplate,
op_template: CodeTemplate,
op_dep_template: CodeTemplate,
Reported by Pylint.
Line: 76
Column: 9
op_dep_template: CodeTemplate,
) -> None:
ops = []
for op in graph:
op_name = op['name']
op_deps = []
for dep in op.get('depends', []):
dep_name = dep['name']
Reported by Pylint.
torch/_classes.py
7 issues
Line: 1
Column: 1
import types
import torch._C
class _ClassNamespace(types.ModuleType):
def __init__(self, name):
super(_ClassNamespace, self).__init__('torch.classes' + name)
self.name = name
def __getattr__(self, attr):
Reported by Pylint.
Line: 4
Column: 1
import types
import torch._C
class _ClassNamespace(types.ModuleType):
def __init__(self, name):
super(_ClassNamespace, self).__init__('torch.classes' + name)
self.name = name
def __getattr__(self, attr):
Reported by Pylint.
Line: 6
Column: 9
class _ClassNamespace(types.ModuleType):
def __init__(self, name):
super(_ClassNamespace, self).__init__('torch.classes' + name)
self.name = name
def __getattr__(self, attr):
proxy = torch._C._get_custom_class_python_wrapper(self.name, attr)
if proxy is None:
Reported by Pylint.
Line: 19
Column: 9
__file__ = '_classes.py'
def __init__(self):
super(_Classes, self).__init__('torch.classes')
def __getattr__(self, name):
namespace = _ClassNamespace(name)
setattr(self, name, namespace)
return namespace
Reported by Pylint.
Line: 27
Column: 5
return namespace
@property
def loaded_libraries(self):
return torch.ops.loaded_libraries
def load_library(self, path):
"""
Loads a shared library from the given path into the current process.
Reported by Pylint.
Line: 30
Column: 5
def loaded_libraries(self):
return torch.ops.loaded_libraries
def load_library(self, path):
"""
Loads a shared library from the given path into the current process.
The library being loaded may run global initialization code to register
custom classes with the PyTorch JIT runtime. This allows dynamically
Reported by Pylint.
Line: 10
Column: 17
self.name = name
def __getattr__(self, attr):
proxy = torch._C._get_custom_class_python_wrapper(self.name, attr)
if proxy is None:
raise RuntimeError(f'Class {self.name}.{attr} not registered!')
return proxy
class _Classes(types.ModuleType):
Reported by Pylint.
test/test_import_time.py
7 issues
Line: 1
Column: 1
from torch.testing._internal.common_utils import TestCase, run_tests
# these tests could eventually be changed to fail if the import/init
# time is greater than a certain threshold, but for now we just use them
# as a way to track the duration of `import torch` in our ossci-metrics
# S3 bucket (see tools/stats/print_test_stats.py)
class TestImportTime(TestCase):
def test_time_import_torch(self):
Reported by Pylint.
Line: 1
Column: 1
from torch.testing._internal.common_utils import TestCase, run_tests
# these tests could eventually be changed to fail if the import/init
# time is greater than a certain threshold, but for now we just use them
# as a way to track the duration of `import torch` in our ossci-metrics
# S3 bucket (see tools/stats/print_test_stats.py)
class TestImportTime(TestCase):
def test_time_import_torch(self):
Reported by Pylint.
Line: 8
Column: 1
# time is greater than a certain threshold, but for now we just use them
# as a way to track the duration of `import torch` in our ossci-metrics
# S3 bucket (see tools/stats/print_test_stats.py)
class TestImportTime(TestCase):
def test_time_import_torch(self):
TestCase.runWithPytorchAPIUsageStderr('import torch')
def test_time_cuda_device_count(self):
TestCase.runWithPytorchAPIUsageStderr(
Reported by Pylint.
Line: 9
Column: 5
# as a way to track the duration of `import torch` in our ossci-metrics
# S3 bucket (see tools/stats/print_test_stats.py)
class TestImportTime(TestCase):
def test_time_import_torch(self):
TestCase.runWithPytorchAPIUsageStderr('import torch')
def test_time_cuda_device_count(self):
TestCase.runWithPytorchAPIUsageStderr(
'import torch; torch.cuda.device_count()',
Reported by Pylint.
Line: 9
Column: 5
# as a way to track the duration of `import torch` in our ossci-metrics
# S3 bucket (see tools/stats/print_test_stats.py)
class TestImportTime(TestCase):
def test_time_import_torch(self):
TestCase.runWithPytorchAPIUsageStderr('import torch')
def test_time_cuda_device_count(self):
TestCase.runWithPytorchAPIUsageStderr(
'import torch; torch.cuda.device_count()',
Reported by Pylint.
Line: 12
Column: 5
def test_time_import_torch(self):
TestCase.runWithPytorchAPIUsageStderr('import torch')
def test_time_cuda_device_count(self):
TestCase.runWithPytorchAPIUsageStderr(
'import torch; torch.cuda.device_count()',
)
Reported by Pylint.
Line: 12
Column: 5
def test_time_import_torch(self):
TestCase.runWithPytorchAPIUsageStderr('import torch')
def test_time_cuda_device_count(self):
TestCase.runWithPytorchAPIUsageStderr(
'import torch; torch.cuda.device_count()',
)
Reported by Pylint.
tools/codegen/local.py
7 issues
Line: 29
Column: 20
return _locals.use_const_ref_for_mutable_tensors
@contextmanager
def parametrize(*, use_const_ref_for_mutable_tensors: bool) -> Iterator[None]:
old_use_const_ref_for_mutable_tensors = _locals.use_const_ref_for_mutable_tensors
try:
_locals.use_const_ref_for_mutable_tensors = use_const_ref_for_mutable_tensors
yield
finally:
Reported by Pylint.
Line: 1
Column: 1
import threading
from contextlib import contextmanager
from typing import Optional, Iterator
# Simple dynamic scoping implementation. The name "parametrize" comes
# from Racket.
#
# WARNING WARNING: LOOKING TO EDIT THIS FILE? Think carefully about
# why you need to add a toggle to the global behavior of code
Reported by Pylint.
Line: 17
Column: 1
# sites are eliminated. If you don't have a plan for how to get there,
# DON'T add a new entry here.
class Locals(threading.local):
use_const_ref_for_mutable_tensors: Optional[bool] = None
_locals = Locals()
def use_const_ref_for_mutable_tensors() -> bool:
Reported by Pylint.
Line: 17
Column: 1
# sites are eliminated. If you don't have a plan for how to get there,
# DON'T add a new entry here.
class Locals(threading.local):
use_const_ref_for_mutable_tensors: Optional[bool] = None
_locals = Locals()
def use_const_ref_for_mutable_tensors() -> bool:
Reported by Pylint.
Line: 22
Column: 1
_locals = Locals()
def use_const_ref_for_mutable_tensors() -> bool:
assert _locals.use_const_ref_for_mutable_tensors is not None, \
"need to initialize local.use_const_ref_for_mutable_tensors with " \
"local.parametrize"
return _locals.use_const_ref_for_mutable_tensors
Reported by Pylint.
Line: 23
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
_locals = Locals()
def use_const_ref_for_mutable_tensors() -> bool:
assert _locals.use_const_ref_for_mutable_tensors is not None, \
"need to initialize local.use_const_ref_for_mutable_tensors with " \
"local.parametrize"
return _locals.use_const_ref_for_mutable_tensors
@contextmanager
Reported by Bandit.
Line: 29
Column: 1
return _locals.use_const_ref_for_mutable_tensors
@contextmanager
def parametrize(*, use_const_ref_for_mutable_tensors: bool) -> Iterator[None]:
old_use_const_ref_for_mutable_tensors = _locals.use_const_ref_for_mutable_tensors
try:
_locals.use_const_ref_for_mutable_tensors = use_const_ref_for_mutable_tensors
yield
finally:
Reported by Pylint.