The following issues were found:
torch/quantization/__init__.py
13 issues
Line: 1
Column: 1
from .quantize import * # noqa: F403
from .observer import * # noqa: F403
from .qconfig import * # noqa: F403
from .fake_quantize import * # noqa: F403
from .fuse_modules import fuse_modules
from .stubs import * # noqa: F403
from .quant_type import * # noqa: F403
from .quantize_jit import * # noqa: F403
# from .quantize_fx import *
Reported by Pylint.
Line: 2
Column: 1
from .quantize import * # noqa: F403
from .observer import * # noqa: F403
from .qconfig import * # noqa: F403
from .fake_quantize import * # noqa: F403
from .fuse_modules import fuse_modules
from .stubs import * # noqa: F403
from .quant_type import * # noqa: F403
from .quantize_jit import * # noqa: F403
# from .quantize_fx import *
Reported by Pylint.
Line: 3
Column: 1
from .quantize import * # noqa: F403
from .observer import * # noqa: F403
from .qconfig import * # noqa: F403
from .fake_quantize import * # noqa: F403
from .fuse_modules import fuse_modules
from .stubs import * # noqa: F403
from .quant_type import * # noqa: F403
from .quantize_jit import * # noqa: F403
# from .quantize_fx import *
Reported by Pylint.
Line: 4
Column: 1
from .quantize import * # noqa: F403
from .observer import * # noqa: F403
from .qconfig import * # noqa: F403
from .fake_quantize import * # noqa: F403
from .fuse_modules import fuse_modules
from .stubs import * # noqa: F403
from .quant_type import * # noqa: F403
from .quantize_jit import * # noqa: F403
# from .quantize_fx import *
Reported by Pylint.
Line: 5
Column: 1
from .observer import * # noqa: F403
from .qconfig import * # noqa: F403
from .fake_quantize import * # noqa: F403
from .fuse_modules import fuse_modules
from .stubs import * # noqa: F403
from .quant_type import * # noqa: F403
from .quantize_jit import * # noqa: F403
# from .quantize_fx import *
from .quantization_mappings import * # noqa: F403
Reported by Pylint.
Line: 6
Column: 1
from .qconfig import * # noqa: F403
from .fake_quantize import * # noqa: F403
from .fuse_modules import fuse_modules
from .stubs import * # noqa: F403
from .quant_type import * # noqa: F403
from .quantize_jit import * # noqa: F403
# from .quantize_fx import *
from .quantization_mappings import * # noqa: F403
from .fuser_method_mappings import * # noqa: F403
Reported by Pylint.
Line: 7
Column: 1
from .fake_quantize import * # noqa: F403
from .fuse_modules import fuse_modules
from .stubs import * # noqa: F403
from .quant_type import * # noqa: F403
from .quantize_jit import * # noqa: F403
# from .quantize_fx import *
from .quantization_mappings import * # noqa: F403
from .fuser_method_mappings import * # noqa: F403
Reported by Pylint.
Line: 8
Column: 1
from .fuse_modules import fuse_modules
from .stubs import * # noqa: F403
from .quant_type import * # noqa: F403
from .quantize_jit import * # noqa: F403
# from .quantize_fx import *
from .quantization_mappings import * # noqa: F403
from .fuser_method_mappings import * # noqa: F403
def default_eval_fn(model, calib_data):
Reported by Pylint.
Line: 10
Column: 1
from .quant_type import * # noqa: F403
from .quantize_jit import * # noqa: F403
# from .quantize_fx import *
from .quantization_mappings import * # noqa: F403
from .fuser_method_mappings import * # noqa: F403
def default_eval_fn(model, calib_data):
r"""
Default evaluation function takes a torch.utils.data.Dataset or a list of
Reported by Pylint.
Line: 11
Column: 1
from .quantize_jit import * # noqa: F403
# from .quantize_fx import *
from .quantization_mappings import * # noqa: F403
from .fuser_method_mappings import * # noqa: F403
def default_eval_fn(model, calib_data):
r"""
Default evaluation function takes a torch.utils.data.Dataset or a list of
input Tensors and run the model on the dataset
Reported by Pylint.
torch/jit/unsupported_tensor_ops.py
13 issues
Line: 11
Column: 14
def _gen_unsupported_methods_properties():
tensor_attrs = set(filter(lambda x: x[0] != "_", dir(torch.Tensor)))
tensor = torch.tensor([2])
funcs_template = dedent('''
def func(x):
return x.{op}()
''')
Reported by Pylint.
Line: 7
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b102_exec_used.html
from typing import Dict, Any
def execWrapper(code, glob, loc):
exec(code, glob, loc)
def _gen_unsupported_methods_properties():
tensor_attrs = set(filter(lambda x: x[0] != "_", dir(torch.Tensor)))
tensor = torch.tensor([2])
funcs_template = dedent('''
Reported by Bandit.
Line: 7
Column: 5
from typing import Dict, Any
def execWrapper(code, glob, loc):
exec(code, glob, loc)
def _gen_unsupported_methods_properties():
tensor_attrs = set(filter(lambda x: x[0] != "_", dir(torch.Tensor)))
tensor = torch.tensor([2])
funcs_template = dedent('''
Reported by Pylint.
Line: 28
Column: 13
scope: Dict[str, Any] = {}
execWrapper(funcs_str, globals(), scope)
try:
cu = torch.jit.CompilationUnit(funcs_str)
except Exception as e:
if "nonexistent attribute" not in repr(e):
continue
attr_repr = repr(getattr(tensor, attr))
if "bound method" in attr_repr or "built-in method" in attr_repr:
Reported by Pylint.
Line: 29
Column: 16
execWrapper(funcs_str, globals(), scope)
try:
cu = torch.jit.CompilationUnit(funcs_str)
except Exception as e:
if "nonexistent attribute" not in repr(e):
continue
attr_repr = repr(getattr(tensor, attr))
if "bound method" in attr_repr or "built-in method" in attr_repr:
methods.append(attr)
Reported by Pylint.
Line: 1
Column: 1
import torch.jit
from textwrap import dedent
from typing import Dict, Any
def execWrapper(code, glob, loc):
exec(code, glob, loc)
def _gen_unsupported_methods_properties():
Reported by Pylint.
Line: 2
Column: 1
import torch.jit
from textwrap import dedent
from typing import Dict, Any
def execWrapper(code, glob, loc):
exec(code, glob, loc)
def _gen_unsupported_methods_properties():
Reported by Pylint.
Line: 4
Column: 1
import torch.jit
from textwrap import dedent
from typing import Dict, Any
def execWrapper(code, glob, loc):
exec(code, glob, loc)
def _gen_unsupported_methods_properties():
Reported by Pylint.
Line: 6
Column: 1
from typing import Dict, Any
def execWrapper(code, glob, loc):
exec(code, glob, loc)
def _gen_unsupported_methods_properties():
tensor_attrs = set(filter(lambda x: x[0] != "_", dir(torch.Tensor)))
tensor = torch.tensor([2])
Reported by Pylint.
Line: 6
Column: 1
from typing import Dict, Any
def execWrapper(code, glob, loc):
exec(code, glob, loc)
def _gen_unsupported_methods_properties():
tensor_attrs = set(filter(lambda x: x[0] != "_", dir(torch.Tensor)))
tensor = torch.tensor([2])
Reported by Pylint.
torch/testing/_internal/distributed/rpc/rpc_agent_test_fixture.py
13 issues
Line: 25
Column: 23
@property
def file_init_method(self):
return torch.testing._internal.dist_utils.INIT_METHOD_TEMPLATE.format(
file_name=self.file_name
)
@property
@abstractmethod
def rpc_backend(self):
Reported by Pylint.
Line: 24
Column: 16
@property
def file_init_method(self):
return torch.testing._internal.dist_utils.INIT_METHOD_TEMPLATE.format(
file_name=self.file_name
)
@property
@abstractmethod
Reported by Pylint.
Line: 43
Column: 9
Does nothing for other agents.
"""
pass
# Shutdown sequence is not well defined, so we may see any of the following
# errors when running tests that simulate errors via a shutdown on the
# remote end.
@abstractmethod
Reported by Pylint.
Line: 55
Column: 9
tests that check for failures. This function is used to match against
possible errors to ensure failures were raised properly.
"""
pass
@abstractmethod
def get_timeout_error_regex(self):
"""
Returns a partial string indicating the error we should receive when an
Reported by Pylint.
Line: 64
Column: 9
RPC has timed out. Useful for use with assertRaisesRegex() to ensure we
have the right errors during timeout.
"""
pass
Reported by Pylint.
Line: 1
Column: 1
import os
from abc import ABC, abstractmethod
import torch.testing._internal.dist_utils
class RpcAgentTestFixture(ABC):
@property
def world_size(self) -> int:
Reported by Pylint.
Line: 7
Column: 1
import torch.testing._internal.dist_utils
class RpcAgentTestFixture(ABC):
@property
def world_size(self) -> int:
return 4
@property
Reported by Pylint.
Line: 9
Column: 5
class RpcAgentTestFixture(ABC):
@property
def world_size(self) -> int:
return 4
@property
def init_method(self):
use_tcp_init = os.environ.get("RPC_INIT_WITH_TCP", None)
Reported by Pylint.
Line: 13
Column: 5
return 4
@property
def init_method(self):
use_tcp_init = os.environ.get("RPC_INIT_WITH_TCP", None)
if use_tcp_init == "1":
master_addr = os.environ["MASTER_ADDR"]
master_port = os.environ["MASTER_PORT"]
return f"tcp://{master_addr}:{master_port}"
Reported by Pylint.
Line: 15
Column: 9
@property
def init_method(self):
use_tcp_init = os.environ.get("RPC_INIT_WITH_TCP", None)
if use_tcp_init == "1":
master_addr = os.environ["MASTER_ADDR"]
master_port = os.environ["MASTER_PORT"]
return f"tcp://{master_addr}:{master_port}"
else:
return self.file_init_method
Reported by Pylint.
tools/amd_build/build_amd.py
13 issues
Line: 15
Column: 1
'torch',
'utils')))
from hipify import hipify_python # type: ignore[import]
parser = argparse.ArgumentParser(description='Top-level script for HIPifying, filling in most common parameters')
parser.add_argument(
'--out-of-place-only',
action='store_true',
Reported by Pylint.
Line: 125
Column: 3
except IOError:
return False
# TODO Remove once gloo submodule is recent enough to contain upstream fix.
if is_hip_clang():
gloo_cmake_file = "third_party/gloo/cmake/Hip.cmake"
do_write = False
with open(gloo_cmake_file, "r") as sources:
lines = sources.readlines()
Reported by Pylint.
Line: 154
Column: 3
sources.write(line)
print("%s updated" % gloo_cmake_file)
# TODO Remove once gloo submodule is recent enough to contain upstream fix.
if is_hip_clang():
gloo_cmake_file = "third_party/gloo/cmake/Dependencies.cmake"
do_write = False
with open(gloo_cmake_file, "r") as sources:
lines = sources.readlines()
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/env python3
import os
import argparse
import sys
sys.path.append(os.path.realpath(os.path.join(
__file__,
os.path.pardir,
Reported by Pylint.
Line: 15
Column: 1
'torch',
'utils')))
from hipify import hipify_python # type: ignore[import]
parser = argparse.ArgumentParser(description='Top-level script for HIPifying, filling in most common parameters')
parser.add_argument(
'--out-of-place-only',
action='store_true',
Reported by Pylint.
Line: 17
Column: 1
from hipify import hipify_python # type: ignore[import]
parser = argparse.ArgumentParser(description='Top-level script for HIPifying, filling in most common parameters')
parser.add_argument(
'--out-of-place-only',
action='store_true',
help="Whether to only run hipify out-of-place on source files")
Reported by Pylint.
Line: 118
Column: 1
]
# Check if the compiler is hip-clang.
def is_hip_clang() -> bool:
try:
hip_path = os.getenv('HIP_PATH', '/opt/rocm/hip')
return 'HIP_COMPILER=clang' in open(hip_path + '/lib/.hipInfo').read()
except IOError:
return False
Reported by Pylint.
Line: 127
Column: 5
# TODO Remove once gloo submodule is recent enough to contain upstream fix.
if is_hip_clang():
gloo_cmake_file = "third_party/gloo/cmake/Hip.cmake"
do_write = False
with open(gloo_cmake_file, "r") as sources:
lines = sources.readlines()
newlines = [line.replace(' hip_hcc ', ' amdhip64 ') for line in lines]
if lines == newlines:
Reported by Pylint.
Line: 128
Column: 5
# TODO Remove once gloo submodule is recent enough to contain upstream fix.
if is_hip_clang():
gloo_cmake_file = "third_party/gloo/cmake/Hip.cmake"
do_write = False
with open(gloo_cmake_file, "r") as sources:
lines = sources.readlines()
newlines = [line.replace(' hip_hcc ', ' amdhip64 ') for line in lines]
if lines == newlines:
print("%s skipped" % gloo_cmake_file)
Reported by Pylint.
Line: 140
Column: 1
sources.write(line)
print("%s updated" % gloo_cmake_file)
gloo_cmake_file = "third_party/gloo/cmake/Modules/Findrccl.cmake"
if os.path.exists(gloo_cmake_file):
do_write = False
with open(gloo_cmake_file, "r") as sources:
lines = sources.readlines()
newlines = [line.replace('RCCL_LIBRARY', 'RCCL_LIBRARY_PATH') for line in lines]
Reported by Pylint.
torch/ao/sparsity/experimental/pruner/parametrization.py
13 issues
Line: 23
Column: 32
def __call__(self, module, input, output):
max_outputs = self.param.original_outputs
pruned_outputs = self.param.pruned_outputs
reconstructed_tensor = torch.zeros((output.shape[0], len(max_outputs)))
valid_columns = list(max_outputs - pruned_outputs)
reconstructed_tensor[:, valid_columns] = output
return reconstructed_tensor
Reported by Pylint.
Line: 36
Column: 32
def __call__(self, module, input, output):
max_outputs = self.param.original_outputs
pruned_outputs = self.param.pruned_outputs
reconstructed_tensor = torch.zeros((output.shape[0], len(max_outputs), output.shape[2], output.shape[3]))
valid_columns = list(max_outputs - pruned_outputs)
reconstructed_tensor[:, valid_columns, :, :] = output
return reconstructed_tensor
Reported by Pylint.
Line: 20
Column: 32
def __init__(self, parametrization):
self.param = parametrization
def __call__(self, module, input, output):
max_outputs = self.param.original_outputs
pruned_outputs = self.param.pruned_outputs
reconstructed_tensor = torch.zeros((output.shape[0], len(max_outputs)))
valid_columns = list(max_outputs - pruned_outputs)
reconstructed_tensor[:, valid_columns] = output
Reported by Pylint.
Line: 33
Column: 32
def __init__(self, parametrization):
self.param = parametrization
def __call__(self, module, input, output):
max_outputs = self.param.original_outputs
pruned_outputs = self.param.pruned_outputs
reconstructed_tensor = torch.zeros((output.shape[0], len(max_outputs), output.shape[2], output.shape[3]))
valid_columns = list(max_outputs - pruned_outputs)
reconstructed_tensor[:, valid_columns, :, :] = output
Reported by Pylint.
Line: 1
Column: 1
import torch
from torch import nn
class PruningParametrization(nn.Module):
def __init__(self, original_outputs):
super().__init__()
self.original_outputs = set(range(original_outputs.item()))
self.pruned_outputs = set() # Will contain indicies of outputs to prune
Reported by Pylint.
Line: 5
Column: 1
from torch import nn
class PruningParametrization(nn.Module):
def __init__(self, original_outputs):
super().__init__()
self.original_outputs = set(range(original_outputs.item()))
self.pruned_outputs = set() # Will contain indicies of outputs to prune
Reported by Pylint.
Line: 11
Column: 5
self.original_outputs = set(range(original_outputs.item()))
self.pruned_outputs = set() # Will contain indicies of outputs to prune
def forward(self, x):
valid_outputs = self.original_outputs - self.pruned_outputs
return x[list(valid_outputs)]
class LinearActivationReconstruction:
Reported by Pylint.
Line: 11
Column: 5
self.original_outputs = set(range(original_outputs.item()))
self.pruned_outputs = set() # Will contain indicies of outputs to prune
def forward(self, x):
valid_outputs = self.original_outputs - self.pruned_outputs
return x[list(valid_outputs)]
class LinearActivationReconstruction:
Reported by Pylint.
Line: 16
Column: 1
return x[list(valid_outputs)]
class LinearActivationReconstruction:
def __init__(self, parametrization):
self.param = parametrization
def __call__(self, module, input, output):
max_outputs = self.param.original_outputs
Reported by Pylint.
Line: 16
Column: 1
return x[list(valid_outputs)]
class LinearActivationReconstruction:
def __init__(self, parametrization):
self.param = parametrization
def __call__(self, module, input, output):
max_outputs = self.param.original_outputs
Reported by Pylint.
torch/distributed/launcher/api.py
13 issues
Line: 158
Column: 8
2.2 otherwise, use ``entrypoint`` value.
3. Otherwise, return empty string.
"""
if isinstance(entrypoint, Callable): # type: ignore[arg-type]
return entrypoint.__name__ # type: ignore[union-attr]
elif isinstance(entrypoint, str):
if entrypoint == sys.executable:
return next((arg for arg in args if arg[0] != "-"), "")
else:
Reported by Pylint.
Line: 197
Column: 9
) -> Dict[int, Any]:
if not config.run_id:
run_id = str(uuid.uuid4().int)
logger.warning(f"config has no run_id, generate a new one: {run_id}")
config.run_id = run_id
entrypoint_name = _get_entrypoint_name(entrypoint, args)
logger.info(
Reported by Pylint.
Line: 202
Column: 5
entrypoint_name = _get_entrypoint_name(entrypoint, args)
logger.info(
f"Starting elastic_operator with launch configs:\n"
f" entrypoint : {entrypoint_name}\n"
f" min_nodes : {config.min_nodes}\n"
f" max_nodes : {config.max_nodes}\n"
f" nproc_per_node : {config.nproc_per_node}\n"
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import sys
import uuid
Reported by Pylint.
Line: 28
Column: 1
@dataclass
class LaunchConfig:
"""
Creates a rendezvous config.
Args:
min_nodes: Minimum amount of nodes that the user function will
Reported by Pylint.
Line: 46
Column: 1
rdzv_endpoint: The endpoint of the rdzv sync. storage.
rdzv_configs: Key, value pair that specifies rendezvous specific configuration.
rdzv_timeout: Legacy argument that specifies timeout for the rendezvous. It is going
to be removed in future versions, see the note below. The default timeout is 900 seconds.
rdzv_id: The unique run id of the job (if not passed a unique one will be
deduced from run environment - flow workflow id in flow - or auto generated).
role: User defined role of the worker (defaults to "trainer").
max_restarts: The maximum amount of restarts that elastic agent will conduct
on workers before failure.
Reported by Pylint.
Line: 95
Column: 1
self.rdzv_configs["timeout"] = default_timeout
class elastic_launch:
"""
Launches an torchelastic agent on the container that invoked the entrypoint.
1. Pass the ``entrypoint`` arguments as non ``kwargs`` (e.g. no named parameters)/
``entrypoint`` can be a function or a command.
Reported by Pylint.
Line: 95
Column: 1
self.rdzv_configs["timeout"] = default_timeout
class elastic_launch:
"""
Launches an torchelastic agent on the container that invoked the entrypoint.
1. Pass the ``entrypoint`` arguments as non ``kwargs`` (e.g. no named parameters)/
``entrypoint`` can be a function or a command.
Reported by Pylint.
Line: 153
Column: 1
"""Retrive entrypoint name with the rule:
1. If entrypoint is a function, use ``entrypont.__qualname__``.
2. If entrypoint is a string, check its value:
2.1 if entrypoint equals to ``sys.executable`` (like "python"), use the first element from ``args``
which does not start with hifen letter (for example, "-u" will be skipped).
2.2 otherwise, use ``entrypoint`` value.
3. Otherwise, return empty string.
"""
if isinstance(entrypoint, Callable): # type: ignore[arg-type]
Reported by Pylint.
Line: 158
Column: 5
2.2 otherwise, use ``entrypoint`` value.
3. Otherwise, return empty string.
"""
if isinstance(entrypoint, Callable): # type: ignore[arg-type]
return entrypoint.__name__ # type: ignore[union-attr]
elif isinstance(entrypoint, str):
if entrypoint == sys.executable:
return next((arg for arg in args if arg[0] != "-"), "")
else:
Reported by Pylint.
test/test_pruning_op.py
13 issues
Line: 2
Column: 1
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
import torch
from torch.testing._internal.common_utils import TestCase
import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
Reported by Pylint.
Line: 3
Column: 1
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
import torch
from torch.testing._internal.common_utils import TestCase
import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
Reported by Pylint.
Line: 5
Column: 1
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
import torch
from torch.testing._internal.common_utils import TestCase
import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
Reported by Pylint.
Line: 6
Column: 1
from hypothesis import given
import numpy as np
import torch
from torch.testing._internal.common_utils import TestCase
import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
class PruningOpTest(TestCase):
Reported by Pylint.
Line: 7
Column: 1
import numpy as np
import torch
from torch.testing._internal.common_utils import TestCase
import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
class PruningOpTest(TestCase):
Reported by Pylint.
Line: 32
Column: 20
mask = self._generate_rowwise_mask(embedding_rows)
def get_pt_result(embedding_weights, mask, indices_type):
return torch._rowwise_prune(embedding_weights, mask, indices_type)
# Reference implementation.
def get_reference_result(embedding_weights, mask, indices_type):
num_embeddings = mask.size()[0]
compressed_idx_out = torch.zeros(num_embeddings, dtype=indices_type)
Reported by Pylint.
Line: 1
Column: 1
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
import torch
from torch.testing._internal.common_utils import TestCase
import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
Reported by Pylint.
Line: 11
Column: 1
hu.assert_deadline_disabled()
class PruningOpTest(TestCase):
# Generate rowwise mask vector based on indicator and threshold value.
# indicator is a vector that contains one value per weight row and it
# represents the importance of a row.
# We mask a row if its indicator value is less than the threshold.
Reported by Pylint.
Line: 17
Column: 5
# indicator is a vector that contains one value per weight row and it
# represents the importance of a row.
# We mask a row if its indicator value is less than the threshold.
def _generate_rowwise_mask(self, embedding_rows):
indicator = torch.from_numpy((np.random.random_sample(embedding_rows)).astype(np.float32))
threshold = np.random.random_sample()
mask = torch.BoolTensor([True if val >= threshold else False for val in indicator])
return mask
Reported by Pylint.
Line: 20
Column: 34
def _generate_rowwise_mask(self, embedding_rows):
indicator = torch.from_numpy((np.random.random_sample(embedding_rows)).astype(np.float32))
threshold = np.random.random_sample()
mask = torch.BoolTensor([True if val >= threshold else False for val in indicator])
return mask
def _test_rowwise_prune_op(self, embedding_rows, embedding_dims, indices_type, weights_dtype):
embedding_weights = None
if weights_dtype in [torch.int8, torch.int16, torch.int32, torch.int64]:
Reported by Pylint.
torch/distributions/cauchy.py
13 issues
Line: 34
Column: 27
def __init__(self, loc, scale, validate_args=None):
self.loc, self.scale = broadcast_all(loc, scale)
if isinstance(loc, Number) and isinstance(scale, Number):
batch_shape = torch.Size()
else:
batch_shape = self.loc.size()
super(Cauchy, self).__init__(batch_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
Reported by Pylint.
Line: 41
Column: 23
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Cauchy, _instance)
batch_shape = torch.Size(batch_shape)
new.loc = self.loc.expand(batch_shape)
new.scale = self.scale.expand(batch_shape)
super(Cauchy, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
Reported by Pylint.
Line: 50
Column: 16
@property
def mean(self):
return torch.full(self._extended_shape(), nan, dtype=self.loc.dtype, device=self.loc.device)
@property
def variance(self):
return torch.full(self._extended_shape(), inf, dtype=self.loc.dtype, device=self.loc.device)
Reported by Pylint.
Line: 54
Column: 16
@property
def variance(self):
return torch.full(self._extended_shape(), inf, dtype=self.loc.dtype, device=self.loc.device)
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
eps = self.loc.new(shape).cauchy_()
return self.loc + eps * self.scale
Reported by Pylint.
Line: 56
Column: 36
def variance(self):
return torch.full(self._extended_shape(), inf, dtype=self.loc.dtype, device=self.loc.device)
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
eps = self.loc.new(shape).cauchy_()
return self.loc + eps * self.scale
def log_prob(self, value):
Reported by Pylint.
Line: 69
Column: 16
def cdf(self, value):
if self._validate_args:
self._validate_sample(value)
return torch.atan((value - self.loc) / self.scale) / math.pi + 0.5
def icdf(self, value):
return torch.tan(math.pi * (value - 0.5)) * self.scale + self.loc
def entropy(self):
Reported by Pylint.
Line: 72
Column: 16
return torch.atan((value - self.loc) / self.scale) / math.pi + 0.5
def icdf(self, value):
return torch.tan(math.pi * (value - 0.5)) * self.scale + self.loc
def entropy(self):
return math.log(4 * math.pi) + self.scale.log()
Reported by Pylint.
Line: 11
Column: 1
from torch.distributions.utils import broadcast_all
class Cauchy(Distribution):
r"""
Samples from a Cauchy (Lorentz) distribution. The distribution of the ratio of
independent normally distributed random variables with means `0` follows a
Cauchy distribution.
Reported by Pylint.
Line: 45
Column: 9
new.loc = self.loc.expand(batch_shape)
new.scale = self.scale.expand(batch_shape)
super(Cauchy, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
@property
def mean(self):
return torch.full(self._extended_shape(), nan, dtype=self.loc.dtype, device=self.loc.device)
Reported by Pylint.
Line: 1
Column: 1
import math
from torch._six import inf, nan
from numbers import Number
import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all
Reported by Pylint.
torch/distributed/elastic/multiprocessing/tail_log.py
13 issues
Line: 132
Column: 20
for local_rank, f in enumerate(self._futs):
try:
f.result()
except Exception as e:
log.error(
f"error in log tailor for {self._name}{local_rank}."
f" {e.__class__.__qualname__}: {e}",
)
Reported by Pylint.
Line: 133
Column: 17
try:
f.result()
except Exception as e:
log.error(
f"error in log tailor for {self._name}{local_rank}."
f" {e.__class__.__qualname__}: {e}",
)
if self._threadpool:
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
Reported by Pylint.
Line: 21
Column: 1
log = logging.getLogger(__name__)
def tail_logfile(
header: str, file: str, dst: TextIO, finished: Event, interval_sec: float
):
while not os.path.exists(file):
if finished.is_set():
Reported by Pylint.
Line: 30
Column: 29
return
time.sleep(interval_sec)
with open(file, "r") as fp:
while True:
line = fp.readline()
if line:
dst.write(f"{header}{line}")
Reported by Pylint.
Line: 37
Column: 17
if line:
dst.write(f"{header}{line}")
else: # reached EOF
if finished.is_set():
# log line producer is finished
break
else:
# log line producer is still going
# wait for a bit before looping again
Reported by Pylint.
Line: 46
Column: 1
time.sleep(interval_sec)
class TailLog:
"""
Tails the given log files. The log files do not have to exist when the
``start()`` method is called. The tail-er will gracefully wait until the
log files are created by the producer and will tail the contents of the
log files until the ``stop()`` method is called.
Reported by Pylint.
Line: 90
Column: 9
dst: TextIO,
interval_sec: float = 0.1,
):
n = len(log_files)
self._threadpool = None
if n > 0:
self._threadpool = ThreadPoolExecutor(
max_workers=n,
thread_name_prefix=f"{self.__class__.__qualname__}_{name}",
Reported by Pylint.
Line: 108
Column: 5
self._interval_sec = interval_sec
self._stopped = False
def start(self) -> "TailLog":
if not self._threadpool:
return self
for local_rank, file in self._log_files.items():
self._futs.append(
Reported by Pylint.
Line: 125
Column: 5
)
return self
def stop(self) -> None:
for finished in self._finished_events.values():
finished.set()
for local_rank, f in enumerate(self._futs):
try:
Reported by Pylint.
tools/gdb/pytorch-gdb.py
13 issues
Line: 1
Column: 1
import gdb # type: ignore[import]
import textwrap
from typing import Any
class DisableBreakpoints:
"""
Context-manager to temporarily disable all gdb breakpoints, useful if
there is a risk to hit one during the evaluation of one of our custom
commands
Reported by Pylint.
Line: 13
Column: 9
"""
def __enter__(self) -> None:
self.disabled_breakpoints = []
for b in gdb.breakpoints():
if b.enabled:
b.enabled = False
self.disabled_breakpoints.append(b)
Reported by Pylint.
Line: 39
Column: 33
gdb.Command.__init__(self, 'torch-tensor-repr',
gdb.COMMAND_USER, gdb.COMPLETE_EXPRESSION)
def invoke(self, args: str, from_tty: bool) -> None:
args = gdb.string_to_argv(args)
if len(args) != 1:
print('Usage: torch-tensor-repr EXP')
return
name = args[0]
Reported by Pylint.
Line: 1
Column: 1
import gdb # type: ignore[import]
import textwrap
from typing import Any
class DisableBreakpoints:
"""
Context-manager to temporarily disable all gdb breakpoints, useful if
there is a risk to hit one during the evaluation of one of our custom
commands
Reported by Pylint.
Line: 1
Column: 1
import gdb # type: ignore[import]
import textwrap
from typing import Any
class DisableBreakpoints:
"""
Context-manager to temporarily disable all gdb breakpoints, useful if
there is a risk to hit one during the evaluation of one of our custom
commands
Reported by Pylint.
Line: 2
Column: 1
import gdb # type: ignore[import]
import textwrap
from typing import Any
class DisableBreakpoints:
"""
Context-manager to temporarily disable all gdb breakpoints, useful if
there is a risk to hit one during the evaluation of one of our custom
commands
Reported by Pylint.
Line: 3
Column: 1
import gdb # type: ignore[import]
import textwrap
from typing import Any
class DisableBreakpoints:
"""
Context-manager to temporarily disable all gdb breakpoints, useful if
there is a risk to hit one during the evaluation of one of our custom
commands
Reported by Pylint.
Line: 14
Column: 13
def __enter__(self) -> None:
self.disabled_breakpoints = []
for b in gdb.breakpoints():
if b.enabled:
b.enabled = False
self.disabled_breakpoints.append(b)
def __exit__(self, etype: Any, evalue: Any, tb: Any) -> None:
Reported by Pylint.
Line: 19
Column: 5
b.enabled = False
self.disabled_breakpoints.append(b)
def __exit__(self, etype: Any, evalue: Any, tb: Any) -> None:
for b in self.disabled_breakpoints:
b.enabled = True
class TensorRepr(gdb.Command): # type: ignore[misc, no-any-unimported]
"""
Reported by Pylint.
Line: 20
Column: 13
self.disabled_breakpoints.append(b)
def __exit__(self, etype: Any, evalue: Any, tb: Any) -> None:
for b in self.disabled_breakpoints:
b.enabled = True
class TensorRepr(gdb.Command): # type: ignore[misc, no-any-unimported]
"""
Print a human readable representation of the given at::Tensor.
Reported by Pylint.