The following issues were found:

torch/distributed/pipeline/sync/microbatch.py
12 issues
Module 'torch' has no 'cat' member
Error

Line: 216 Column: 18

              
    if outputs[0].atomic:
        tensors = tuple(b.tensor for b in outputs)
        output = torch.cat(tensors)
    else:
        output_buf: List[Any] = []
        for i in range(len(outputs[0])):
            output_type = type(outputs[0][i])
            current_outputs = []

            

Reported by Pylint.

Module 'torch' has no 'cat' member
Error

Line: 228 Column: 35

                              current_outputs.append(batch[i])

            if torch.is_tensor(outputs[0][i]):
                output_buf.append(torch.cat(current_outputs))
            else:
                output_buf.append(current_outputs)

        output = tuple(output_buf)


            

Reported by Pylint.

Redefining built-in 'input'
Warning (W0622)

Line: 183 Column: 9

                  batches: List[Any] = [[] for _ in range(chunks)]
    # Actual number of chunks produced
    num_chunks = -1
    for input in inputs:
        if torch.is_tensor(input):
            # Chunk only tensors.
            tensors = input.chunk(chunks)

            # Validate number of chunks equal across all inputs.

            

Reported by Pylint.

Too few public methods (1/2)
Refactor (R0903)

Line: 23 Column: 1

              Function = Callable[[TensorOrTensors], Union[List[Any], Tensor]]


class NoChunk(object):
    """
    Wrapper for a Tensor in :meth:`Pipe.forward` indicating that the tensor
    should not be chunked on the batch dimension and instead be replicated
    as-is across all micro-batches. This is useful for tensors which might
    not have any 'batch' semantics for the model.

            

Reported by Pylint.

Class 'NoChunk' inherits from object, can be safely removed from bases in python3
Refactor (R0205)

Line: 23 Column: 1

              Function = Callable[[TensorOrTensors], Union[List[Any], Tensor]]


class NoChunk(object):
    """
    Wrapper for a Tensor in :meth:`Pipe.forward` indicating that the tensor
    should not be chunked on the batch dimension and instead be replicated
    as-is across all micro-batches. This is useful for tensors which might
    not have any 'batch' semantics for the model.

            

Reported by Pylint.

Missing function or method docstring
Convention (C0116)

Line: 36 Column: 5

                      self._tensor = inp

    @property
    def tensor(self):
        return self._tensor


class Batch:
    """

            

Reported by Pylint.

Either all return statements in a function should return an expression, or none of them should.
Refactor (R1710)

Line: 78 Column: 5

              
        raise TypeError("No tensor found!")

    def get_device(self):
        """
        Retrieves the device for this microbatch.
        """
        if self.atomic:
            return self._values.device  # type: ignore[union-attr]

            

Reported by Pylint.

Unnecessary "else" after "return"
Refactor (R1705)

Line: 93 Column: 9

                      """Calls a function on the microbatch. It also wraps
        the output with :class:`Batch`.
        """
        if self.atomic:
            return Batch(function(self._values))
        else:
            return Batch(function(*self._values))

    def __repr__(self) -> str:

            

Reported by Pylint.

Line too long (104/100)
Convention (C0301)

Line: 137 Column: 1

                  def _setitem_by_index(self, index: int, value) -> None:
        if not self.atomic:
            i = index
            self._values = self._values[:i] + (value,) + self._values[i + 1 :]  # type: ignore[operator]
            return

        if index != 0:
            raise IndexError("atomic batch allows index 0 only")


            

Reported by Pylint.

Unnecessary parens after 'not' keyword
Convention (C0325)

Line: 146 Column: 1

                      self._values = value

    def _setitem_by_slice(self, index: slice, value) -> None:
        if not (index.start is index.stop is index.step is None):
            raise NotImplementedError("only slice [:] supported")

        if not self.atomic:
            self._values = value
            return

            

Reported by Pylint.

tools/code_analyzer/gen_op_registration_allowlist.py
12 issues
Redefining name 'root_ops' from outer scope (line 93)
Warning (W0621)

Line: 47 Column: 5

              
def gen_transitive_closure(
    dep_graph: DepGraph,
    root_ops: List[str],
    train: bool = False,
) -> List[str]:
    result = set(root_ops)
    queue = root_ops[:]


            

Reported by Pylint.

TODO: when FL is migrated from full-jit to lite trainer, remove '__ROOT__'
Warning (W0511)

Line: 62 Column: 3

                  # `__ROOT__` key to include ops which can be called from C++ code directly,
    # in addition to ops that are called from TorchScript model.
    # '__ROOT__' is only needed for full-jit. Keep it only for training.
    # TODO: when FL is migrated from full-jit to lite trainer, remove '__ROOT__'
    if train:
        queue.append('__ROOT__')

    while queue:
        cur = queue.pop()

            

Reported by Pylint.

Redefining name 'root_ops' from outer scope (line 93)
Warning (W0621)

Line: 75 Column: 53

              
    return sorted(result)

def gen_transitive_closure_str(dep_graph: DepGraph, root_ops: List[str]) -> str:
    return ' '.join(gen_transitive_closure(dep_graph, root_ops))


if __name__ == "__main__":
    parser = argparse.ArgumentParser(

            

Reported by Pylint.

standard import "from collections import defaultdict" should be placed before "import yaml"
Convention (C0411)

Line: 14 Column: 1

              import argparse
import yaml

from collections import defaultdict
from typing import Dict, List, Set


def canonical_name(opname: str) -> str:
    # Skip the overload name part as it's not supported by code analyzer yet.

            

Reported by Pylint.

standard import "from typing import Dict, List, Set" should be placed before "import yaml"
Convention (C0411)

Line: 15 Column: 1

              import yaml

from collections import defaultdict
from typing import Dict, List, Set


def canonical_name(opname: str) -> str:
    # Skip the overload name part as it's not supported by code analyzer yet.
    return opname.split('.', 1)[0]

            

Reported by Pylint.

Missing function or method docstring
Convention (C0116)

Line: 18 Column: 1

              from typing import Dict, List, Set


def canonical_name(opname: str) -> str:
    # Skip the overload name part as it's not supported by code analyzer yet.
    return opname.split('.', 1)[0]


DepGraph = Dict[str, Set[str]]

            

Reported by Pylint.

Missing function or method docstring
Convention (C0116)

Line: 26 Column: 1

              DepGraph = Dict[str, Set[str]]


def load_op_dep_graph(fname: str) -> DepGraph:
    with open(fname, 'r') as stream:
        result = defaultdict(set)
        for op in yaml.safe_load(stream):
            op_name = canonical_name(op['name'])
            for dep in op.get('depends', []):

            

Reported by Pylint.

Variable name "op" doesn't conform to snake_case naming style
Convention (C0103)

Line: 29 Column: 13

              def load_op_dep_graph(fname: str) -> DepGraph:
    with open(fname, 'r') as stream:
        result = defaultdict(set)
        for op in yaml.safe_load(stream):
            op_name = canonical_name(op['name'])
            for dep in op.get('depends', []):
                dep_name = canonical_name(dep['name'])
                result[op_name].add(dep_name)
        return dict(result)

            

Reported by Pylint.

Missing function or method docstring
Convention (C0116)

Line: 37 Column: 1

                      return dict(result)


def load_root_ops(fname: str) -> List[str]:
    result = []
    with open(fname, 'r') as stream:
        for op in yaml.safe_load(stream):
            result.append(canonical_name(op))
    return result

            

Reported by Pylint.

Variable name "op" doesn't conform to snake_case naming style
Convention (C0103)

Line: 40 Column: 13

              def load_root_ops(fname: str) -> List[str]:
    result = []
    with open(fname, 'r') as stream:
        for op in yaml.safe_load(stream):
            result.append(canonical_name(op))
    return result


def gen_transitive_closure(

            

Reported by Pylint.

torch/csrc/api/include/torch/optim/serialize.h
12 issues
read - Check buffer boundaries if used in a loop including recursive loops
Security

Line: 38 Column: 15 CWE codes: 120 20

                  std::vector<std::string> tensorimpl_keys = archive.keys();
    for (const std::string& tensorimpl_key : tensorimpl_keys) {
      serialize::InputArchive param_state_archive;
      archive.read(tensorimpl_key, param_state_archive);
      DerivedOptimizerParamState param_state;
      param_state.serialize(param_state_archive);
      state[tensorimpl_key] = std::make_unique<DerivedOptimizerParamState>(param_state);
    }
  }

            

Reported by FlawFinder.

read - Check buffer boundaries if used in a loop including recursive loops
Security

Line: 77 Column: 13 CWE codes: 120 20

                    serialize::InputArchive& archive,
      std::vector<std::pair<std::vector<std::string>, std::unique_ptr<OptimizerOptions>>>& param_groups) {
    torch::Tensor param_groups_size_tensor;
    archive.read("param_groups/size", param_groups_size_tensor);
    const int64_t param_groups_size = param_groups_size_tensor.item<int64_t>();
    for (const auto i : c10::irange(param_groups_size)) {
      serialize::InputArchive param_group_archive;
      archive.read("param_groups/" + c10::guts::to_string(i), param_group_archive);
      torch::Tensor size_tensor;

            

Reported by FlawFinder.

read - Check buffer boundaries if used in a loop including recursive loops
Security

Line: 81 Column: 15 CWE codes: 120 20

                  const int64_t param_groups_size = param_groups_size_tensor.item<int64_t>();
    for (const auto i : c10::irange(param_groups_size)) {
      serialize::InputArchive param_group_archive;
      archive.read("param_groups/" + c10::guts::to_string(i), param_group_archive);
      torch::Tensor size_tensor;
      param_group_archive.read("params/size", size_tensor);
      const int64_t size = size_tensor.item<int64_t>();
      std::vector<std::string> params;
      for (const auto index : c10::irange(size)) {

            

Reported by FlawFinder.

read - Check buffer boundaries if used in a loop including recursive loops
Security

Line: 83 Column: 27 CWE codes: 120 20

                    serialize::InputArchive param_group_archive;
      archive.read("param_groups/" + c10::guts::to_string(i), param_group_archive);
      torch::Tensor size_tensor;
      param_group_archive.read("params/size", size_tensor);
      const int64_t size = size_tensor.item<int64_t>();
      std::vector<std::string> params;
      for (const auto index : c10::irange(size)) {
        IValue ivalue;
        param_group_archive.read(

            

Reported by FlawFinder.

read - Check buffer boundaries if used in a loop including recursive loops
Security

Line: 88 Column: 29 CWE codes: 120 20

                    std::vector<std::string> params;
      for (const auto index : c10::irange(size)) {
        IValue ivalue;
        param_group_archive.read(
          "params/" + c10::to_string(index), ivalue);
        std::string element = ivalue.toStringRef();
        params.emplace_back(element);
      }
      serialize::InputArchive param_group_options_archive;

            

Reported by FlawFinder.

read - Check buffer boundaries if used in a loop including recursive loops
Security

Line: 94 Column: 27 CWE codes: 120 20

                      params.emplace_back(element);
      }
      serialize::InputArchive param_group_options_archive;
      param_group_archive.read("options", param_group_options_archive);
      DerivedOptimizerParamOptions param_group_options(0);
      param_group_options.serialize(param_group_options_archive);
      param_groups.emplace_back(std::make_pair(params, std::make_unique<DerivedOptimizerParamOptions>(param_group_options)));
    }
  }

            

Reported by FlawFinder.

read - Check buffer boundaries if used in a loop including recursive loops
Security

Line: 153 Column: 13 CWE codes: 120 20

                  Optimizer& optimizer) {

    IValue pytorch_version;
    archive.read("pytorch_version", pytorch_version);
    TORCH_INTERNAL_ASSERT(pytorch_version.toStringRef() == "1.5.0");
    serialize::InputArchive state_archive;
    archive.read("state", state_archive);
    ska::flat_hash_map<std::string, std::unique_ptr<OptimizerParamState>> saved_state;
    detail::serialize<DerivedOptimizerParamState>(state_archive, saved_state);

            

Reported by FlawFinder.

read - Check buffer boundaries if used in a loop including recursive loops
Security

Line: 156 Column: 13 CWE codes: 120 20

                  archive.read("pytorch_version", pytorch_version);
    TORCH_INTERNAL_ASSERT(pytorch_version.toStringRef() == "1.5.0");
    serialize::InputArchive state_archive;
    archive.read("state", state_archive);
    ska::flat_hash_map<std::string, std::unique_ptr<OptimizerParamState>> saved_state;
    detail::serialize<DerivedOptimizerParamState>(state_archive, saved_state);

    serialize::InputArchive param_groups_archive;
    archive.read("param_groups", param_groups_archive);

            

Reported by FlawFinder.

read - Check buffer boundaries if used in a loop including recursive loops
Security

Line: 161 Column: 13 CWE codes: 120 20

                  detail::serialize<DerivedOptimizerParamState>(state_archive, saved_state);

    serialize::InputArchive param_groups_archive;
    archive.read("param_groups", param_groups_archive);
    std::vector<std::pair<std::vector<std::string>, std::unique_ptr<OptimizerOptions>>> saved_param_groups;
    detail::serialize<DerivedOptimizerParamOptions>(param_groups_archive, saved_param_groups);

    // update state
    TORCH_CHECK(saved_param_groups.size() == optimizer.param_groups().size(), "loaded state dict has a different number of parameter groups");

            

Reported by FlawFinder.

read - Check buffer boundaries if used in a loop including recursive loops
Security

Line: 202 Column: 11 CWE codes: 120 20

                  BufferContainer& buffers) {
  buffers.clear();
  torch::Tensor size_tensor;
  archive.read(key + "/size", size_tensor);
  const size_t size = size_tensor.item<int64_t>();
  for (const auto index : c10::irange(size)) {
    buffers.emplace_back();
    archive.read(
        key + "/" + c10::to_string(index), buffers.back(), /*is_buffer=*/true);

            

Reported by FlawFinder.

torch/backends/cudnn/rnn.py
12 issues
Module 'torch' has no '_cudnn_init_dropout_state' member
Error

Line: 51 Column: 63

                      if dropout_p == 0:
            dropout_state[dropout_desc_name] = Unserializable(None)
        else:
            dropout_state[dropout_desc_name] = Unserializable(torch._cudnn_init_dropout_state(  # type: ignore[call-arg]
                dropout_p,
                train,
                dropout_seed,
                self_ty=torch.uint8,
                device=torch.device('cuda')))

            

Reported by Pylint.

Module 'torch' has no 'uint8' member
Error

Line: 55 Column: 25

                              dropout_p,
                train,
                dropout_seed,
                self_ty=torch.uint8,
                device=torch.device('cuda')))
    dropout_ts = dropout_state[dropout_desc_name].get()
    return dropout_ts

            

Reported by Pylint.

Module 'torch' has no 'device' member
Error

Line: 56 Column: 24

                              train,
                dropout_seed,
                self_ty=torch.uint8,
                device=torch.device('cuda')))
    dropout_ts = dropout_state[dropout_desc_name].get()
    return dropout_ts

            

Reported by Pylint.

Access to a protected member _cudnn_init_dropout_state of a client class
Warning (W0212)

Line: 51 Column: 63

                      if dropout_p == 0:
            dropout_state[dropout_desc_name] = Unserializable(None)
        else:
            dropout_state[dropout_desc_name] = Unserializable(torch._cudnn_init_dropout_state(  # type: ignore[call-arg]
                dropout_p,
                train,
                dropout_seed,
                self_ty=torch.uint8,
                device=torch.device('cuda')))

            

Reported by Pylint.

Missing module docstring
Convention (C0114)

Line: 1 Column: 1

              import torch.cuda

try:
    from torch._C import _cudnn
except ImportError:
    # Uses of all the functions below should be guarded by torch.backends.cudnn.is_available(),
    # so it's safe to not emit any checks here.
    _cudnn = None  # type: ignore[assignment]


            

Reported by Pylint.

Missing function or method docstring
Convention (C0116)

Line: 11 Column: 1

                  _cudnn = None  # type: ignore[assignment]


def get_cudnn_mode(mode):
    if mode == 'RNN_RELU':
        return int(_cudnn.RNNMode.rnn_relu)
    elif mode == 'RNN_TANH':
        return int(_cudnn.RNNMode.rnn_tanh)
    elif mode == 'LSTM':

            

Reported by Pylint.

Unnecessary "elif" after "return"
Refactor (R1705)

Line: 12 Column: 5

              

def get_cudnn_mode(mode):
    if mode == 'RNN_RELU':
        return int(_cudnn.RNNMode.rnn_relu)
    elif mode == 'RNN_TANH':
        return int(_cudnn.RNNMode.rnn_tanh)
    elif mode == 'LSTM':
        return int(_cudnn.RNNMode.lstm)

            

Reported by Pylint.

Class 'Unserializable' inherits from object, can be safely removed from bases in python3
Refactor (R0205)

Line: 27 Column: 1

              # NB: We don't actually need this class anymore (in fact, we could serialize the
# dropout state for even better reproducibility), but it is kept for backwards
# compatibility for old models.
class Unserializable(object):

    def __init__(self, inner):
        self.inner = inner

    def get(self):

            

Reported by Pylint.

Missing class docstring
Convention (C0115)

Line: 27 Column: 1

              # NB: We don't actually need this class anymore (in fact, we could serialize the
# dropout state for even better reproducibility), but it is kept for backwards
# compatibility for old models.
class Unserializable(object):

    def __init__(self, inner):
        self.inner = inner

    def get(self):

            

Reported by Pylint.

Missing function or method docstring
Convention (C0116)

Line: 32 Column: 5

                  def __init__(self, inner):
        self.inner = inner

    def get(self):
        return self.inner

    def __getstate__(self):
        # Note: can't return {}, because python2 won't call __setstate__
        # if the value evaluates to False

            

Reported by Pylint.

torch/distributed/elastic/rendezvous/__init__.py
12 issues
Unable to import '__init__.api'
Error

Line: 131 Column: 1

                   )
"""

from .api import *  # noqa: F403
from .registry import _register_default_handlers


_register_default_handlers()


            

Reported by Pylint.

Unable to import '__init__.registry'
Error

Line: 132 Column: 1

              """

from .api import *  # noqa: F403
from .registry import _register_default_handlers


_register_default_handlers()



            

Reported by Pylint.

Undefined variable name 'RendezvousClosedError' in __all__
Error

Line: 139 Column: 5

              

__all__ = [
    "RendezvousClosedError",
    "RendezvousConnectionError",
    "RendezvousError",
    "RendezvousHandler",
    "RendezvousHandlerCreator",
    "RendezvousHandlerRegistry",

            

Reported by Pylint.

Undefined variable name 'RendezvousConnectionError' in __all__
Error

Line: 140 Column: 5

              
__all__ = [
    "RendezvousClosedError",
    "RendezvousConnectionError",
    "RendezvousError",
    "RendezvousHandler",
    "RendezvousHandlerCreator",
    "RendezvousHandlerRegistry",
    "RendezvousParameters",

            

Reported by Pylint.

Undefined variable name 'RendezvousError' in __all__
Error

Line: 141 Column: 5

              __all__ = [
    "RendezvousClosedError",
    "RendezvousConnectionError",
    "RendezvousError",
    "RendezvousHandler",
    "RendezvousHandlerCreator",
    "RendezvousHandlerRegistry",
    "RendezvousParameters",
    "RendezvousStateError",

            

Reported by Pylint.

Undefined variable name 'RendezvousHandler' in __all__
Error

Line: 142 Column: 5

                  "RendezvousClosedError",
    "RendezvousConnectionError",
    "RendezvousError",
    "RendezvousHandler",
    "RendezvousHandlerCreator",
    "RendezvousHandlerRegistry",
    "RendezvousParameters",
    "RendezvousStateError",
    "RendezvousTimeoutError",

            

Reported by Pylint.

Undefined variable name 'RendezvousHandlerCreator' in __all__
Error

Line: 143 Column: 5

                  "RendezvousConnectionError",
    "RendezvousError",
    "RendezvousHandler",
    "RendezvousHandlerCreator",
    "RendezvousHandlerRegistry",
    "RendezvousParameters",
    "RendezvousStateError",
    "RendezvousTimeoutError",
    "rendezvous_handler_registry",

            

Reported by Pylint.

Undefined variable name 'RendezvousHandlerRegistry' in __all__
Error

Line: 144 Column: 5

                  "RendezvousError",
    "RendezvousHandler",
    "RendezvousHandlerCreator",
    "RendezvousHandlerRegistry",
    "RendezvousParameters",
    "RendezvousStateError",
    "RendezvousTimeoutError",
    "rendezvous_handler_registry",
]

            

Reported by Pylint.

Undefined variable name 'RendezvousParameters' in __all__
Error

Line: 145 Column: 5

                  "RendezvousHandler",
    "RendezvousHandlerCreator",
    "RendezvousHandlerRegistry",
    "RendezvousParameters",
    "RendezvousStateError",
    "RendezvousTimeoutError",
    "rendezvous_handler_registry",
]

            

Reported by Pylint.

Undefined variable name 'RendezvousStateError' in __all__
Error

Line: 146 Column: 5

                  "RendezvousHandlerCreator",
    "RendezvousHandlerRegistry",
    "RendezvousParameters",
    "RendezvousStateError",
    "RendezvousTimeoutError",
    "rendezvous_handler_registry",
]

            

Reported by Pylint.

torch/backends/quantized/__init__.py
12 issues
Missing module docstring
Convention (C0114)

Line: 1 Column: 1

              import sys
import torch
import types
from typing import List

# This function should correspond to the enums present in c10/core/QEngine.h
def _get_qengine_id(qengine: str) -> int:
    if qengine == 'none' or qengine == '' or qengine is None:
        ret = 0

            

Reported by Pylint.

standard import "import types" should be placed before "import torch"
Convention (C0411)

Line: 3 Column: 1

              import sys
import torch
import types
from typing import List

# This function should correspond to the enums present in c10/core/QEngine.h
def _get_qengine_id(qengine: str) -> int:
    if qengine == 'none' or qengine == '' or qengine is None:
        ret = 0

            

Reported by Pylint.

standard import "from typing import List" should be placed before "import torch"
Convention (C0411)

Line: 4 Column: 1

              import sys
import torch
import types
from typing import List

# This function should correspond to the enums present in c10/core/QEngine.h
def _get_qengine_id(qengine: str) -> int:
    if qengine == 'none' or qengine == '' or qengine is None:
        ret = 0

            

Reported by Pylint.

Class '_QEngineProp' inherits from object, can be safely removed from bases in python3
Refactor (R0205)

Line: 24 Column: 1

                  all_engines = {0 : 'none', 1 : 'fbgemm', 2 : 'qnnpack'}
    return all_engines.get(qengine, '*undefined')

class _QEngineProp(object):
    def __get__(self, obj, objtype) -> str:
        return _get_qengine_str(torch._C._get_qengine())

    def __set__(self, obj, val: str) -> None:
        torch._C._set_qengine(_get_qengine_id(val))

            

Reported by Pylint.

Class '_SupportedQEnginesProp' inherits from object, can be safely removed from bases in python3
Refactor (R0205)

Line: 31 Column: 1

                  def __set__(self, obj, val: str) -> None:
        torch._C._set_qengine(_get_qengine_id(val))

class _SupportedQEnginesProp(object):
    def __get__(self, obj, objtype) -> List[str]:
        qengines = torch._C._supported_qengines()
        return [_get_qengine_str(qe) for qe in qengines]

    def __set__(self, obj, val) -> None:

            

Reported by Pylint.

Missing class docstring
Error

Line: 39 Column: 1

                  def __set__(self, obj, val) -> None:
        raise RuntimeError("Assignment not supported")

class QuantizedEngine(types.ModuleType):
    def __init__(self, m, name):
        super(QuantizedEngine, self).__init__(name)
        self.m = m

    def __getattr__(self, attr):

            

Reported by Pylint.

Too few public methods (1/2)
Refactor (R0903)

Line: 39 Column: 1

                  def __set__(self, obj, val) -> None:
        raise RuntimeError("Assignment not supported")

class QuantizedEngine(types.ModuleType):
    def __init__(self, m, name):
        super(QuantizedEngine, self).__init__(name)
        self.m = m

    def __getattr__(self, attr):

            

Reported by Pylint.

Consider using Python 3 style super() without arguments
Refactor (R1725)

Line: 41 Column: 9

              
class QuantizedEngine(types.ModuleType):
    def __init__(self, m, name):
        super(QuantizedEngine, self).__init__(name)
        self.m = m

    def __getattr__(self, attr):
        return self.m.__getattribute__(attr)


            

Reported by Pylint.

Attribute name "m" doesn't conform to snake_case naming style
Convention (C0103)

Line: 42 Column: 9

              class QuantizedEngine(types.ModuleType):
    def __init__(self, m, name):
        super(QuantizedEngine, self).__init__(name)
        self.m = m

    def __getattr__(self, attr):
        return self.m.__getattribute__(attr)

    engine = _QEngineProp()

            

Reported by Pylint.

Module 'torch._C' has no '_get_qengine' member, but source is unavailable. Consider adding this module to extension-pkg-whitelist if you want to perform analysis based on run-time introspection of living objects.
Informational (I1101)

Line: 26 Column: 33

              
class _QEngineProp(object):
    def __get__(self, obj, objtype) -> str:
        return _get_qengine_str(torch._C._get_qengine())

    def __set__(self, obj, val: str) -> None:
        torch._C._set_qengine(_get_qengine_id(val))

class _SupportedQEnginesProp(object):

            

Reported by Pylint.

torch/distributions/weibull.py
12 issues
Module 'torch' has no 'ones_like' member
Error

Line: 30 Column: 33

                  def __init__(self, scale, concentration, validate_args=None):
        self.scale, self.concentration = broadcast_all(scale, concentration)
        self.concentration_reciprocal = self.concentration.reciprocal()
        base_dist = Exponential(torch.ones_like(self.scale), validate_args=validate_args)
        transforms = [PowerTransform(exponent=self.concentration_reciprocal),
                      AffineTransform(loc=0, scale=self.scale)]
        super(Weibull, self).__init__(base_dist,
                                      transforms,
                                      validate_args=validate_args)

            

Reported by Pylint.

Module 'torch' has no 'lgamma' member
Error

Line: 53 Column: 39

              
    @property
    def mean(self):
        return self.scale * torch.exp(torch.lgamma(1 + self.concentration_reciprocal))

    @property
    def variance(self):
        return self.scale.pow(2) * (torch.exp(torch.lgamma(1 + 2 * self.concentration_reciprocal)) -
                                    torch.exp(2 * torch.lgamma(1 + self.concentration_reciprocal)))

            

Reported by Pylint.

Module 'torch' has no 'exp' member
Error

Line: 53 Column: 29

              
    @property
    def mean(self):
        return self.scale * torch.exp(torch.lgamma(1 + self.concentration_reciprocal))

    @property
    def variance(self):
        return self.scale.pow(2) * (torch.exp(torch.lgamma(1 + 2 * self.concentration_reciprocal)) -
                                    torch.exp(2 * torch.lgamma(1 + self.concentration_reciprocal)))

            

Reported by Pylint.

Module 'torch' has no 'exp' member
Error

Line: 57 Column: 37

              
    @property
    def variance(self):
        return self.scale.pow(2) * (torch.exp(torch.lgamma(1 + 2 * self.concentration_reciprocal)) -
                                    torch.exp(2 * torch.lgamma(1 + self.concentration_reciprocal)))

    def entropy(self):
        return euler_constant * (1 - self.concentration_reciprocal) + \
            torch.log(self.scale * self.concentration_reciprocal) + 1

            

Reported by Pylint.

Module 'torch' has no 'lgamma' member
Error

Line: 57 Column: 47

              
    @property
    def variance(self):
        return self.scale.pow(2) * (torch.exp(torch.lgamma(1 + 2 * self.concentration_reciprocal)) -
                                    torch.exp(2 * torch.lgamma(1 + self.concentration_reciprocal)))

    def entropy(self):
        return euler_constant * (1 - self.concentration_reciprocal) + \
            torch.log(self.scale * self.concentration_reciprocal) + 1

            

Reported by Pylint.

Module 'torch' has no 'exp' member
Error

Line: 58 Column: 37

                  @property
    def variance(self):
        return self.scale.pow(2) * (torch.exp(torch.lgamma(1 + 2 * self.concentration_reciprocal)) -
                                    torch.exp(2 * torch.lgamma(1 + self.concentration_reciprocal)))

    def entropy(self):
        return euler_constant * (1 - self.concentration_reciprocal) + \
            torch.log(self.scale * self.concentration_reciprocal) + 1

            

Reported by Pylint.

Module 'torch' has no 'lgamma' member
Error

Line: 58 Column: 51

                  @property
    def variance(self):
        return self.scale.pow(2) * (torch.exp(torch.lgamma(1 + 2 * self.concentration_reciprocal)) -
                                    torch.exp(2 * torch.lgamma(1 + self.concentration_reciprocal)))

    def entropy(self):
        return euler_constant * (1 - self.concentration_reciprocal) + \
            torch.log(self.scale * self.concentration_reciprocal) + 1

            

Reported by Pylint.

Module 'torch' has no 'log' member
Error

Line: 62 Column: 13

              
    def entropy(self):
        return euler_constant * (1 - self.concentration_reciprocal) + \
            torch.log(self.scale * self.concentration_reciprocal) + 1

            

Reported by Pylint.

Method 'enumerate_support' is abstract in class 'Distribution' but is not overridden
Warning (W0223)

Line: 10 Column: 1

              from torch.distributions.gumbel import euler_constant


class Weibull(TransformedDistribution):
    r"""
    Samples from a two-parameter Weibull distribution.

    Example:


            

Reported by Pylint.

Access to a protected member _validate_args of a client class
Warning (W0212)

Line: 48 Column: 9

                      super(Weibull, new).__init__(base_dist,
                                     transforms,
                                     validate_args=False)
        new._validate_args = self._validate_args
        return new

    @property
    def mean(self):
        return self.scale * torch.exp(torch.lgamma(1 + self.concentration_reciprocal))

            

Reported by Pylint.

torch/distributed/elastic/rendezvous/registry.py
12 issues
Attempted relative import beyond top-level package
Error

Line: 7 Column: 1

              # This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from .api import RendezvousHandler, RendezvousParameters
from .api import rendezvous_handler_registry as handler_registry
from .dynamic_rendezvous import create_handler


def _create_static_handler(params: RendezvousParameters) -> RendezvousHandler:

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 8 Column: 1

              # LICENSE file in the root directory of this source tree.

from .api import RendezvousHandler, RendezvousParameters
from .api import rendezvous_handler_registry as handler_registry
from .dynamic_rendezvous import create_handler


def _create_static_handler(params: RendezvousParameters) -> RendezvousHandler:
    from . import static_tcp_rendezvous

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 9 Column: 1

              
from .api import RendezvousHandler, RendezvousParameters
from .api import rendezvous_handler_registry as handler_registry
from .dynamic_rendezvous import create_handler


def _create_static_handler(params: RendezvousParameters) -> RendezvousHandler:
    from . import static_tcp_rendezvous


            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 13 Column: 5

              

def _create_static_handler(params: RendezvousParameters) -> RendezvousHandler:
    from . import static_tcp_rendezvous

    return static_tcp_rendezvous.create_rdzv_handler(params)


def _create_etcd_handler(params: RendezvousParameters) -> RendezvousHandler:

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 19 Column: 5

              

def _create_etcd_handler(params: RendezvousParameters) -> RendezvousHandler:
    from . import etcd_rendezvous

    return etcd_rendezvous.create_rdzv_handler(params)


def _create_etcd_v2_handler(params: RendezvousParameters) -> RendezvousHandler:

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 25 Column: 5

              

def _create_etcd_v2_handler(params: RendezvousParameters) -> RendezvousHandler:
    from .etcd_rendezvous_backend import create_backend

    backend, store = create_backend(params)

    return create_handler(store, backend, params)


            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 33 Column: 5

              

def _create_c10d_handler(params: RendezvousParameters) -> RendezvousHandler:
    from .c10d_rendezvous_backend import create_backend

    backend, store = create_backend(params)

    return create_handler(store, backend, params)


            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from .api import RendezvousHandler, RendezvousParameters
from .api import rendezvous_handler_registry as handler_registry
from .dynamic_rendezvous import create_handler

            

Reported by Pylint.

Import outside toplevel (.static_tcp_rendezvous)
Error

Line: 13 Column: 5

              

def _create_static_handler(params: RendezvousParameters) -> RendezvousHandler:
    from . import static_tcp_rendezvous

    return static_tcp_rendezvous.create_rdzv_handler(params)


def _create_etcd_handler(params: RendezvousParameters) -> RendezvousHandler:

            

Reported by Pylint.

Import outside toplevel (.etcd_rendezvous)
Error

Line: 19 Column: 5

              

def _create_etcd_handler(params: RendezvousParameters) -> RendezvousHandler:
    from . import etcd_rendezvous

    return etcd_rendezvous.create_rdzv_handler(params)


def _create_etcd_v2_handler(params: RendezvousParameters) -> RendezvousHandler:

            

Reported by Pylint.

test/package/package_a/std_sys_module_hacks.py
12 issues
Unable to import 'typing.io'
Error

Line: 4 Column: 1

              import os  # noqa: F401
import os.path  # noqa: F401
import typing  # noqa: F401
import typing.io  # noqa: F401
import typing.re  # noqa: F401

import torch



            

Reported by Pylint.

Unable to import 'typing.re'
Error

Line: 5 Column: 1

              import os.path  # noqa: F401
import typing  # noqa: F401
import typing.io  # noqa: F401
import typing.re  # noqa: F401

import torch


class Module(torch.nn.Module):

            

Reported by Pylint.

Unable to import 'torch'
Error

Line: 7 Column: 1

              import typing.io  # noqa: F401
import typing.re  # noqa: F401

import torch


class Module(torch.nn.Module):
    def __init__(self):
        super().__init__()

            

Reported by Pylint.

Unused import typing
Error

Line: 3 Column: 1

              import os  # noqa: F401
import os.path  # noqa: F401
import typing  # noqa: F401
import typing.io  # noqa: F401
import typing.re  # noqa: F401

import torch



            

Reported by Pylint.

Unused import typing.io
Error

Line: 4 Column: 1

              import os  # noqa: F401
import os.path  # noqa: F401
import typing  # noqa: F401
import typing.io  # noqa: F401
import typing.re  # noqa: F401

import torch



            

Reported by Pylint.

Unused import typing.re
Error

Line: 5 Column: 1

              import os.path  # noqa: F401
import typing  # noqa: F401
import typing.io  # noqa: F401
import typing.re  # noqa: F401

import torch


class Module(torch.nn.Module):

            

Reported by Pylint.

Useless super delegation in method '__init__'
Error

Line: 11 Column: 5

              

class Module(torch.nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self):
        return os.path.abspath("test")

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              import os  # noqa: F401
import os.path  # noqa: F401
import typing  # noqa: F401
import typing.io  # noqa: F401
import typing.re  # noqa: F401

import torch



            

Reported by Pylint.

Too few public methods (1/2)
Error

Line: 10 Column: 1

              import torch


class Module(torch.nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self):
        return os.path.abspath("test")

            

Reported by Pylint.

Missing class docstring
Error

Line: 10 Column: 1

              import torch


class Module(torch.nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self):
        return os.path.abspath("test")

            

Reported by Pylint.

torch/fx/experimental/fx2trt/converters/__init__.py
12 issues
Unable to import '__init__.activation'
Error

Line: 1 Column: 1

              from .activation import *  # noqa: F403
from .adaptive_avgpool import *  # noqa: F403
from .add import *  # noqa: F403
from .batchnorm import *  # noqa: F403
from .convolution import *  # noqa: F403
from .linear import *  # noqa: F403
from .maxpool import *  # noqa: F403
from .mul import *  # noqa: F403
from .transformation import *  # noqa: F403

            

Reported by Pylint.

Unable to import '__init__.adaptive_avgpool'
Error

Line: 2 Column: 1

              from .activation import *  # noqa: F403
from .adaptive_avgpool import *  # noqa: F403
from .add import *  # noqa: F403
from .batchnorm import *  # noqa: F403
from .convolution import *  # noqa: F403
from .linear import *  # noqa: F403
from .maxpool import *  # noqa: F403
from .mul import *  # noqa: F403
from .transformation import *  # noqa: F403

            

Reported by Pylint.

Unable to import '__init__.add'
Error

Line: 3 Column: 1

              from .activation import *  # noqa: F403
from .adaptive_avgpool import *  # noqa: F403
from .add import *  # noqa: F403
from .batchnorm import *  # noqa: F403
from .convolution import *  # noqa: F403
from .linear import *  # noqa: F403
from .maxpool import *  # noqa: F403
from .mul import *  # noqa: F403
from .transformation import *  # noqa: F403

            

Reported by Pylint.

Unable to import '__init__.batchnorm'
Error

Line: 4 Column: 1

              from .activation import *  # noqa: F403
from .adaptive_avgpool import *  # noqa: F403
from .add import *  # noqa: F403
from .batchnorm import *  # noqa: F403
from .convolution import *  # noqa: F403
from .linear import *  # noqa: F403
from .maxpool import *  # noqa: F403
from .mul import *  # noqa: F403
from .transformation import *  # noqa: F403

            

Reported by Pylint.

Unable to import '__init__.convolution'
Error

Line: 5 Column: 1

              from .adaptive_avgpool import *  # noqa: F403
from .add import *  # noqa: F403
from .batchnorm import *  # noqa: F403
from .convolution import *  # noqa: F403
from .linear import *  # noqa: F403
from .maxpool import *  # noqa: F403
from .mul import *  # noqa: F403
from .transformation import *  # noqa: F403
from .quantization import *  # noqa: F403

            

Reported by Pylint.

Unable to import '__init__.linear'
Error

Line: 6 Column: 1

              from .add import *  # noqa: F403
from .batchnorm import *  # noqa: F403
from .convolution import *  # noqa: F403
from .linear import *  # noqa: F403
from .maxpool import *  # noqa: F403
from .mul import *  # noqa: F403
from .transformation import *  # noqa: F403
from .quantization import *  # noqa: F403
from .acc_ops_converters import *  # noqa: F403

            

Reported by Pylint.

Unable to import '__init__.maxpool'
Error

Line: 7 Column: 1

              from .batchnorm import *  # noqa: F403
from .convolution import *  # noqa: F403
from .linear import *  # noqa: F403
from .maxpool import *  # noqa: F403
from .mul import *  # noqa: F403
from .transformation import *  # noqa: F403
from .quantization import *  # noqa: F403
from .acc_ops_converters import *  # noqa: F403

            

Reported by Pylint.

Unable to import '__init__.mul'
Error

Line: 8 Column: 1

              from .convolution import *  # noqa: F403
from .linear import *  # noqa: F403
from .maxpool import *  # noqa: F403
from .mul import *  # noqa: F403
from .transformation import *  # noqa: F403
from .quantization import *  # noqa: F403
from .acc_ops_converters import *  # noqa: F403

            

Reported by Pylint.

Unable to import '__init__.transformation'
Error

Line: 9 Column: 1

              from .linear import *  # noqa: F403
from .maxpool import *  # noqa: F403
from .mul import *  # noqa: F403
from .transformation import *  # noqa: F403
from .quantization import *  # noqa: F403
from .acc_ops_converters import *  # noqa: F403

            

Reported by Pylint.

Unable to import '__init__.quantization'
Error

Line: 10 Column: 1

              from .maxpool import *  # noqa: F403
from .mul import *  # noqa: F403
from .transformation import *  # noqa: F403
from .quantization import *  # noqa: F403
from .acc_ops_converters import *  # noqa: F403

            

Reported by Pylint.