The following issues were found:

tools/codegen/api/structured.py
23 issues
TODO: delete these special cases; see tools.codegen.api.cpp--these
Error

Line: 49 Column: 3

                              "resolve torch::List issue, see "
                "https://fb.workplace.com/groups/894363187646754/permalink/1149276442155426"
            )
        # TODO: delete these special cases; see tools.codegen.api.cpp--these
        # must be changed in tandem, but there are problems; see
        # https://github.com/pytorch/pytorch/pull/51485
        elif str(t.elem) == 'int':
            return NamedCType(binds, BaseCType(intArrayRefT))
        elif str(t.elem) == 'Dimname':

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              from tools.codegen.model import (Argument, BaseTy, BaseType, ListType,
                                 NativeFunctionsGroup, OptionalType,
                                 SelfArgument, TensorOptionsArguments, Type,
                                 assert_never)

from tools.codegen.api.types import (ArgName, BaseCType, Binding, ArrayRefCType,
                                     ConstRefCType, OptionalCType, NamedCType,
                                     tensorT, scalarT, intArrayRefT, dimnameListT,
                                     optionalTensorRefT, optionalScalarRefT)

            

Reported by Pylint.

standard import "from typing import Union, List" should be placed before "from tools.codegen.model import Argument, BaseTy, BaseType, ListType, NativeFunctionsGroup, OptionalType, SelfArgument, TensorOptionsArguments, Type, assert_never"
Error

Line: 13 Column: 1

              
from tools.codegen.api import cpp

from typing import Union, List

# This file describes the translation of JIT schema to the structured functions API.
# This is similar to native API, but a number of historical problems with native
# API have been fixed.


            

Reported by Pylint.

Too many return statements (9/6)
Error

Line: 22 Column: 1

              # Translation of types occuring in JIT arguments to a C++ argument type.
# NB: For now, mutable doesn't do anything; but it could if we make
# some more nominal types
def argumenttype_type(t: Type, *, mutable: bool, binds: ArgName) -> NamedCType:
    # If it's a value type, do the value type translation
    r = cpp.valuetype_type(t, binds=binds)
    if r is not None:
        return r


            

Reported by Pylint.

Missing function or method docstring
Error

Line: 22 Column: 1

              # Translation of types occuring in JIT arguments to a C++ argument type.
# NB: For now, mutable doesn't do anything; but it could if we make
# some more nominal types
def argumenttype_type(t: Type, *, mutable: bool, binds: ArgName) -> NamedCType:
    # If it's a value type, do the value type translation
    r = cpp.valuetype_type(t, binds=binds)
    if r is not None:
        return r


            

Reported by Pylint.

Too many branches (13/12)
Error

Line: 22 Column: 1

              # Translation of types occuring in JIT arguments to a C++ argument type.
# NB: For now, mutable doesn't do anything; but it could if we make
# some more nominal types
def argumenttype_type(t: Type, *, mutable: bool, binds: ArgName) -> NamedCType:
    # If it's a value type, do the value type translation
    r = cpp.valuetype_type(t, binds=binds)
    if r is not None:
        return r


            

Reported by Pylint.

Argument name "t" doesn't conform to snake_case naming style
Error

Line: 22 Column: 1

              # Translation of types occuring in JIT arguments to a C++ argument type.
# NB: For now, mutable doesn't do anything; but it could if we make
# some more nominal types
def argumenttype_type(t: Type, *, mutable: bool, binds: ArgName) -> NamedCType:
    # If it's a value type, do the value type translation
    r = cpp.valuetype_type(t, binds=binds)
    if r is not None:
        return r


            

Reported by Pylint.

Variable name "r" doesn't conform to snake_case naming style
Error

Line: 24 Column: 5

              # some more nominal types
def argumenttype_type(t: Type, *, mutable: bool, binds: ArgName) -> NamedCType:
    # If it's a value type, do the value type translation
    r = cpp.valuetype_type(t, binds=binds)
    if r is not None:
        return r

    if isinstance(t, BaseType):
        if t.name == BaseTy.Tensor:

            

Reported by Pylint.

Unnecessary "elif" after "return"
Error

Line: 29 Column: 9

                      return r

    if isinstance(t, BaseType):
        if t.name == BaseTy.Tensor:
            return NamedCType(binds, ConstRefCType(BaseCType(tensorT)))
        elif t.name == BaseTy.Scalar:
            return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
        else:
            raise AssertionError(f"base type should have been value type {t}")

            

Reported by Pylint.

Unnecessary "elif" after "return"
Error

Line: 36 Column: 9

                      else:
            raise AssertionError(f"base type should have been value type {t}")
    elif isinstance(t, OptionalType):
        if t.elem == BaseType(BaseTy.Tensor):
            return NamedCType(binds, BaseCType(optionalTensorRefT))
        elif t.elem == BaseType(BaseTy.Scalar):
            return NamedCType(binds, BaseCType(optionalScalarRefT))
        elem = argumenttype_type(t.elem, mutable=mutable, binds=binds)
        return NamedCType(binds, OptionalCType(elem.type))

            

Reported by Pylint.

torch/distributions/laplace.py
23 issues
Module 'torch' has no 'Size' member
Error

Line: 41 Column: 27

                  def __init__(self, loc, scale, validate_args=None):
        self.loc, self.scale = broadcast_all(loc, scale)
        if isinstance(loc, Number) and isinstance(scale, Number):
            batch_shape = torch.Size()
        else:
            batch_shape = self.loc.size()
        super(Laplace, self).__init__(batch_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):

            

Reported by Pylint.

Module 'torch' has no 'Size' member
Error

Line: 48 Column: 23

              
    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(Laplace, _instance)
        batch_shape = torch.Size(batch_shape)
        new.loc = self.loc.expand(batch_shape)
        new.scale = self.scale.expand(batch_shape)
        super(Laplace, new).__init__(batch_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

            

Reported by Pylint.

Module 'torch' has no 'Size' member
Error

Line: 55 Column: 36

                      new._validate_args = self._validate_args
        return new

    def rsample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        finfo = torch.finfo(self.loc.dtype)
        if torch._C._get_tracing_state():
            # [JIT WORKAROUND] lack of support for .uniform_()
            u = torch.rand(shape, dtype=self.loc.dtype, device=self.loc.device) * 2 - 1

            

Reported by Pylint.

Module 'torch' has no 'finfo' member
Error

Line: 57 Column: 17

              
    def rsample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        finfo = torch.finfo(self.loc.dtype)
        if torch._C._get_tracing_state():
            # [JIT WORKAROUND] lack of support for .uniform_()
            u = torch.rand(shape, dtype=self.loc.dtype, device=self.loc.device) * 2 - 1
            return self.loc - self.scale * u.sign() * torch.log1p(-u.abs().clamp(min=finfo.tiny))
        u = self.loc.new(shape).uniform_(finfo.eps - 1, 1)

            

Reported by Pylint.

Module 'torch' has no 'rand' member
Error

Line: 60 Column: 17

                      finfo = torch.finfo(self.loc.dtype)
        if torch._C._get_tracing_state():
            # [JIT WORKAROUND] lack of support for .uniform_()
            u = torch.rand(shape, dtype=self.loc.dtype, device=self.loc.device) * 2 - 1
            return self.loc - self.scale * u.sign() * torch.log1p(-u.abs().clamp(min=finfo.tiny))
        u = self.loc.new(shape).uniform_(finfo.eps - 1, 1)
        # TODO: If we ever implement tensor.nextafter, below is what we want ideally.
        # u = self.loc.new(shape).uniform_(self.loc.nextafter(-.5, 0), .5)
        return self.loc - self.scale * u.sign() * torch.log1p(-u.abs())

            

Reported by Pylint.

Module 'torch' has no 'log1p' member
Error

Line: 61 Column: 55

                      if torch._C._get_tracing_state():
            # [JIT WORKAROUND] lack of support for .uniform_()
            u = torch.rand(shape, dtype=self.loc.dtype, device=self.loc.device) * 2 - 1
            return self.loc - self.scale * u.sign() * torch.log1p(-u.abs().clamp(min=finfo.tiny))
        u = self.loc.new(shape).uniform_(finfo.eps - 1, 1)
        # TODO: If we ever implement tensor.nextafter, below is what we want ideally.
        # u = self.loc.new(shape).uniform_(self.loc.nextafter(-.5, 0), .5)
        return self.loc - self.scale * u.sign() * torch.log1p(-u.abs())


            

Reported by Pylint.

Module 'torch' has no 'log1p' member
Error

Line: 65 Column: 51

                      u = self.loc.new(shape).uniform_(finfo.eps - 1, 1)
        # TODO: If we ever implement tensor.nextafter, below is what we want ideally.
        # u = self.loc.new(shape).uniform_(self.loc.nextafter(-.5, 0), .5)
        return self.loc - self.scale * u.sign() * torch.log1p(-u.abs())

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        return -torch.log(2 * self.scale) - torch.abs(value - self.loc) / self.scale

            

Reported by Pylint.

Module 'torch' has no 'log' member
Error

Line: 70 Column: 17

                  def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        return -torch.log(2 * self.scale) - torch.abs(value - self.loc) / self.scale

    def cdf(self, value):
        if self._validate_args:
            self._validate_sample(value)
        return 0.5 - 0.5 * (value - self.loc).sign() * torch.expm1(-(value - self.loc).abs() / self.scale)

            

Reported by Pylint.

Module 'torch' has no 'abs' member
Error

Line: 70 Column: 45

                  def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        return -torch.log(2 * self.scale) - torch.abs(value - self.loc) / self.scale

    def cdf(self, value):
        if self._validate_args:
            self._validate_sample(value)
        return 0.5 - 0.5 * (value - self.loc).sign() * torch.expm1(-(value - self.loc).abs() / self.scale)

            

Reported by Pylint.

Module 'torch' has no 'expm1' member
Error

Line: 75 Column: 56

                  def cdf(self, value):
        if self._validate_args:
            self._validate_sample(value)
        return 0.5 - 0.5 * (value - self.loc).sign() * torch.expm1(-(value - self.loc).abs() / self.scale)

    def icdf(self, value):
        term = value - 0.5
        return self.loc - self.scale * (term).sign() * torch.log1p(-2 * term.abs())


            

Reported by Pylint.

torch/distributed/rendezvous.py
23 issues
Attempted relative import beyond top-level package
Error

Line: 13 Column: 1

              from datetime import timedelta
from typing import Optional, Dict, Union
from torch.distributed import FileStore, TCPStore, PrefixStore
from .constants import default_pg_timeout

_rendezvous_handlers = {}


def register_rendezvous_handler(scheme, handler):

            

Reported by Pylint.

Consider explicitly re-raising using the 'from' keyword
Error

Line: 4 Column: 5

              try:
    from urllib.parse import urlparse, urlunparse
except ImportError:
    raise ImportError("urllib cannot be found, urlparse from python2 is no longer supported.")

import torch._six as six
import numbers
import os
import sys

            

Reported by Pylint.

Using the global statement
Error

Line: 41 Column: 5

                          the corresponding scheme. It must be a generator function
            that yields the triplet.
    """
    global _rendezvous_handlers
    if scheme in _rendezvous_handlers:
        raise RuntimeError(
            "Rendezvous handler for {}:// already registered".format(scheme)
        )
    _rendezvous_handlers[scheme] = handler

            

Reported by Pylint.

Unused argument 'kwargs'
Error

Line: 89 Column: 1

                  return ValueError("Error initializing torch.distributed using " + msg)


def _file_rendezvous_handler(url: str, **kwargs):
    def _error(msg):
        return _rendezvous_error("file:// rendezvous: " + msg)

    result = urlparse(url)
    path = result.path

            

Reported by Pylint.

Unused argument 'kwargs'
Error

Line: 122 Column: 1

                  raise RuntimeError("Unable to perform rerendezvous using file:// method")


def _tcp_rendezvous_handler(url: str, timeout: timedelta = default_pg_timeout, **kwargs):
    def _error(msg):
        return _rendezvous_error("tcp:// rendezvous: " + msg)

    result = urlparse(url)
    if not result.port:

            

Reported by Pylint.

Unused argument 'kwargs'
Error

Line: 150 Column: 1

                  raise RuntimeError("Unable to perform rerendezvous using tcp:// method")


def _env_rendezvous_handler(url: str, timeout: timedelta = default_pg_timeout, **kwargs):
    def _error(msg):
        return _rendezvous_error("env:// rendezvous: " + msg)

    def _env_error(var):
        return _error("environment variable %s expected, but not set" % var)

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              try:
    from urllib.parse import urlparse, urlunparse
except ImportError:
    raise ImportError("urllib cannot be found, urlparse from python2 is no longer supported.")

import torch._six as six
import numbers
import os
import sys

            

Reported by Pylint.

standard import "import numbers" should be placed before "import torch._six as six"
Error

Line: 7 Column: 1

                  raise ImportError("urllib cannot be found, urlparse from python2 is no longer supported.")

import torch._six as six
import numbers
import os
import sys
from datetime import timedelta
from typing import Optional, Dict, Union
from torch.distributed import FileStore, TCPStore, PrefixStore

            

Reported by Pylint.

standard import "import os" should be placed before "import torch._six as six"
Error

Line: 8 Column: 1

              
import torch._six as six
import numbers
import os
import sys
from datetime import timedelta
from typing import Optional, Dict, Union
from torch.distributed import FileStore, TCPStore, PrefixStore
from .constants import default_pg_timeout

            

Reported by Pylint.

standard import "import sys" should be placed before "import torch._six as six"
Error

Line: 9 Column: 1

              import torch._six as six
import numbers
import os
import sys
from datetime import timedelta
from typing import Optional, Dict, Union
from torch.distributed import FileStore, TCPStore, PrefixStore
from .constants import default_pg_timeout


            

Reported by Pylint.

test/typing/reveal/tensor_sampling.py
23 issues
Unable to import 'torch'
Error

Line: 2 Column: 1

              # flake8: noqa
import torch

# seed
reveal_type(torch.seed())  # E: int

# manual_seed
reveal_type(torch.manual_seed(3))  # E: torch._C.Generator


            

Reported by Pylint.

Undefined variable 'reveal_type'
Error

Line: 5 Column: 1

              import torch

# seed
reveal_type(torch.seed())  # E: int

# manual_seed
reveal_type(torch.manual_seed(3))  # E: torch._C.Generator

# initial_seed

            

Reported by Pylint.

Undefined variable 'reveal_type'
Error

Line: 8 Column: 1

              reveal_type(torch.seed())  # E: int

# manual_seed
reveal_type(torch.manual_seed(3))  # E: torch._C.Generator

# initial_seed
reveal_type(torch.initial_seed())  # E: int

# get_rng_state

            

Reported by Pylint.

Undefined variable 'reveal_type'
Error

Line: 11 Column: 1

              reveal_type(torch.manual_seed(3))  # E: torch._C.Generator

# initial_seed
reveal_type(torch.initial_seed())  # E: int

# get_rng_state
reveal_type(torch.get_rng_state())  # E: {Tensor}

# bernoulli

            

Reported by Pylint.

Undefined variable 'reveal_type'
Error

Line: 14 Column: 1

              reveal_type(torch.initial_seed())  # E: int

# get_rng_state
reveal_type(torch.get_rng_state())  # E: {Tensor}

# bernoulli
reveal_type(torch.bernoulli(torch.empty(3, 3).uniform_(0, 1)))  # E: {Tensor}

# multinomial

            

Reported by Pylint.

Undefined variable 'reveal_type'
Error

Line: 17 Column: 1

              reveal_type(torch.get_rng_state())  # E: {Tensor}

# bernoulli
reveal_type(torch.bernoulli(torch.empty(3, 3).uniform_(0, 1)))  # E: {Tensor}

# multinomial
weights = torch.tensor([0, 10, 3, 0], dtype=torch.float)
reveal_type(torch.multinomial(weights, 2))  # E: {Tensor}


            

Reported by Pylint.

Undefined variable 'reveal_type'
Error

Line: 21 Column: 1

              
# multinomial
weights = torch.tensor([0, 10, 3, 0], dtype=torch.float)
reveal_type(torch.multinomial(weights, 2))  # E: {Tensor}

# normal
reveal_type(torch.normal(2, 3, size=(1, 4)))  # E: {Tensor}

# poisson

            

Reported by Pylint.

Undefined variable 'reveal_type'
Error

Line: 24 Column: 1

              reveal_type(torch.multinomial(weights, 2))  # E: {Tensor}

# normal
reveal_type(torch.normal(2, 3, size=(1, 4)))  # E: {Tensor}

# poisson
reveal_type(torch.poisson(torch.rand(4, 4) * 5))  # E: {Tensor}

# rand

            

Reported by Pylint.

Undefined variable 'reveal_type'
Error

Line: 27 Column: 1

              reveal_type(torch.normal(2, 3, size=(1, 4)))  # E: {Tensor}

# poisson
reveal_type(torch.poisson(torch.rand(4, 4) * 5))  # E: {Tensor}

# rand
reveal_type(torch.rand(4))  # E: {Tensor}
reveal_type(torch.rand(2, 3))  # E: {Tensor}


            

Reported by Pylint.

Undefined variable 'reveal_type'
Error

Line: 30 Column: 1

              reveal_type(torch.poisson(torch.rand(4, 4) * 5))  # E: {Tensor}

# rand
reveal_type(torch.rand(4))  # E: {Tensor}
reveal_type(torch.rand(2, 3))  # E: {Tensor}

# rand_like
a = torch.rand(4)
reveal_type(torch.rand_like(a))  # E: {Tensor}

            

Reported by Pylint.

tools/testing/explicit_ci_jobs.py
23 issues
Redefining built-in 'type'
Error

Line: 33 Column: 5

              def add_job(
    workflows: Dict[str, Any],
    workflow_name: str,
    type: str,
    job: Dict[str, Any],
    past_jobs: Dict[str, Any],
) -> None:
    """
    Add job 'job' under 'type' and 'workflow_name' to 'workflow' in place. Also

            

Reported by Pylint.

Redefining built-in 'type'
Error

Line: 68 Column: 17

                          continue

        for job_dict in workflow["jobs"]:
            for type, job in job_dict.items():
                if "name" not in job:
                    # Job doesn't have a name so it can't be handled
                    print("Skipping", type)
                else:
                    if job["name"] in relevant_jobs:

            

Reported by Pylint.

Redefining name 'message' from outer scope (line 134)
Error

Line: 88 Column: 33

                  return new_workflows


def commit_ci(files: List[str], message: str) -> None:
    # Check that there are no other modified files than the ones edited by this
    # tool
    stdout = subprocess.run(["git", "status", "--porcelain"], stdout=subprocess.PIPE).stdout.decode()
    for line in stdout.split("\n"):
        if line == "":

            

Reported by Pylint.

Using subprocess.run without explicitly set `check` is not recommended.
Error

Line: 91 Column: 14

              def commit_ci(files: List[str], message: str) -> None:
    # Check that there are no other modified files than the ones edited by this
    # tool
    stdout = subprocess.run(["git", "status", "--porcelain"], stdout=subprocess.PIPE).stdout.decode()
    for line in stdout.split("\n"):
        if line == "":
            continue
        if line[0] != " ":
            raise RuntimeError(f"Refusing to commit while other changes are already staged: {line}")

            

Reported by Pylint.

Using subprocess.run without explicitly set `check` is not recommended.
Error

Line: 100 Column: 5

              

    # Make the commit
    subprocess.run(["git", "add"] + files)
    subprocess.run(["git", "commit", "-m", message])


if __name__ == "__main__":
    parser = argparse.ArgumentParser(

            

Reported by Pylint.

Using subprocess.run without explicitly set `check` is not recommended.
Error

Line: 101 Column: 5

              
    # Make the commit
    subprocess.run(["git", "add"] + files)
    subprocess.run(["git", "commit", "-m", message])


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="make .circleci/config.yml only have a specific set of jobs and delete GitHub actions"

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              #!/usr/bin/env python3

import yaml
import textwrap
import subprocess
import pathlib
import argparse

from typing import Dict, List, Any

            

Reported by Pylint.

standard import "import textwrap" should be placed before "import yaml"
Error

Line: 4 Column: 1

              #!/usr/bin/env python3

import yaml
import textwrap
import subprocess
import pathlib
import argparse

from typing import Dict, List, Any

            

Reported by Pylint.

Consider possible security implications associated with subprocess module.
Security blacklist

Line: 5
Suggestion: https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b404-import-subprocess

              
import yaml
import textwrap
import subprocess
import pathlib
import argparse

from typing import Dict, List, Any


            

Reported by Bandit.

standard import "import subprocess" should be placed before "import yaml"
Error

Line: 5 Column: 1

              
import yaml
import textwrap
import subprocess
import pathlib
import argparse

from typing import Dict, List, Any


            

Reported by Pylint.

torch/autograd/forward_ad.py
23 issues
Attempted relative import beyond top-level package
Error

Line: 2 Column: 1

              import torch
from .grad_mode import _DecoratorContextManager

from typing import Any

# TODO(alband): Once most of the formulas are implemented, these functions need to be added
# to the main doc to make them fully "public".

# Global variable used to make the python API simpler to use

            

Reported by Pylint.

Module 'torch._VF' has no '_make_dual' member
Error

Line: 68 Column: 12

                      raise RuntimeError("Trying to create a dual Tensor for forward AD but no level "
                           "exists, make sure to enter_dual_level() first.")

    return torch._VF._make_dual(tensor, tangent, level=level)

def unpack_dual(tensor, *, level=None):
    r"""Function that unpacks a "dual object" to recover two plain tensors, one representing
    the primal and the other the tangent (both are views of :attr:`tensor`. Neither of these
    tensors can be dual tensor of level :attr:`level`.

            

Reported by Pylint.

Module 'torch._VF' has no '_unpack_dual' member
Error

Line: 83 Column: 12

                  if level < 0:
        return tensor, None

    return torch._VF._unpack_dual(tensor, level=level)

class dual_level(_DecoratorContextManager):
    r"""Context-manager that controls the current forward ad level. It
    appropriately enters and exit the dual level.


            

Reported by Pylint.

TODO(alband): Once most of the formulas are implemented, these functions need to be added
Error

Line: 6 Column: 3

              
from typing import Any

# TODO(alband): Once most of the formulas are implemented, these functions need to be added
# to the main doc to make them fully "public".

# Global variable used to make the python API simpler to use
_current_level = -1


            

Reported by Pylint.

Using the global statement
Error

Line: 20 Column: 5

                  This function also updates the current level that is used by default
    by the other functions in this API.
    """
    global _current_level
    new_level = torch._C._enter_dual_level()
    if new_level != _current_level + 1:
        raise RuntimeError("Entering a new forward AD level but the current level "
                           "is not valid. Make sure you did not modified it directly.")
    _current_level = new_level

            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 21 Column: 17

                  by the other functions in this API.
    """
    global _current_level
    new_level = torch._C._enter_dual_level()
    if new_level != _current_level + 1:
        raise RuntimeError("Entering a new forward AD level but the current level "
                           "is not valid. Make sure you did not modified it directly.")
    _current_level = new_level
    return new_level

            

Reported by Pylint.

Access to a protected member _enter_dual_level of a client class
Error

Line: 21 Column: 17

                  by the other functions in this API.
    """
    global _current_level
    new_level = torch._C._enter_dual_level()
    if new_level != _current_level + 1:
        raise RuntimeError("Entering a new forward AD level but the current level "
                           "is not valid. Make sure you did not modified it directly.")
    _current_level = new_level
    return new_level

            

Reported by Pylint.

Using the global statement
Error

Line: 36 Column: 5

                  This function also updates the current level that is used by default
    by the other functions in this API.
    """
    global _current_level
    if level is None:
        level = _current_level
    if level != _current_level:
        raise RuntimeError("Trying to exit a forward AD level that was not the last one "
                           "that was created. This is not supported.")

            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 42 Column: 5

                  if level != _current_level:
        raise RuntimeError("Trying to exit a forward AD level that was not the last one "
                           "that was created. This is not supported.")
    torch._C._exit_dual_level(level=level)
    _current_level = level - 1

def make_dual(tensor, tangent, *, level=None):
    r"""Function that creates a "dual object" that can be used to compute forward AD gradients
    based on the given Tensor and its tangent. It returns a new Tensor that shares memory with

            

Reported by Pylint.

Access to a protected member _exit_dual_level of a client class
Error

Line: 42 Column: 5

                  if level != _current_level:
        raise RuntimeError("Trying to exit a forward AD level that was not the last one "
                           "that was created. This is not supported.")
    torch._C._exit_dual_level(level=level)
    _current_level = level - 1

def make_dual(tensor, tangent, *, level=None):
    r"""Function that creates a "dual object" that can be used to compute forward AD gradients
    based on the given Tensor and its tangent. It returns a new Tensor that shares memory with

            

Reported by Pylint.

test/test_functional_optim.py
23 issues
Unable to import 'torch'
Error

Line: 3 Column: 1

              import unittest

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import SGD, Adam, AdamW
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS
from torch.distributed.optim import functional_optim_map


            

Reported by Pylint.

Unable to import 'torch.nn'
Error

Line: 4 Column: 1

              import unittest

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import SGD, Adam, AdamW
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS
from torch.distributed.optim import functional_optim_map


            

Reported by Pylint.

Unable to import 'torch.nn.functional'
Error

Line: 5 Column: 1

              
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import SGD, Adam, AdamW
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS
from torch.distributed.optim import functional_optim_map

class MyModule(torch.nn.Module):

            

Reported by Pylint.

Unable to import 'torch.optim'
Error

Line: 6 Column: 1

              import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import SGD, Adam, AdamW
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS
from torch.distributed.optim import functional_optim_map

class MyModule(torch.nn.Module):
    def __init__(self):

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 7 Column: 1

              import torch.nn as nn
import torch.nn.functional as F
from torch.optim import SGD, Adam, AdamW
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS
from torch.distributed.optim import functional_optim_map

class MyModule(torch.nn.Module):
    def __init__(self):
        super().__init__()

            

Reported by Pylint.

Unable to import 'torch.distributed.optim'
Error

Line: 8 Column: 1

              import torch.nn.functional as F
from torch.optim import SGD, Adam, AdamW
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS
from torch.distributed.optim import functional_optim_map

class MyModule(torch.nn.Module):
    def __init__(self):
        super().__init__()
        torch.manual_seed(0)

            

Reported by Pylint.

Unused variable 'functional_params'
Error

Line: 30 Column: 9

                      module_optim = MyModule()
        module_functional = MyModule()
        optim_params = module_optim.parameters()
        functional_params = module_functional.parameters()
        optim = optim_cls(optim_params, *args, **kwargs)
        functional_optim_cls = functional_optim_map.get(optim_cls, None)
        if not functional_optim_cls:
            raise ValueError(f"Functional optimizer not implemented for {optim_cls}")
        optim_functional = functional_optim_cls(

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              import unittest

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import SGD, Adam, AdamW
from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS
from torch.distributed.optim import functional_optim_map


            

Reported by Pylint.

Missing class docstring
Error

Line: 10 Column: 1

              from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS
from torch.distributed.optim import functional_optim_map

class MyModule(torch.nn.Module):
    def __init__(self):
        super().__init__()
        torch.manual_seed(0)
        self.lin1 = nn.Linear(3, 3, bias=False)
        self.lin2 = nn.Linear(3, 3, bias=False)

            

Reported by Pylint.

Too few public methods (1/2)
Error

Line: 10 Column: 1

              from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS
from torch.distributed.optim import functional_optim_map

class MyModule(torch.nn.Module):
    def __init__(self):
        super().__init__()
        torch.manual_seed(0)
        self.lin1 = nn.Linear(3, 3, bias=False)
        self.lin2 = nn.Linear(3, 3, bias=False)

            

Reported by Pylint.

tools/code_coverage/package/tool/summarize_jsons.py
23 issues
Attempted relative import beyond top-level package
Error

Line: 6 Column: 1

              import time
from typing import Any, Dict, List, Set, Tuple

from ..util.setting import (
    JSON_FOLDER_BASE_DIR,
    CompilerType,
    TestList,
    TestPlatform,
    TestStatusType,

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 13 Column: 1

                  TestPlatform,
    TestStatusType,
)
from ..util.utils import (
    detect_compiler_type,
    print_error,
    print_time,
    related_to_test_list,
)

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 19 Column: 1

                  print_time,
    related_to_test_list,
)
from .parser.coverage_record import CoverageRecord
from .parser.gcov_coverage_parser import GcovCoverageParser
from .parser.llvm_coverage_parser import LlvmCoverageParser
from .print_report import (
    file_oriented_report,
    html_oriented_report,

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 20 Column: 1

                  related_to_test_list,
)
from .parser.coverage_record import CoverageRecord
from .parser.gcov_coverage_parser import GcovCoverageParser
from .parser.llvm_coverage_parser import LlvmCoverageParser
from .print_report import (
    file_oriented_report,
    html_oriented_report,
    line_oriented_report,

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 21 Column: 1

              )
from .parser.coverage_record import CoverageRecord
from .parser.gcov_coverage_parser import GcovCoverageParser
from .parser.llvm_coverage_parser import LlvmCoverageParser
from .print_report import (
    file_oriented_report,
    html_oriented_report,
    line_oriented_report,
)

            

Reported by Pylint.

Attempted relative import beyond top-level package
Error

Line: 22 Column: 1

              from .parser.coverage_record import CoverageRecord
from .parser.gcov_coverage_parser import GcovCoverageParser
from .parser.llvm_coverage_parser import LlvmCoverageParser
from .print_report import (
    file_oriented_report,
    html_oriented_report,
    line_oriented_report,
)


            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              import json
import os
import time
from typing import Any, Dict, List, Set, Tuple

from ..util.setting import (
    JSON_FOLDER_BASE_DIR,
    CompilerType,
    TestList,

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 35 Column: 1

              tests_type: TestStatusType = {"success": set(), "partial": set(), "fail": set()}


def transform_file_name(
    file_path: str, interested_folders: List[str], platform: TestPlatform
) -> str:
    remove_patterns: Set[str] = {".DEFAULT.cpp", ".AVX.cpp", ".AVX2.cpp"}
    for pattern in remove_patterns:
        file_path = file_path.replace(pattern, "")

            

Reported by Pylint.

Import outside toplevel (package.oss.utils.get_pytorch_folder)
Error

Line: 48 Column: 9

                              return file_path[file_path.find(folder) :]
    # remove pytorch base folder path
    if platform == TestPlatform.OSS:
        from package.oss.utils import get_pytorch_folder

        pytorch_foler = get_pytorch_folder()
        assert file_path.startswith(pytorch_foler)
        file_path = file_path[len(pytorch_foler) + 1 :]
    return file_path

            

Reported by Pylint.

Use of assert detected. The enclosed code will be removed when compiling to optimised byte code.
Security

Line: 51
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html

                      from package.oss.utils import get_pytorch_folder

        pytorch_foler = get_pytorch_folder()
        assert file_path.startswith(pytorch_foler)
        file_path = file_path[len(pytorch_foler) + 1 :]
    return file_path


def is_intrested_file(

            

Reported by Bandit.

test/test_segment_reductions.py
23 issues
Unable to import 'torch'
Error

Line: 4 Column: 1

              from itertools import product

import numpy as np
import torch
from torch.testing._internal.common_device_type import (
    instantiate_device_type_tests,
    dtypes,
)
from torch.testing._internal.common_utils import (

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_device_type'
Error

Line: 5 Column: 1

              
import numpy as np
import torch
from torch.testing._internal.common_device_type import (
    instantiate_device_type_tests,
    dtypes,
)
from torch.testing._internal.common_utils import (
    TestCase,

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 9 Column: 1

                  instantiate_device_type_tests,
    dtypes,
)
from torch.testing._internal.common_utils import (
    TestCase,
    run_tests,
    gradcheck,
)


            

Reported by Pylint.

Redefining name 'dtypes' from outer scope (line 5)
Error

Line: 110 Column: 38

                          (torch.int, torch.int64),
        )
    )
    def test_simple_1d(self, device, dtypes):
        val_dtype, length_type = dtypes
        lengths = [1, 2, 3, 0]
        data = [1, float("nan"), 3, 4, 5, 5]

        for reduction in reductions:

            

Reported by Pylint.

Redefining name 'dtypes' from outer scope (line 5)
Error

Line: 158 Column: 43

                          (torch.int, torch.int64),
        )
    )
    def test_multi_d_simple(self, device, dtypes):
        val_dtype, length_type = dtypes
        axis = 0
        lengths = [1, 2, 3, 0]
        data = [[1, 1], [float("nan"), 1], [3, float("nan")], [4, 1], [3, 2], [2, 3]]


            

Reported by Pylint.

Unused variable 'length_type'
Error

Line: 159 Column: 20

                      )
    )
    def test_multi_d_simple(self, device, dtypes):
        val_dtype, length_type = dtypes
        axis = 0
        lengths = [1, 2, 3, 0]
        data = [[1, 1], [float("nan"), 1], [3, float("nan")], [4, 1], [3, 2], [2, 3]]

        for reduction in reductions:

            

Reported by Pylint.

Redefining name 'dtypes' from outer scope (line 5)
Error

Line: 253 Column: 36

                          (torch.int, torch.int64),
        )
    )
    def test_multi_d(self, device, dtypes):
        val_dtype, length_type = dtypes
        axis = 0
        lengths = [0, 2]
        data = np.arange(20).reshape(2, 2, 5).tolist()
        expected_grad = []

            

Reported by Pylint.

Unused variable 'length_type'
Error

Line: 254 Column: 20

                      )
    )
    def test_multi_d(self, device, dtypes):
        val_dtype, length_type = dtypes
        axis = 0
        lengths = [0, 2]
        data = np.arange(20).reshape(2, 2, 5).tolist()
        expected_grad = []


            

Reported by Pylint.

TODO: calculate grad and check correctness
Error

Line: 260 Column: 3

                      data = np.arange(20).reshape(2, 2, 5).tolist()
        expected_grad = []

        # TODO: calculate grad and check correctness
        check_backward = False

        for reduction in reductions:
            initial_value = 0
            if reduction == "max":

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              from itertools import product

import numpy as np
import torch
from torch.testing._internal.common_device_type import (
    instantiate_device_type_tests,
    dtypes,
)
from torch.testing._internal.common_utils import (

            

Reported by Pylint.

caffe2/python/ideep/adam_op_test.py
23 issues
Unable to import 'hypothesis.strategies'
Error

Line: 7 Column: 1

              

import numpy as np
import hypothesis.strategies as st
import unittest
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.ideep_test_util as mu

            

Reported by Pylint.

Unable to import 'hypothesis'
Error

Line: 11 Column: 1

              import unittest
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.ideep_test_util as mu


@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestAdamOps(hu.HypothesisTestCase):

            

Reported by Pylint.

Module 'caffe2.python._import_c_extension' has no 'use_mkldnn' member
Error

Line: 15 Column: 22

              import caffe2.python.ideep_test_util as mu


@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestAdamOps(hu.HypothesisTestCase):
    @given(inputs=hu.tensors(n=4),
           ITER=st.integers(min_value=0, max_value=10000),
           LR=st.floats(min_value=0.01, max_value=0.99,
                        allow_nan=False, allow_infinity=False),

            

Reported by Pylint.

Unused argument 'gc'
Error

Line: 28 Column: 66

                         epsilon=st.floats(min_value=0.01, max_value=0.99,
                             allow_nan=False, allow_infinity=False),
           **mu.gcs)
    def test_adam(self, inputs, ITER, LR, beta1, beta2, epsilon, gc, dc):
        param, mom1, mom2, grad = inputs
        ITER = np.array([ITER], dtype=np.int64)
        LR = np.array([LR], dtype=np.float32)
        mom2 = np.absolute(mom2)
        op = core.CreateOperator(

            

Reported by Pylint.

Unused argument 'gc'
Error

Line: 59 Column: 78

                         epsilon=st.floats(min_value=0.01, max_value=0.99,
                             allow_nan=False, allow_infinity=False),
           **mu.gcs)
    def test_adam_output_grad(self, inputs, ITER, LR, beta1, beta2, epsilon, gc, dc):
        param, mom1, mom2, grad = inputs
        ITER = np.array([ITER], dtype=np.int64)
        LR = np.array([LR], dtype=np.float32)
        mom2 = np.absolute(mom2)


            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              




import numpy as np
import hypothesis.strategies as st
import unittest
import caffe2.python.hypothesis_test_util as hu

            

Reported by Pylint.

standard import "import unittest" should be placed before "import numpy as np"
Error

Line: 8 Column: 1

              
import numpy as np
import hypothesis.strategies as st
import unittest
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.ideep_test_util as mu


            

Reported by Pylint.

Imports from package caffe2 are not grouped
Error

Line: 12 Column: 1

              import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.ideep_test_util as mu


@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestAdamOps(hu.HypothesisTestCase):
    @given(inputs=hu.tensors(n=4),

            

Reported by Pylint.

Missing class docstring
Error

Line: 16 Column: 1

              

@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestAdamOps(hu.HypothesisTestCase):
    @given(inputs=hu.tensors(n=4),
           ITER=st.integers(min_value=0, max_value=10000),
           LR=st.floats(min_value=0.01, max_value=0.99,
                        allow_nan=False, allow_infinity=False),
           beta1=st.floats(min_value=0.01, max_value=0.99,

            

Reported by Pylint.

Argument name "ITER" doesn't conform to snake_case naming style
Error

Line: 28 Column: 5

                         epsilon=st.floats(min_value=0.01, max_value=0.99,
                             allow_nan=False, allow_infinity=False),
           **mu.gcs)
    def test_adam(self, inputs, ITER, LR, beta1, beta2, epsilon, gc, dc):
        param, mom1, mom2, grad = inputs
        ITER = np.array([ITER], dtype=np.int64)
        LR = np.array([LR], dtype=np.float32)
        mom2 = np.absolute(mom2)
        op = core.CreateOperator(

            

Reported by Pylint.