The following issues were found:

torch/distributed/algorithms/join.py
16 issues
Module 'torch' has no 'device' member
Error

Line: 70 Column: 30

              
    @property
    @abstractmethod
    def join_device(self) -> torch.device:
        r"""
        Returns the device from which to perform collective communications
        needed by the join context manager implementation itself.
        """
        ...

            

Reported by Pylint.

Module 'torch' has no 'zeros' member
Error

Line: 288 Column: 31

                      Returns the number of non-joined processes by shadowing an all-reduce
        in the non-joined processes.
        """
        num_nonjoined_procs = torch.zeros(1, device=self._device)
        dist.all_reduce(num_nonjoined_procs, group=self._process_group)
        return num_nonjoined_procs.item()

    def _notify_procs_to_terminate(self):
        r"""

            

Reported by Pylint.

Module 'torch' has no 'ones' member
Error

Line: 298 Column: 16

                      and raises a ``RuntimeError`` indicating that the current process has
        exhausted its inputs.
        """
        ones = torch.ones(1, device=self._device)
        dist.all_reduce(ones, group=self._process_group)
        # NOTE: Raising `StopIteration` does not throw an error in Python 3.6
        # and throws a `RuntimeError` in Python 3.7+ (PEP 479), so we just
        # raise a `RuntimeError` here
        raise RuntimeError(f"Rank {self._rank} exhausted all inputs.")

            

Reported by Pylint.

Module 'torch' has no 'ones' member
Error

Line: 344 Column: 16

                      process_group = joinable.join_process_group

        # Schedule an all-reduce to indicate that the caller has not yet joined
        ones = torch.ones(1, device=device)
        work = dist.all_reduce(ones, group=process_group, async_op=True)

        if join_config.throw_on_early_termination:
            # Check if uneven inputs have been detected
            zeros = torch.zeros(1, device=device)

            

Reported by Pylint.

Module 'torch' has no 'zeros' member
Error

Line: 349 Column: 21

              
        if join_config.throw_on_early_termination:
            # Check if uneven inputs have been detected
            zeros = torch.zeros(1, device=device)
            dist.all_reduce(zeros, group=process_group)
            should_throw = zeros.item()
            if should_throw:
                raise RuntimeError(
                    "Detected at least one rank that exhausted inputs. "

            

Reported by Pylint.

Unused argument 'is_last_joiner'
Error

Line: 29 Column: 25

                      """
        ...

    def post_hook(self, is_last_joiner: bool) -> None:
        r"""
        This hook is called after all processes have joined. It is passed an
        additional ``bool`` argument ``is_last_joiner``, which indicates if the
        rank is one of the last to join.


            

Reported by Pylint.

Access to a protected member _join_config of a client class
Error

Line: 193 Column: 13

                      assert len(self._joinables) > 0
        is_first_joinable = True
        for joinable in self._joinables:
            joinable._join_config = _JoinConfig(
                enable=self._enable,
                throw_on_early_termination=self._throw_on_early_termination,
                is_first_joinable=is_first_joinable
            )
            is_first_joinable = False

            

Reported by Pylint.

Redefining built-in 'type'
Error

Line: 232 Column: 9

              
    def __exit__(
        self,
        type: Optional[Type[BaseException]],
        value: Optional[BaseException],
        traceback: Optional[TracebackType]
    ):
        r"""
        Repeatedly runs the main hooks until all processes join; then, runs

            

Reported by Pylint.

Access to a protected member _join_config of a client class
Error

Line: 335 Column: 23

                          f"Check that the {type(joinable)} constructor calls the " \
            "``Joinable`` constructor"

        join_config = joinable._join_config
        # First joinable is responsible for the collective communications
        if not join_config.is_first_joinable or not join_config.enable:
            return None

        device = joinable.join_device

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              import warnings
from abc import ABC, abstractmethod
from types import TracebackType
from typing import Any, List, NamedTuple, Optional, Type

import torch
import torch.distributed as dist



            

Reported by Pylint.

test/test_functional_autograd_benchmark.py
16 issues
Unable to import 'torch.testing._internal.common_utils'
Error

Line: 1 Column: 1

              from torch.testing._internal.common_utils import TestCase, run_tests, slowTest, IS_WINDOWS

import subprocess
import tempfile
import os
import unittest

PYTORCH_COLLECT_COVERAGE = bool(os.environ.get("PYTORCH_COLLECT_COVERAGE"))


            

Reported by Pylint.

Using subprocess.run without explicitly set `check` is not recommended.
Error

Line: 30 Column: 19

                          if disable_gpu:
                cmd += ['--gpu', '-1']

            res = subprocess.run(cmd)

            self.assertTrue(res.returncode == 0)
            # Check that something was written to the file
            out_file.seek(0, os.SEEK_END)
            self.assertTrue(out_file.tell() > 0)

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              from torch.testing._internal.common_utils import TestCase, run_tests, slowTest, IS_WINDOWS

import subprocess
import tempfile
import os
import unittest

PYTORCH_COLLECT_COVERAGE = bool(os.environ.get("PYTORCH_COLLECT_COVERAGE"))


            

Reported by Pylint.

Consider possible security implications associated with subprocess module.
Security blacklist

Line: 3
Suggestion: https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b404-import-subprocess

              from torch.testing._internal.common_utils import TestCase, run_tests, slowTest, IS_WINDOWS

import subprocess
import tempfile
import os
import unittest

PYTORCH_COLLECT_COVERAGE = bool(os.environ.get("PYTORCH_COLLECT_COVERAGE"))


            

Reported by Bandit.

standard import "import subprocess" should be placed before "from torch.testing._internal.common_utils import TestCase, run_tests, slowTest, IS_WINDOWS"
Error

Line: 3 Column: 1

              from torch.testing._internal.common_utils import TestCase, run_tests, slowTest, IS_WINDOWS

import subprocess
import tempfile
import os
import unittest

PYTORCH_COLLECT_COVERAGE = bool(os.environ.get("PYTORCH_COLLECT_COVERAGE"))


            

Reported by Pylint.

standard import "import tempfile" should be placed before "from torch.testing._internal.common_utils import TestCase, run_tests, slowTest, IS_WINDOWS"
Error

Line: 4 Column: 1

              from torch.testing._internal.common_utils import TestCase, run_tests, slowTest, IS_WINDOWS

import subprocess
import tempfile
import os
import unittest

PYTORCH_COLLECT_COVERAGE = bool(os.environ.get("PYTORCH_COLLECT_COVERAGE"))


            

Reported by Pylint.

standard import "import os" should be placed before "from torch.testing._internal.common_utils import TestCase, run_tests, slowTest, IS_WINDOWS"
Error

Line: 5 Column: 1

              
import subprocess
import tempfile
import os
import unittest

PYTORCH_COLLECT_COVERAGE = bool(os.environ.get("PYTORCH_COLLECT_COVERAGE"))

# This is a very simple smoke test for the functional autograd benchmarking script.

            

Reported by Pylint.

standard import "import unittest" should be placed before "from torch.testing._internal.common_utils import TestCase, run_tests, slowTest, IS_WINDOWS"
Error

Line: 6 Column: 1

              import subprocess
import tempfile
import os
import unittest

PYTORCH_COLLECT_COVERAGE = bool(os.environ.get("PYTORCH_COLLECT_COVERAGE"))

# This is a very simple smoke test for the functional autograd benchmarking script.
class TestFunctionalAutogradBenchmark(TestCase):

            

Reported by Pylint.

Missing class docstring
Error

Line: 11 Column: 1

              PYTORCH_COLLECT_COVERAGE = bool(os.environ.get("PYTORCH_COLLECT_COVERAGE"))

# This is a very simple smoke test for the functional autograd benchmarking script.
class TestFunctionalAutogradBenchmark(TestCase):
    def _test_runner(self, model, disable_gpu=False):
        # Note about windows:
        # The temporary file is exclusively open by this process and the child process
        # is not allowed to open it again. As this is a simple smoke test, we choose for now
        # not to run this on windows and keep the code here simple.

            

Reported by Pylint.

Line too long (108/100)
Error

Line: 18 Column: 1

                      # is not allowed to open it again. As this is a simple smoke test, we choose for now
        # not to run this on windows and keep the code here simple.
        with tempfile.NamedTemporaryFile() as out_file:
            cmd = ['python', '../benchmarks/functional_autograd_benchmark/functional_autograd_benchmark.py']
            # Only run the warmup
            cmd += ['--num-iters', '0']
            # Only run the vjp task (fastest one)
            cmd += ['--task-filter', 'vjp']
            # Only run the specified model

            

Reported by Pylint.

torch/distributed/pipeline/sync/batchnorm.py
16 issues
Attempted relative import beyond top-level package
Error

Line: 15 Column: 1

              from torch.nn.functional import batch_norm
from torch.nn.modules.batchnorm import _BatchNorm

from .checkpoint import is_recomputing

__all__ = ["DeferredBatchNorm"]


TModule = TypeVar("TModule", bound=nn.Module)

            

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 44 Column: 37

                  ) -> None:
        super().__init__(num_features, eps, momentum, affine, track_running_stats=True)

        self.register_buffer("sum", torch.zeros_like(self.running_mean))
        self.register_buffer("sum_squares", torch.zeros_like(self.running_var))

        self.counter = 0
        self.tracked = 0
        self.chunks = chunks

            

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 45 Column: 45

                      super().__init__(num_features, eps, momentum, affine, track_running_stats=True)

        self.register_buffer("sum", torch.zeros_like(self.running_mean))
        self.register_buffer("sum_squares", torch.zeros_like(self.running_var))

        self.counter = 0
        self.tracked = 0
        self.chunks = chunks


            

Reported by Pylint.

Instance of 'DeferredBatchNorm' has no 'sum' member
Error

Line: 63 Column: 13

                      dim.extend(range(2, input.dim()))

        with torch.no_grad():
            self.sum += input.sum(dim)
            self.sum_squares += (input ** 2).sum(dim)

        size = input.size().numel() // input.size(1)
        self.counter += size
        self.tracked += 1

            

Reported by Pylint.

Instance of 'DeferredBatchNorm' has no 'sum_squares' member
Error

Line: 64 Column: 13

              
        with torch.no_grad():
            self.sum += input.sum(dim)
            self.sum_squares += (input ** 2).sum(dim)

        size = input.size().numel() // input.size(1)
        self.counter += size
        self.tracked += 1


            

Reported by Pylint.

Instance of 'DeferredBatchNorm' has no 'sum' member
Error

Line: 81 Column: 16

                      else:  # use exponential moving average
            exponential_average_factor = self.momentum

        mean = self.sum / self.counter
        var = self.sum_squares / self.counter - mean ** 2

        # Calculate the exponential moving average here.
        m = exponential_average_factor


            

Reported by Pylint.

Instance of 'DeferredBatchNorm' has no 'sum_squares' member
Error

Line: 82 Column: 15

                          exponential_average_factor = self.momentum

        mean = self.sum / self.counter
        var = self.sum_squares / self.counter - mean ** 2

        # Calculate the exponential moving average here.
        m = exponential_average_factor

        self.running_mean *= 1 - m

            

Reported by Pylint.

Instance of 'DeferredBatchNorm' has no 'sum' member
Error

Line: 93 Column: 9

                      self.running_var *= 1 - m
        self.running_var += var * m

        self.sum.zero_()
        self.sum_squares.zero_()
        self.counter = 0
        self.tracked = 0

    def forward(self, input: Tensor) -> Tensor:

            

Reported by Pylint.

Instance of 'DeferredBatchNorm' has no 'sum_squares' member
Error

Line: 94 Column: 9

                      self.running_var += var * m

        self.sum.zero_()
        self.sum_squares.zero_()
        self.counter = 0
        self.tracked = 0

    def forward(self, input: Tensor) -> Tensor:
        if not self.training:

            

Reported by Pylint.

Redefining built-in 'input'
Error

Line: 51 Column: 32

                      self.tracked = 0
        self.chunks = chunks

    def _check_input_dim(self, input: Tensor) -> None:
        # It's the typical _check_input_dim() implementation in PyTorch.
        if input.dim() <= 2:
            raise ValueError("expected at least 3D input (got %dD input)" % input.dim())

    def _track(self, input: Tensor) -> bool:

            

Reported by Pylint.

torch/distributed/elastic/utils/distributed.py
16 issues
Use lazy % formatting in logging functions
Error

Line: 40 Column: 9

                      )

    if server_port != -1:
        log.info(f"sever_port: {server_port}, specified, ignoring retries")

    # only retry when server_port is NOT static
    attempt = retries if server_port == -1 else 1
    while True:
        if server_port != -1:

            

Reported by Pylint.

Use lazy % formatting in logging functions
Error

Line: 50 Column: 9

                      else:
            port = get_free_port()

        log.info(
            f"Creating c10d store on {server_addr}:{port}\n"
            f"  world_size  : {world_size}\n"
            f"  is_server   : {is_server}\n"
            f"  timeout(sec): {timeout}\n"
        )

            

Reported by Pylint.

TODO properly map the exceptions in pybind (c10d/init.cpp)
Error

Line: 73 Column: 3

                          # so we parse the error msg for now, interestingly this is how torch itself
            # detects timeouts and port conflicts in their own unittests
            # see - caffe2/torch/testing/_internal/common_utils.py
            # TODO properly map the exceptions in pybind (c10d/init.cpp)
            if _CONNECT_TIMEOUT in str(e) and not is_server:
                raise TimeoutError(
                    f"timed out waiting for tcp store's server: {server_addr}:{port}"
                ) from e
            elif str(e) == _ADDRESS_IN_USE:  # this will only happen on the server

            

Reported by Pylint.

Use lazy % formatting in logging functions
Error

Line: 80 Column: 21

                              ) from e
            elif str(e) == _ADDRESS_IN_USE:  # this will only happen on the server
                if attempt < retries:
                    log.warning(
                        f"port: {port} already in use, attempt: [{attempt}/{retries}]"
                    )
                    attempt += 1
                else:
                    raise IOError(

            

Reported by Pylint.

Redefining built-in 'type'
Error

Line: 135 Column: 17

                      host="localhost", port=None, family=socket.AF_UNSPEC, type=socket.SOCK_STREAM
    )
    for addr in addrs:
        family, type, proto, _, _ = addr
        s = socket.socket(family, type, proto)
        try:
            s.bind(("localhost", 0))
            s.listen(0)
            return s

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              #!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import socket

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 26 Column: 1

              _LAST_MEMBER_CHECKIN = "_tcp_store/last_member"


def create_c10d_store(
    is_server: bool,
    server_addr: str,
    server_port: int = -1,
    world_size: int = 1,
    timeout: float = (60 * 10),  # 10 min

            

Reported by Pylint.

Too many arguments (6/5)
Error

Line: 26 Column: 1

              _LAST_MEMBER_CHECKIN = "_tcp_store/last_member"


def create_c10d_store(
    is_server: bool,
    server_addr: str,
    server_port: int = -1,
    world_size: int = 1,
    timeout: float = (60 * 10),  # 10 min

            

Reported by Pylint.

Line too long (120/100)
Error

Line: 36 Column: 1

              ):
    if server_port == -1 and world_size > 1:
        raise ValueError(
            f"server_port must be specified when world_size > 1, got server_port={server_port}, world_size={world_size}"
        )

    if server_port != -1:
        log.info(f"sever_port: {server_port}, specified, ignoring retries")


            

Reported by Pylint.

Variable name "e" doesn't conform to snake_case naming style
Error

Line: 68 Column: 9

                          _check_full_rank(store, world_size)
            log.info("Successfully created c10d store")
            return store
        except RuntimeError as e:
            # this is brittle, but the underlying exception type is not properly pybinded
            # so we parse the error msg for now, interestingly this is how torch itself
            # detects timeouts and port conflicts in their own unittests
            # see - caffe2/torch/testing/_internal/common_utils.py
            # TODO properly map the exceptions in pybind (c10d/init.cpp)

            

Reported by Pylint.

test/test_openmp.py
16 issues
Unable to import 'torch'
Error

Line: 4 Column: 1

              import collections
import unittest

import torch
from torch.testing._internal.common_utils import (
    TestCase, run_tests, TEST_WITH_ASAN)

try:
    import psutil

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 5 Column: 1

              import unittest

import torch
from torch.testing._internal.common_utils import (
    TestCase, run_tests, TEST_WITH_ASAN)

try:
    import psutil
    HAS_PSUTIL = True

            

Reported by Pylint.

Unused variable 'n'
Error

Line: 37 Column: 13

                      p = psutil.Process()
        # warm up for 5 runs, then things should be stable for the last 5
        last_rss = collections.deque(maxlen=5)
        for n in range(10):
            for i in range(runs):
                self.model(self.x)
            last_rss.append(p.memory_info().rss)
        return last_rss


            

Reported by Pylint.

Unused variable 'i'
Error

Line: 38 Column: 17

                      # warm up for 5 runs, then things should be stable for the last 5
        last_rss = collections.deque(maxlen=5)
        for n in range(10):
            for i in range(runs):
                self.model(self.x)
            last_rss.append(p.memory_info().rss)
        return last_rss

    def func_rss(self, runs):

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              import collections
import unittest

import torch
from torch.testing._internal.common_utils import (
    TestCase, run_tests, TEST_WITH_ASAN)

try:
    import psutil

            

Reported by Pylint.

Too few public methods (1/2)
Error

Line: 17 Column: 1

              device = torch.device('cpu')


class Network(torch.nn.Module):
    maxp1 = torch.nn.MaxPool2d(1, 1)

    def forward(self, x):
        return self.maxp1(x)


            

Reported by Pylint.

Missing class docstring
Error

Line: 17 Column: 1

              device = torch.device('cpu')


class Network(torch.nn.Module):
    maxp1 = torch.nn.MaxPool2d(1, 1)

    def forward(self, x):
        return self.maxp1(x)


            

Reported by Pylint.

Missing function or method docstring
Error

Line: 20 Column: 5

              class Network(torch.nn.Module):
    maxp1 = torch.nn.MaxPool2d(1, 1)

    def forward(self, x):
        return self.maxp1(x)


@unittest.skipIf(not HAS_PSUTIL, "Requires psutil to run")
@unittest.skipIf(TEST_WITH_ASAN, "Cannot test with ASAN")

            

Reported by Pylint.

Argument name "x" doesn't conform to snake_case naming style
Error

Line: 20 Column: 5

              class Network(torch.nn.Module):
    maxp1 = torch.nn.MaxPool2d(1, 1)

    def forward(self, x):
        return self.maxp1(x)


@unittest.skipIf(not HAS_PSUTIL, "Requires psutil to run")
@unittest.skipIf(TEST_WITH_ASAN, "Cannot test with ASAN")

            

Reported by Pylint.

Class name "TestOpenMP_ParallelFor" doesn't conform to PascalCase naming style
Error

Line: 26 Column: 1

              
@unittest.skipIf(not HAS_PSUTIL, "Requires psutil to run")
@unittest.skipIf(TEST_WITH_ASAN, "Cannot test with ASAN")
class TestOpenMP_ParallelFor(TestCase):
    batch = 20
    channels = 1
    side_dim = 80
    x = torch.randn([batch, channels, side_dim, side_dim], device=device)
    model = Network()

            

Reported by Pylint.

torch/distributions/uniform.py
16 issues
Module 'torch' has no 'Size' member
Error

Line: 45 Column: 27

                      self.low, self.high = broadcast_all(low, high)

        if isinstance(low, Number) and isinstance(high, Number):
            batch_shape = torch.Size()
        else:
            batch_shape = self.low.size()
        super(Uniform, self).__init__(batch_shape, validate_args=validate_args)

        if self._validate_args and not torch.lt(self.low, self.high).all():

            

Reported by Pylint.

Module 'torch' has no 'lt' member; maybe 'lu'?
Error

Line: 50 Column: 40

                          batch_shape = self.low.size()
        super(Uniform, self).__init__(batch_shape, validate_args=validate_args)

        if self._validate_args and not torch.lt(self.low, self.high).all():
            raise ValueError("Uniform is not defined when low>= high")

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(Uniform, _instance)
        batch_shape = torch.Size(batch_shape)

            

Reported by Pylint.

Module 'torch' has no 'Size' member
Error

Line: 55 Column: 23

              
    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(Uniform, _instance)
        batch_shape = torch.Size(batch_shape)
        new.low = self.low.expand(batch_shape)
        new.high = self.high.expand(batch_shape)
        super(Uniform, new).__init__(batch_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

            

Reported by Pylint.

Module 'torch' has no 'Size' member
Error

Line: 66 Column: 36

                  def support(self):
        return constraints.interval(self.low, self.high)

    def rsample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        rand = torch.rand(shape, dtype=self.low.dtype, device=self.low.device)
        return self.low + rand * (self.high - self.low)

    def log_prob(self, value):

            

Reported by Pylint.

Module 'torch' has no 'rand' member
Error

Line: 68 Column: 16

              
    def rsample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        rand = torch.rand(shape, dtype=self.low.dtype, device=self.low.device)
        return self.low + rand * (self.high - self.low)

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)

            

Reported by Pylint.

Module 'torch' has no 'log' member
Error

Line: 76 Column: 40

                          self._validate_sample(value)
        lb = self.low.le(value).type_as(self.low)
        ub = self.high.gt(value).type_as(self.low)
        return torch.log(lb.mul(ub)) - torch.log(self.high - self.low)

    def cdf(self, value):
        if self._validate_args:
            self._validate_sample(value)
        result = (value - self.low) / (self.high - self.low)

            

Reported by Pylint.

Module 'torch' has no 'log' member
Error

Line: 76 Column: 16

                          self._validate_sample(value)
        lb = self.low.le(value).type_as(self.low)
        ub = self.high.gt(value).type_as(self.low)
        return torch.log(lb.mul(ub)) - torch.log(self.high - self.low)

    def cdf(self, value):
        if self._validate_args:
            self._validate_sample(value)
        result = (value - self.low) / (self.high - self.low)

            

Reported by Pylint.

Module 'torch' has no 'log' member
Error

Line: 89 Column: 16

                      return result

    def entropy(self):
        return torch.log(self.high - self.low)

            

Reported by Pylint.

Method 'enumerate_support' is abstract in class 'Distribution' but is not overridden
Error

Line: 9 Column: 1

              from torch.distributions.utils import broadcast_all


class Uniform(Distribution):
    r"""
    Generates uniformly distributed random samples from the half-open interval
    ``[low, high)``.

    Example::

            

Reported by Pylint.

TODO allow (loc,scale) parameterization to allow independent constraints.
Error

Line: 24 Column: 3

                      low (float or Tensor): lower range (inclusive).
        high (float or Tensor): upper range (exclusive).
    """
    # TODO allow (loc,scale) parameterization to allow independent constraints.
    arg_constraints = {'low': constraints.dependent(is_discrete=False, event_dim=0),
                       'high': constraints.dependent(is_discrete=False, event_dim=0)}
    has_rsample = True

    @property

            

Reported by Pylint.

tools/test/test_stats.py
16 issues
Unable to import 'tools.stats'
Error

Line: 5 Column: 1

              import unittest
from typing import Dict, List

from tools.stats import print_test_stats
from tools.stats.s3_stat_parser import (Commit, Report, ReportMetaMeta,
                                        Status, Version1Case,
                                        Version1Report, Version2Case,
                                        Version2Report)


            

Reported by Pylint.

Unable to import 'tools.stats.s3_stat_parser'
Error

Line: 6 Column: 1

              from typing import Dict, List

from tools.stats import print_test_stats
from tools.stats.s3_stat_parser import (Commit, Report, ReportMetaMeta,
                                        Status, Version1Case,
                                        Version1Report, Version2Case,
                                        Version2Report)



            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              # -*- coding: utf-8 -*-
import unittest
from typing import Dict, List

from tools.stats import print_test_stats
from tools.stats.s3_stat_parser import (Commit, Report, ReportMetaMeta,
                                        Status, Version1Case,
                                        Version1Report, Version2Case,
                                        Version2Report)

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 12 Column: 1

                                                      Version2Report)


def fakehash(char: str) -> str:
    return char * 40


def dummy_meta_meta() -> ReportMetaMeta:
    return {

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 16 Column: 1

                  return char * 40


def dummy_meta_meta() -> ReportMetaMeta:
    return {
        'build_pr': '',
        'build_tag': '',
        'build_sha1': '',
        'build_base_commit': '',

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 29 Column: 1

                  }


def makecase(
    name: str,
    seconds: float,
    *,
    errored: bool = False,
    failed: bool = False,

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 46 Column: 1

                  }


def make_report_v1(tests: Dict[str, List[Version1Case]]) -> Version1Report:
    suites = {
        suite_name: {
            'total_seconds': sum(case['seconds'] for case in cases),
            'cases': cases,
        }

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 61 Column: 1

                  }


def make_case_v2(seconds: float, status: Status = None) -> Version2Case:
    return {
        'seconds': seconds,
        'status': status,
    }


            

Reported by Pylint.

Missing function or method docstring
Error

Line: 68 Column: 1

                  }


def make_report_v2(tests: Dict[str, Dict[str, Dict[str, Version2Case]]]) -> Version2Report:
    files = {}
    for file_name, file_suites in tests.items():
        suites = {
            suite_name: {
                'total_seconds': sum(case['seconds'] for case in cases.values()),

            

Reported by Pylint.

Constant name "maxDiff" doesn't conform to UPPER_CASE naming style
Error

Line: 88 Column: 1

                      'total_seconds': sum(s['total_seconds'] for s in files.values()),
        'files': files,
    }
maxDiff = None

class TestPrintTestStats(unittest.TestCase):
    version1_report: Version1Report = make_report_v1({
        # input ordering of the suites is ignored
        'Grault': [

            

Reported by Pylint.

torch/distributed/optim/functional_adamax.py
16 issues
Module 'torch' has no 'tensor' member; maybe 'Tensor'?
Error

Line: 77 Column: 37

                              if param not in self.state:
                    self.state[param] = {}
                    state = self.state[param]
                    state['step'] = torch.tensor(0.0)
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
                    # Exponential moving average of squared gradient values
                    state['exp_inf'] = torch.zeros_like(param, memory_format=torch.preserve_format)


            

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 79 Column: 40

                                  state = self.state[param]
                    state['step'] = torch.tensor(0.0)
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
                    # Exponential moving average of squared gradient values
                    state['exp_inf'] = torch.zeros_like(param, memory_format=torch.preserve_format)

                state = self.state[param]


            

Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 79 Column: 78

                                  state = self.state[param]
                    state['step'] = torch.tensor(0.0)
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
                    # Exponential moving average of squared gradient values
                    state['exp_inf'] = torch.zeros_like(param, memory_format=torch.preserve_format)

                state = self.state[param]


            

Reported by Pylint.

Module 'torch' has no 'preserve_format' member
Error

Line: 81 Column: 78

                                  # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
                    # Exponential moving average of squared gradient values
                    state['exp_inf'] = torch.zeros_like(param, memory_format=torch.preserve_format)

                state = self.state[param]

                exp_avgs.append(state['exp_avg'])
                exp_infs.append(state['exp_inf'])

            

Reported by Pylint.

Module 'torch' has no 'zeros_like' member
Error

Line: 81 Column: 40

                                  # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
                    # Exponential moving average of squared gradient values
                    state['exp_inf'] = torch.zeros_like(param, memory_format=torch.preserve_format)

                state = self.state[param]

                exp_avgs.append(state['exp_avg'])
                exp_infs.append(state['exp_inf'])

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              from typing import List, Dict, Optional, Tuple
import torch
import torch.optim._functional as F

from torch import Tensor

# Define a TorchScript compatible Functional Adamax Optimizer
# where we use these optimizer in a functional way.
# Instead of using the `param.grad` when updating parameters,

            

Reported by Pylint.

Too few public methods (1/2)
Error

Line: 17 Column: 1

              # NOTE: This should be only used by distributed optimizer internals
# and not meant to expose to the user.
@torch.jit.script
class _FunctionalAdamax(object):
    def __init__(
        self,
        params: List[Tensor],
        lr: float = 1e-3,
        betas: Tuple[float, float] = (0.9, 0.999),

            

Reported by Pylint.

Class '_FunctionalAdamax' inherits from object, can be safely removed from bases in python3
Error

Line: 17 Column: 1

              # NOTE: This should be only used by distributed optimizer internals
# and not meant to expose to the user.
@torch.jit.script
class _FunctionalAdamax(object):
    def __init__(
        self,
        params: List[Tensor],
        lr: float = 1e-3,
        betas: Tuple[float, float] = (0.9, 0.999),

            

Reported by Pylint.

Too many arguments (7/5)
Error

Line: 18 Column: 5

              # and not meant to expose to the user.
@torch.jit.script
class _FunctionalAdamax(object):
    def __init__(
        self,
        params: List[Tensor],
        lr: float = 1e-3,
        betas: Tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-8,

            

Reported by Pylint.

Comparison should be lr >= 0.0
Error

Line: 27 Column: 16

                      weight_decay: float = 0.0,
        _allow_empty_param_list: bool = False,
    ):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))

            

Reported by Pylint.

torch/distributed/_sharding_spec/api.py
15 issues
Attempted relative import beyond top-level package
Error

Line: 6 Column: 1

              from typing import List, Union
import torch

from ._internals import (
    ShardMetadata,
    validate_non_overlapping_shards_metadata
)

class PlacementSpec(ABC):

            

Reported by Pylint.

Unnecessary pass statement
Error

Line: 17 Column: 5

                  class can be used to specify customized placements which might not be
    covered by existing APIs.
    """
    pass


@dataclass
class DevicePlacementSpec(PlacementSpec):
    """

            

Reported by Pylint.

Access to a protected member _remote_device of a client class
Error

Line: 29 Column: 13

                      device(:class:`torch.distributed._remote_device`): The device to place the entity on.
    """

    device: torch.distributed._remote_device

    def __post_init__(self):
        if not isinstance(self.device, torch.distributed._remote_device):
            self.device = torch.distributed._remote_device(self.device)


            

Reported by Pylint.

Access to a protected member _remote_device of a client class
Error

Line: 32 Column: 40

                  device: torch.distributed._remote_device

    def __post_init__(self):
        if not isinstance(self.device, torch.distributed._remote_device):
            self.device = torch.distributed._remote_device(self.device)


class ShardingSpec(PlacementSpec):
    """

            

Reported by Pylint.

Access to a protected member _remote_device of a client class
Error

Line: 33 Column: 27

              
    def __post_init__(self):
        if not isinstance(self.device, torch.distributed._remote_device):
            self.device = torch.distributed._remote_device(self.device)


class ShardingSpec(PlacementSpec):
    """
    Base class representing sharding specifications. It is special type of

            

Reported by Pylint.

Unnecessary pass statement
Error

Line: 41 Column: 5

                  Base class representing sharding specifications. It is special type of
    PlacementSpec.
    """
    pass


@dataclass
class ChunkShardingSpec(ShardingSpec):
    """

            

Reported by Pylint.

Access to a protected member _remote_device of a client class
Error

Line: 74 Column: 28

                  ShardingDim = Union[int, str]

    dim: ShardingDim
    placements: List[Union[torch.distributed._remote_device, str]]

    def __post_init__(self):
        self._verify_dim(self.dim)
        for i, remote_device in enumerate(self.placements):
            if not isinstance(remote_device, torch.distributed._remote_device):

            

Reported by Pylint.

Access to a protected member _remote_device of a client class
Error

Line: 79 Column: 46

                  def __post_init__(self):
        self._verify_dim(self.dim)
        for i, remote_device in enumerate(self.placements):
            if not isinstance(remote_device, torch.distributed._remote_device):
                self.placements[i] = torch.distributed._remote_device(remote_device)

    @staticmethod
    def _verify_dim(dim):
        if not (isinstance(dim, int) or isinstance(dim, str)):

            

Reported by Pylint.

Access to a protected member _remote_device of a client class
Error

Line: 80 Column: 38

                      self._verify_dim(self.dim)
        for i, remote_device in enumerate(self.placements):
            if not isinstance(remote_device, torch.distributed._remote_device):
                self.placements[i] = torch.distributed._remote_device(remote_device)

    @staticmethod
    def _verify_dim(dim):
        if not (isinstance(dim, int) or isinstance(dim, str)):
            raise ValueError(f'{dim} needs to either be an int or str')

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              from abc import ABC
from dataclasses import dataclass
from typing import List, Union
import torch

from ._internals import (
    ShardMetadata,
    validate_non_overlapping_shards_metadata
)

            

Reported by Pylint.

test/test_function_schema.py
15 issues
Unable to import 'torch'
Error

Line: 2 Column: 1

              
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch._C import parse_schema


class TestFunctionSchema(TestCase):
    def test_serialize_and_deserialize(self):
        schemas = torch._C._jit_get_all_schemas()

            

Reported by Pylint.

Unable to import 'torch.testing._internal.common_utils'
Error

Line: 3 Column: 1

              
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch._C import parse_schema


class TestFunctionSchema(TestCase):
    def test_serialize_and_deserialize(self):
        schemas = torch._C._jit_get_all_schemas()

            

Reported by Pylint.

Unable to import 'torch._C'
Error

Line: 4 Column: 1

              
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch._C import parse_schema


class TestFunctionSchema(TestCase):
    def test_serialize_and_deserialize(self):
        schemas = torch._C._jit_get_all_schemas()

            

Reported by Pylint.

Access to a protected member _C of a client class
Error

Line: 9 Column: 19

              
class TestFunctionSchema(TestCase):
    def test_serialize_and_deserialize(self):
        schemas = torch._C._jit_get_all_schemas()
        # so far we have around 1700 registered schemas
        self.assertGreater(len(schemas), 1000)
        for schema in schemas:
            parsed_schema = parse_schema(str(schema))
            self.assertEqual(parsed_schema, schema)

            

Reported by Pylint.

Access to a protected member _jit_get_all_schemas of a client class
Error

Line: 9 Column: 19

              
class TestFunctionSchema(TestCase):
    def test_serialize_and_deserialize(self):
        schemas = torch._C._jit_get_all_schemas()
        # so far we have around 1700 registered schemas
        self.assertGreater(len(schemas), 1000)
        for schema in schemas:
            parsed_schema = parse_schema(str(schema))
            self.assertEqual(parsed_schema, schema)

            

Reported by Pylint.

Unused variable 'schema'
Error

Line: 96 Column: 13

              
    def test_schema_error(self):
        with self.assertRaisesRegex(RuntimeError, r"schemas with vararg \(...\) can't have default value args"):
            schema = parse_schema("any.foo(int arg1, int arg2=0, ...)")

if __name__ == '__main__':
    run_tests()

            

Reported by Pylint.

Missing module docstring
Error

Line: 1 Column: 1

              
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch._C import parse_schema


class TestFunctionSchema(TestCase):
    def test_serialize_and_deserialize(self):
        schemas = torch._C._jit_get_all_schemas()

            

Reported by Pylint.

Missing class docstring
Error

Line: 7 Column: 1

              from torch._C import parse_schema


class TestFunctionSchema(TestCase):
    def test_serialize_and_deserialize(self):
        schemas = torch._C._jit_get_all_schemas()
        # so far we have around 1700 registered schemas
        self.assertGreater(len(schemas), 1000)
        for schema in schemas:

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 8 Column: 5

              

class TestFunctionSchema(TestCase):
    def test_serialize_and_deserialize(self):
        schemas = torch._C._jit_get_all_schemas()
        # so far we have around 1700 registered schemas
        self.assertGreater(len(schemas), 1000)
        for schema in schemas:
            parsed_schema = parse_schema(str(schema))

            

Reported by Pylint.

Missing function or method docstring
Error

Line: 17 Column: 5

                          self.assertEqual(parsed_schema, schema)
            self.assertTrue(parsed_schema.is_backward_compatible_with(schema))

    def test_backward_compatible_structure(self):
        old_schema = parse_schema('any.over(Tensor self, *, Tensor b) -> Tensor')
        # BC: A new schema without changes.
        new_schema = parse_schema('any.over(Tensor self, *, Tensor b) -> Tensor')
        self.assertTrue(new_schema.is_backward_compatible_with(old_schema))
        # No-BC: A new schema with different name.

            

Reported by Pylint.