The following issues were found:
test/test_futures.py
54 issues
Line: 3
Column: 1
import threading
import time
import torch
import unittest
from torch.futures import Future
from torch.testing._internal.common_utils import IS_WINDOWS, TestCase, TemporaryFileName, run_tests
from typing import TypeVar
T = TypeVar("T")
Reported by Pylint.
Line: 5
Column: 1
import time
import torch
import unittest
from torch.futures import Future
from torch.testing._internal.common_utils import IS_WINDOWS, TestCase, TemporaryFileName, run_tests
from typing import TypeVar
T = TypeVar("T")
Reported by Pylint.
Line: 6
Column: 1
import torch
import unittest
from torch.futures import Future
from torch.testing._internal.common_utils import IS_WINDOWS, TestCase, TemporaryFileName, run_tests
from typing import TypeVar
T = TypeVar("T")
Reported by Pylint.
Line: 1
Column: 1
import threading
import time
import torch
import unittest
from torch.futures import Future
from torch.testing._internal.common_utils import IS_WINDOWS, TestCase, TemporaryFileName, run_tests
from typing import TypeVar
T = TypeVar("T")
Reported by Pylint.
Line: 4
Column: 1
import threading
import time
import torch
import unittest
from torch.futures import Future
from torch.testing._internal.common_utils import IS_WINDOWS, TestCase, TemporaryFileName, run_tests
from typing import TypeVar
T = TypeVar("T")
Reported by Pylint.
Line: 7
Column: 1
import unittest
from torch.futures import Future
from torch.testing._internal.common_utils import IS_WINDOWS, TestCase, TemporaryFileName, run_tests
from typing import TypeVar
T = TypeVar("T")
def add_one(fut):
Reported by Pylint.
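The Column-1 findings above all sit on the module's import lines. Assuming they concern import grouping and ordering (standard library first, third-party second, as Pylint's wrong-import-order check enforces; the message text is not shown), a minimal sketch of the regrouped header would be:
import threading
import time
import unittest
from typing import TypeVar

import torch
from torch.futures import Future
from torch.testing._internal.common_utils import IS_WINDOWS, TemporaryFileName, TestCase, run_tests

T = TypeVar("T")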
Line: 9
Column: 1
from torch.testing._internal.common_utils import IS_WINDOWS, TestCase, TemporaryFileName, run_tests
from typing import TypeVar
T = TypeVar("T")
def add_one(fut):
return fut.wait() + 1
Reported by Pylint.
Line: 12
Column: 1
T = TypeVar("T")
def add_one(fut):
return fut.wait() + 1
class TestFuture(TestCase):
def test_set_exception(self) -> None:
Reported by Pylint.
Line: 16
Column: 1
return fut.wait() + 1
class TestFuture(TestCase):
def test_set_exception(self) -> None:
# This test is to ensure errors can propagate across futures.
error_msg = "Intentional Value Error"
value_error = ValueError(error_msg)
Reported by Pylint.
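For context on the flagged test itself, this is the behavior test_set_exception exercises; a minimal sketch using only the public torch.futures API shown above:
from torch.futures import Future

fut = Future()
fut.set_exception(ValueError("Intentional Value Error"))
try:
    fut.wait()  # wait() re-raises the exception stored in the future
except ValueError as err:
    print(err)  # -> Intentional Value Error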
caffe2/python/operator_test/wngrad_test.py
54 issues
Line: 10
Column: 1
import logging
import hypothesis
from hypothesis import given, settings, HealthCheck
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core
Reported by Pylint.
Line: 11
Column: 1
import logging
import hypothesis
from hypothesis import given, settings, HealthCheck
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 12
Column: 1
import hypothesis
from hypothesis import given, settings, HealthCheck
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
Reported by Pylint.
Line: 40
Column: 18
def wngrad_sparse_test_helper(parent_test, inputs, seq_b, lr, epsilon,
engine, gc, dc):
# helper functions for wngrad operator test
param, grad = inputs
seq_b = np.array([seq_b, ], dtype=np.float32)
lr = np.array([lr], dtype=np.float32)
Reported by Pylint.
Line: 90
Column: 70
allow_nan=False, allow_infinity=False),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_wngrad_dense_base(self, inputs, seq_b, lr, epsilon, gc, dc):
param, grad = inputs
seq_b = np.array([seq_b, ], dtype=np.float32)
lr = np.array([lr], dtype=np.float32)
op = core.CreateOperator(
Reported by Pylint.
Line: 118
Column: 64
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_wngrad_dense_output_effective_lr(self, inputs, seq_b,
lr, epsilon, gc, dc):
param, grad = inputs
seq_b = np.array([seq_b, ], dtype=np.float32)
lr = np.array([lr], dtype=np.float32)
op = core.CreateOperator(
Reported by Pylint.
Line: 147
Column: 51
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_wngrad_dense_output_effective_lr_and_update(
self, inputs, seq_b, lr, epsilon, gc, dc):
param, grad = inputs
seq_b = np.abs(np.array([seq_b, ], dtype=np.float32))
lr = np.array([lr], dtype=np.float32)
op = core.CreateOperator(
Reported by Pylint.
Line: 190
Column: 72
allow_nan=False, allow_infinity=False),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_sparse_wngrad_empty(self, inputs, seq_b, lr, epsilon, gc, dc):
param = inputs[0]
seq_b = np.array([seq_b, ], dtype=np.float32)
lr = np.array([lr], dtype=np.float32)
grad = np.empty(shape=(0,) + param.shape[1:], dtype=np.float32)
Reported by Pylint.
Line: 207
Column: 47
epsilon=epsilon,
device_option=gc)
def ref_sparse(param, seq_b, indices, grad, lr):
param_out = np.copy(param)
seq_b_out = np.copy(seq_b)
return (param_out, seq_b_out)
print('test_sparse_adagrad_empty with full precision embedding')
Reported by Pylint.
Line: 207
Column: 53
epsilon=epsilon,
device_option=gc)
def ref_sparse(param, seq_b, indices, grad, lr):
param_out = np.copy(param)
seq_b_out = np.copy(seq_b)
return (param_out, seq_b_out)
print('test_sparse_adagrad_empty with full precision embedding')
Reported by Pylint.
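The two findings at columns 47 and 53 point at the grad and lr parameters of ref_sparse, which the reference body never reads. Assuming unused-argument is the message, a common remediation is an underscore prefix; a sketch (the arguments are still accepted positionally):
def ref_sparse(param, seq_b, indices, _grad, _lr):
    # reference for the empty-gradient case: param and seq_b pass through unchanged
    param_out = np.copy(param)
    seq_b_out = np.copy(seq_b)
    return (param_out, seq_b_out)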
.circleci/cimodel/data/pytorch_build_definitions.py
54 issues
Line: 21
Column: 3
pyver: Optional[str] = None
cuda_version: Optional[str] = None
rocm_version: Optional[str] = None
# TODO expand this to cover all the USE_* that we want to test for
# tensorrt, leveldb, lmdb, redis, opencv, mkldnn, ideep, etc.
# (from https://github.com/pytorch/pytorch/pull/17323#discussion_r259453608)
is_xla: bool = False
is_vulkan: bool = False
is_pure_torch: bool = False
Reported by Pylint.
Line: 40
Column: 3
def is_test_phase(phase):
return "test" in phase
# TODO: Eliminate the special casing for docker paths
# In the short term, we *will* need to support special casing as docker images are merged for caffe2 and pytorch
def get_parms(self, for_docker):
leading = []
# We just don't run non-important jobs on pull requests;
# previously we also named them in a way to make it obvious
Reported by Pylint.
Line: 126
Column: 3
if Conf.is_test_phase(phase):
# TODO When merging the caffe2 and pytorch jobs, it might be convenient for a while to make a
# caffe2 test job dependent on a pytorch build job. This way we could quickly dedup the repeated
# build of pytorch in the caffe2 build job, and just run the caffe2 tests off of a completed
# pytorch build job (from https://github.com/pytorch/pytorch/pull/17323#discussion_r259452641)
dependency_build = self.parent_build or self
Reported by Pylint.
Line: 145
Column: 3
return {job_name: job_def}
# TODO This is a hack to special case some configs just for the workflow list
class HiddenConf(object):
def __init__(self, name, parent_build=None, filters=None):
self.name = name
self.parent_build = parent_build
self.filters = filters
Reported by Pylint.
Line: 169
Column: 32
self.parent_build = parent_build
self.branch = branch
def gen_workflow_job(self, phase):
return {
"pytorch_doc_push": {
"name": self.name,
"branch": self.branch,
"requires": [self.parent_build],
Reported by Pylint.
Line: 181
Column: 3
}
}
# TODO Convert these to graph nodes
def gen_dependent_configs(xenial_parent_config):
extra_parms = [
(["multigpu"], "large"),
(["nogpu", "NO_AVX2"], None),
Reported by Pylint.
Line: 307
Column: 3
elif compiler_name == "android":
android_ndk_version = fc.find_prop("compiler_version")
# TODO: do we need clang to compile host binaries like protoc?
parms_list.append("clang5")
parms_list.append("android-ndk-" + android_ndk_version)
android_abi = fc.find_prop("android_abi")
parms_list_ignored_for_docker_image.append(android_abi)
restrict_phases = ["build"]
Reported by Pylint.
Line: 345
Column: 3
parallel_backend = fc.find_prop("parallel_backend") or None
build_only = fc.find_prop("build_only") or False
shard_test = fc.find_prop("shard_test") or False
# TODO: fix pure_torch python test packaging issue.
if shard_test:
restrict_phases = ["build"] if restrict_phases is None else restrict_phases
restrict_phases.extend(["test1", "test2"])
if build_only or is_pure_torch:
restrict_phases = ["build"]
Reported by Pylint.
Line: 380
Column: 3
# run docs builds on "pytorch-linux-xenial-py3.6-gcc5.4". Docs builds
# should run on a CPU-only build that runs on all PRs.
# XXX should this be updated to a more modern build? Projects are
# beginning to drop python3.6
if (
distro_name == "xenial"
and fc.find_prop("pyver") == "3.6"
and cuda_version is None
Reported by Pylint.
Line: 392
Column: 13
and compiler_name == "gcc"
and fc.find_prop("compiler_version") == "5.4"
):
c.filters = gen_filter_dict(branches_list=r"/.*/",
tags_list=RC_PATTERN)
c.dependent_tests = gen_docs_configs(c)
if cuda_version == "10.2" and python_version == "3.6" and not is_libtorch and not is_slow_gradcheck:
c.dependent_tests = gen_dependent_configs(c)
Reported by Pylint.
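Most of the Column-3 findings in this file land on # TODO / # XXX comments, which matches Pylint's fixme check (W0511) — an assumption, since the message text is elided. If those notes are intentional, the check is usually tuned in the project's pylintrc rather than by deleting the comments; a sketch:
[MISCELLANEOUS]
# Only flag FIXME markers; leave TODO and XXX design notes alone.
notes=FIXME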
torch/storage.py
54 issues
Line: 4
Column: 1
import io
import torch
from ._utils import _type, _cuda
from typing import Any, TypeVar, Type
T = TypeVar('T', bound='_StorageBase')
class _StorageBase(object):
_cdata: Any
Reported by Pylint.
Line: 14
Column: 26
is_sparse: bool = False
def __init__(self, *args, **kwargs): ... # noqa: E704
def __len__(self) -> int: ... # noqa: E704
def __getitem__(self, idx): ... # noqa: E704
def copy_(self, source: T) -> T: ... # noqa: E704
def size(self) -> int: ... # noqa: E704
def type(self, dtype: str = None, non_blocking: bool = False) -> T: ... # noqa: E704
def cuda(self, device=None, non_blocking=False, **kwargs) -> T: ... # noqa: E704
Reported by Pylint.
Line: 127
Column: 21
if self.is_cuda:
raise TypeError(f"cannot pin '{self.type()}' only CPU memory can be pinned")
import torch.cuda
allocator = torch.cuda._host_allocator() # type: ignore[attr-defined]
return type(self)(self.size(), allocator=allocator).copy_(self)
def share_memory_(self):
"""Moves the storage to shared memory.
Reported by Pylint.
Line: 13
Column: 1
is_cuda: bool = False
is_sparse: bool = False
def __init__(self, *args, **kwargs): ... # noqa: E704
def __len__(self) -> int: ... # noqa: E704
def __getitem__(self, idx): ... # noqa: E704
def copy_(self, source: T) -> T: ... # noqa: E704
def size(self) -> int: ... # noqa: E704
def type(self, dtype: str = None, non_blocking: bool = False) -> T: ... # noqa: E704
Reported by Pylint.
Line: 16
Column: 21
def __init__(self, *args, **kwargs): ... # noqa: E704
def __len__(self) -> int: ... # noqa: E704
def __getitem__(self, idx): ... # noqa: E704
def copy_(self, source: T) -> T: ... # noqa: E704
def size(self) -> int: ... # noqa: E704
def type(self, dtype: str = None, non_blocking: bool = False) -> T: ... # noqa: E704
def cuda(self, device=None, non_blocking=False, **kwargs) -> T: ... # noqa: E704
def element_size(self) -> int: ... # noqa: E704
def get_device(self) -> int: ... # noqa: E704
Reported by Pylint.
Line: 18
Column: 20
def __getitem__(self, idx): ... # noqa: E704
def copy_(self, source: T) -> T: ... # noqa: E704
def size(self) -> int: ... # noqa: E704
def type(self, dtype: str = None, non_blocking: bool = False) -> T: ... # noqa: E704
def cuda(self, device=None, non_blocking=False, **kwargs) -> T: ... # noqa: E704
def element_size(self) -> int: ... # noqa: E704
def get_device(self) -> int: ... # noqa: E704
# Defined in torch/csrc/generic/StorageSharing.cpp
Reported by Pylint.
Line: 18
Column: 39
def __getitem__(self, idx): ... # noqa: E704
def copy_(self, source: T) -> T: ... # noqa: E704
def size(self) -> int: ... # noqa: E704
def type(self, dtype: str = None, non_blocking: bool = False) -> T: ... # noqa: E704
def cuda(self, device=None, non_blocking=False, **kwargs) -> T: ... # noqa: E704
def element_size(self) -> int: ... # noqa: E704
def get_device(self) -> int: ... # noqa: E704
# Defined in torch/csrc/generic/StorageSharing.cpp
Reported by Pylint.
Line: 19
Column: 33
def copy_(self, source: T) -> T: ... # noqa: E704
def size(self) -> int: ... # noqa: E704
def type(self, dtype: str = None, non_blocking: bool = False) -> T: ... # noqa: E704
def cuda(self, device=None, non_blocking=False, **kwargs) -> T: ... # noqa: E704
def element_size(self) -> int: ... # noqa: E704
def get_device(self) -> int: ... # noqa: E704
# Defined in torch/csrc/generic/StorageSharing.cpp
def _share_filename_(self): ... # noqa: E704
Reported by Pylint.
Line: 19
Column: 1
def copy_(self, source: T) -> T: ... # noqa: E704
def size(self) -> int: ... # noqa: E704
def type(self, dtype: str = None, non_blocking: bool = False) -> T: ... # noqa: E704
def cuda(self, device=None, non_blocking=False, **kwargs) -> T: ... # noqa: E704
def element_size(self) -> int: ... # noqa: E704
def get_device(self) -> int: ... # noqa: E704
# Defined in torch/csrc/generic/StorageSharing.cpp
def _share_filename_(self): ... # noqa: E704
Reported by Pylint.
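The column numbers here land on the parameters of these _StorageBase stub methods, consistent with unused-argument on `...` stub bodies (an assumption). A separate issue visible in the excerpt is dtype: str = None, which relies on the implicit-Optional convention that PEP 484 deprecated; a sketch of the explicit form of that stub:
from typing import Optional

def type(self, dtype: Optional[str] = None, non_blocking: bool = False) -> T: ...  # noqa: E704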
caffe2/contrib/aten/aten_test.py
54 issues
Line: 7
Column: 1
from caffe2.python import core, dyndep
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
Reported by Pylint.
Line: 10
Column: 1
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
class TestATen(hu.HypothesisTestCase):
Reported by Pylint.
Line: 6
Column: 1
from caffe2.python import core, dyndep
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
Reported by Pylint.
Line: 17
Column: 36
class TestATen(hu.HypothesisTestCase):
@given(inputs=hu.tensors(n=2), **hu.gcs)
def test_add(self, inputs, gc, dc):
op = core.CreateOperator(
"ATen",
["X", "Y"],
["Z"],
operator="add")
Reported by Pylint.
Line: 29
Column: 41
self.assertReferenceChecks(gc, op, inputs, ref)
@given(inputs=hu.tensors(n=2, dtype=np.float16), **hu.gcs_gpu_only)
def test_add_half(self, inputs, gc, dc):
op = core.CreateOperator(
"ATen",
["X", "Y"],
["Z"],
operator="add")
Reported by Pylint.
Line: 41
Column: 36
self.assertReferenceChecks(gc, op, inputs, ref)
@given(inputs=hu.tensors(n=1), **hu.gcs)
def test_pow(self, inputs, gc, dc):
op = core.CreateOperator(
"ATen",
["S"],
["Z"],
operator="pow", exponent=2.0)
Reported by Pylint.
Line: 54
Column: 32
self.assertReferenceChecks(gc, op, inputs, ref)
@given(x=st.integers(min_value=2, max_value=8), **hu.gcs)
def test_sort(self, x, gc, dc):
inputs = [np.random.permutation(x)]
op = core.CreateOperator(
"ATen",
["S"],
["Z", "I"],
Reported by Pylint.
Line: 67
Column: 36
self.assertReferenceChecks(gc, op, inputs, ref)
@given(inputs=hu.tensors(n=1), **hu.gcs)
def test_sum(self, inputs, gc, dc):
op = core.CreateOperator(
"ATen",
["S"],
["Z"],
operator="sum")
Reported by Pylint.
Line: 80
Column: 36
self.assertReferenceChecks(gc, op, inputs, ref)
@given(**hu.gcs)
def test_index_uint8(self, gc, dc):
# Indexing with uint8 is deprecated, but we need to provide backward compatibility for some old models exported through ONNX
op = core.CreateOperator(
"ATen",
['self', 'mask'],
["Z"],
Reported by Pylint.
Line: 97
Column: 34
self.assertReferenceChecks(gc, op, [tensor, mask], ref)
@given(**hu.gcs)
def test_index_put(self, gc, dc):
op = core.CreateOperator(
"ATen",
['self', 'indices', 'values'],
["Z"],
operator="index_put")
Reported by Pylint.
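Across these tests the reported columns consistently point at the dc parameter, which the @given(**hu.gcs) decorator injects but the test bodies never use. Assuming unused-argument is the message, a local disable keeps the fixture signature intact; a sketch:
@given(inputs=hu.tensors(n=2), **hu.gcs)
def test_add(self, inputs, gc, dc):  # pylint: disable=unused-argument
    ...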
torch/profiler/profiler.py
54 issues
Line: 63
Column: 46
"""
return ProfilerAction.RECORD
def tensorboard_trace_handler(dir_name: str, worker_name: Optional[str] = None, use_gzip: bool = False):
"""
Outputs tracing files to directory of ``dir_name``, then that directory can be
directly delivered to tensorboard as logdir.
``worker_name`` should be unique for each worker in distributed scenario,
it will be set to '[hostname]_[pid]' by default.
Reported by Pylint.
Line: 70
Column: 5
``worker_name`` should be unique for each worker in distributed scenario,
it will be set to '[hostname]_[pid]' by default.
"""
import os
import socket
import time
def handler_fn(prof) -> None:
nonlocal worker_name
Reported by Pylint.
Line: 74
Column: 20
import socket
import time
def handler_fn(prof) -> None:
nonlocal worker_name
if not os.path.isdir(dir_name):
try:
os.makedirs(dir_name, exist_ok=True)
except Exception:
Reported by Pylint.
Line: 80
Column: 17
try:
os.makedirs(dir_name, exist_ok=True)
except Exception:
raise RuntimeError("Can't create directory: " + dir_name)
if not worker_name:
worker_name = "{}_{}".format(socket.gethostname(), str(os.getpid()))
file_name = "{}.{}.pt.trace.json".format(worker_name, int(time.time() * 1000))
if use_gzip:
file_name = file_name + '.gz'
Reported by Pylint.
Line: 100
Column: 12
This, in turn, results in including CUDA time in the profiler table output,
but not in the JSON trace.
"""
return torch.autograd._supported_activities()
class profile(object):
"""Profiler context manager.
Reported by Pylint.
Line: 213
Column: 13
self,
*,
activities: Optional[Iterable[ProfilerActivity]] = None,
schedule: Optional[Callable[[int], ProfilerAction]] = None,
on_trace_ready: Optional[Callable[..., Any]] = None,
record_shapes: bool = False,
profile_memory: bool = False,
with_stack: bool = False,
with_flops: bool = False,
Reported by Pylint.
Line: 391
Column: 9
into the trace file
"""
wrapped_value = "\"" + value.replace('"', '\\"') + "\""
torch.autograd._add_metadata_json(key, wrapped_value)
def add_metadata_json(self, key: str, value: str):
"""
Adds a user defined metadata with a string key and a valid json value
into the trace file
Reported by Pylint.
Line: 398
Column: 9
Adds a user defined metadata with a string key and a valid json value
into the trace file
"""
torch.autograd._add_metadata_json(key, value)
def _get_distributed_info(self):
import torch.distributed as dist
if not dist.is_available() or not dist.is_initialized():
return None
Reported by Pylint.
Line: 440
Column: 9
with_modules=self.with_modules,
use_kineto=True,
)
self.profiler._prepare_trace()
def _start_trace(self):
assert self.profiler is not None
self.profiler._start_trace()
Reported by Pylint.
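The excerpt at line 80 shows a bare raise RuntimeError inside an except Exception handler, which matches Pylint's raise-missing-from (W0707) — an assumption, since the message is elided. Chaining the original exception preserves the root cause; a sketch:
try:
    os.makedirs(dir_name, exist_ok=True)
except Exception as e:
    raise RuntimeError("Can't create directory: " + dir_name) from e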
torch/optim/_multi_tensor/adamw.py
53 issues
Line: 3
Column: 1
import math
import torch
from ..optimizer import Optimizer
from collections import defaultdict
class AdamW(Optimizer):
r"""Implements AdamW algorithm.
The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_.
Reported by Pylint.
Line: 95
Column: 40
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
Reported by Pylint.
Line: 95
Column: 74
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
Reported by Pylint.
Line: 97
Column: 43
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avg.append(state['exp_avg'])
Reported by Pylint.
Line: 97
Column: 77
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avg.append(state['exp_avg'])
Reported by Pylint.
Line: 100
Column: 85
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avg.append(state['exp_avg'])
exp_avg_sq.append(state['exp_avg_sq'])
if amsgrad:
Reported by Pylint.
Line: 100
Column: 51
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avg.append(state['exp_avg'])
exp_avg_sq.append(state['exp_avg_sq'])
if amsgrad:
Reported by Pylint.
Line: 119
Column: 13
#
# Decay the first and second moment running average coefficient
#
torch._foreach_mul_(exp_avg, beta1)
torch._foreach_add_(exp_avg, grads, alpha=1 - beta1)
torch._foreach_mul_(exp_avg_sq, beta2)
torch._foreach_addcmul_(exp_avg_sq, grads, grads, 1 - beta2)
Reported by Pylint.
Line: 120
Column: 13
# Decay the first and second moment running average coefficient
#
torch._foreach_mul_(exp_avg, beta1)
torch._foreach_add_(exp_avg, grads, alpha=1 - beta1)
torch._foreach_mul_(exp_avg_sq, beta2)
torch._foreach_addcmul_(exp_avg_sq, grads, grads, 1 - beta2)
if amsgrad:
Reported by Pylint.
Line: 122
Column: 13
torch._foreach_mul_(exp_avg, beta1)
torch._foreach_add_(exp_avg, grads, alpha=1 - beta1)
torch._foreach_mul_(exp_avg_sq, beta2)
torch._foreach_addcmul_(exp_avg_sq, grads, grads, 1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
max_exp_avg_sq = torch._foreach_maximum(max_exp_avg_sq, exp_avg_sq)
Reported by Pylint.
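The flagged lines call torch._foreach_* fused kernels, i.e. underscore-prefixed names accessed from outside their module, consistent with Pylint's protected-access warning (an assumption). Functionally, each call applies one in-place op across a whole list of tensors; a sketch of the per-tensor equivalent of the first two calls:
# what _foreach_mul_ followed by _foreach_add_ computes, one tensor at a time
for avg, g in zip(exp_avg, grads):
    avg.mul_(beta1).add_(g, alpha=1 - beta1)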
test/distributed/pipeline/sync/skip/test_tracker.py
53 issues
Line: 10
Column: 1
from queue import Queue
import threading
import pytest
import torch
from torch import nn
from torch.distributed.pipeline.sync.checkpoint import enable_checkpointing, enable_recomputing
from torch.distributed.pipeline.sync.microbatch import Batch
Reported by Pylint.
Line: 11
Column: 1
import threading
import pytest
import torch
from torch import nn
from torch.distributed.pipeline.sync.checkpoint import enable_checkpointing, enable_recomputing
from torch.distributed.pipeline.sync.microbatch import Batch
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
Reported by Pylint.
Line: 12
Column: 1
import pytest
import torch
from torch import nn
from torch.distributed.pipeline.sync.checkpoint import enable_checkpointing, enable_recomputing
from torch.distributed.pipeline.sync.microbatch import Batch
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.layout import SkipLayout
Reported by Pylint.
Line: 14
Column: 1
import torch
from torch import nn
from torch.distributed.pipeline.sync.checkpoint import enable_checkpointing, enable_recomputing
from torch.distributed.pipeline.sync.microbatch import Batch
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.layout import SkipLayout
from torch.distributed.pipeline.sync.skip.tracker import SkipTracker, SkipTrackerThroughPotals, current_skip_tracker
Reported by Pylint.
Line: 15
Column: 1
from torch import nn
from torch.distributed.pipeline.sync.checkpoint import enable_checkpointing, enable_recomputing
from torch.distributed.pipeline.sync.microbatch import Batch
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.layout import SkipLayout
from torch.distributed.pipeline.sync.skip.tracker import SkipTracker, SkipTrackerThroughPotals, current_skip_tracker
Reported by Pylint.
Line: 16
Column: 1
from torch.distributed.pipeline.sync.checkpoint import enable_checkpointing, enable_recomputing
from torch.distributed.pipeline.sync.microbatch import Batch
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.layout import SkipLayout
from torch.distributed.pipeline.sync.skip.tracker import SkipTracker, SkipTrackerThroughPotals, current_skip_tracker
def test_default_skip_tracker():
Reported by Pylint.
Line: 17
Column: 1
from torch.distributed.pipeline.sync.checkpoint import enable_checkpointing, enable_recomputing
from torch.distributed.pipeline.sync.microbatch import Batch
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.layout import SkipLayout
from torch.distributed.pipeline.sync.skip.tracker import SkipTracker, SkipTrackerThroughPotals, current_skip_tracker
def test_default_skip_tracker():
q = Queue()
Reported by Pylint.
Line: 18
Column: 1
from torch.distributed.pipeline.sync.microbatch import Batch
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.layout import SkipLayout
from torch.distributed.pipeline.sync.skip.tracker import SkipTracker, SkipTrackerThroughPotals, current_skip_tracker
def test_default_skip_tracker():
q = Queue()
Reported by Pylint.
Line: 41
Column: 27
def test_default_skip_tracker_by_data_parallel():
@skippable(stash=["foo"])
class Stash(nn.Module):
def forward(self, input):
yield stash("foo", input)
return input * 2 # noqa: B901
@skippable(pop=["foo"])
class Pop(nn.Module):
Reported by Pylint.
Line: 47
Column: 27
@skippable(pop=["foo"])
class Pop(nn.Module):
def forward(self, input):
foo = yield pop("foo")
return foo
model = nn.Sequential(Stash(), Pop())
model = nn.DataParallel(model, device_ids=[0, 0], output_device=0)
Reported by Pylint.
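The two column-27 findings land exactly on the forward(self, input) parameter, where input shadows the Python builtin (redefined-builtin, assuming that is the message). A sketch of the conventional rename for the Stash module:
@skippable(stash=["foo"])
class Stash(nn.Module):
    def forward(self, x):  # renamed from `input` to avoid shadowing the builtin
        yield stash("foo", x)
        return x * 2  # noqa: B901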
caffe2/python/operator_test/gather_ops_test.py
53 issues
Line: 8
Column: 1
import numpy as np
from caffe2.python import core, workspace
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import hypothesis.extra.numpy as hnp
Reported by Pylint.
Line: 11
Column: 1
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import hypothesis.extra.numpy as hnp
# Basic implementation of gather for axis == 0, which is lookup of indices
# in the outer dimension. Keeping it for reference here, although is similar
# to more general function below.
Reported by Pylint.
Line: 12
Column: 1
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import hypothesis.extra.numpy as hnp
# Basic implementation of gather for axis == 0, which is lookup of indices
# in the outer dimension. Keeping it for reference here, although is similar
# to more general function below.
def ref_gather_axis0():
Reported by Pylint.
Line: 197
Column: 49
@given(inputs=_inputs(),
**hu.gcs)
@settings(deadline=10000)
def test_batch_gather_ops(self, inputs, gc, dc):
data, ind = inputs
op = core.CreateOperator(
'BatchGather',
['data', 'ind'],
['output'])
Reported by Pylint.
Line: 213
Column: 68
index_num=st.integers(0, 5000),
**hu.gcs)
@settings(deadline=10000)
def test_batch_gather_ops(self, rows_num, cols_num, index_num, gc, dc):
data = np.random.random((rows_num, cols_num)).astype(np.float32)
ind = np.random.randint(rows_num, size=(index_num, )).astype('int32')
net = core.Net("bench")
Reported by Pylint.
Line: 213
Column: 72
index_num=st.integers(0, 5000),
**hu.gcs)
@settings(deadline=10000)
def test_batch_gather_ops(self, rows_num, cols_num, index_num, gc, dc):
data = np.random.random((rows_num, cols_num)).astype(np.float32)
ind = np.random.randint(rows_num, size=(index_num, )).astype('int32')
net = core.Net("bench")
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
from caffe2.python import core, workspace
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 17
Column: 1
# Basic implementation of gather for axis == 0, which is lookup of indices
# in the outer dimension. Keeping it for reference here, although is similar
# to more general function below.
def ref_gather_axis0():
def inner(data, ind):
if ind.size == 0 or data.shape[0] == 0:
return [np.zeros((0, 10, 20)).astype(np.float32)]
output = [data[i] for i in ind]
return [output]
Reported by Pylint.
Line: 27
Column: 1
# Returns axis-based lookup. We just use numpy take() which handles different
# axis values as we want.
def ref_gather(axis):
def inner(data, ind):
if ind.size == 0 or data.shape[axis] == 0:
shape = list(data.shape)
shape[0] = 0
return [np.zeros(tuple(shape)).astype(np.float32)]
Reported by Pylint.
Line: 39
Column: 1
return inner
# Gather(..., match_outer==True)
def ref_gather_match_outer(axis=1):
def inner(data, ind):
if ind.size == 0 or data.shape[axis] == 0:
shape = list(data.shape)
shape[0] = 0
return [np.zeros(tuple(shape)).astype(np.float32)]
Reported by Pylint.
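Aside from the unused gc/dc fixtures the columns point at, the two excerpts for lines 197 and 213 show that the class defines test_batch_gather_ops twice, so the second definition silently replaces the first and that test never runs (function-redefined in Pylint terms, assuming that is among the findings). A sketch of the fix is simply a distinct name for the second variant (the new name below is hypothetical):
def test_batch_gather_ops_random(self, rows_num, cols_num, index_num, gc, dc):
    ...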
test/jit/test_convert_activation.py
53 issues
Line: 5
Column: 1
import sys
from itertools import product
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing import FileCheck
import unittest
Reported by Pylint.
Line: 6
Column: 1
from itertools import product
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing import FileCheck
import unittest
try:
Reported by Pylint.
Line: 7
Column: 1
from itertools import product
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing import FileCheck
import unittest
try:
import torchvision
Reported by Pylint.
Line: 8
Column: 1
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing import FileCheck
import unittest
try:
import torchvision
HAS_TORCHVISION = True
Reported by Pylint.
Line: 21
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
Reported by Pylint.
Line: 152
Column: 13
torch.sigmoid_,
torch.tanh_,
]:
def test_basic(x):
y = x + 1
activation(y)
return y
fn = torch.jit.script(test_basic)
Reported by Pylint.
Line: 70
Column: 21
for activation in activations:
def test_basic(x):
y = x + 1
z = activation(y)
return z
fn = torch.jit.script(test_basic)
self.run_pass("inline", fn.graph)
self.run_pass("constant_propagation", fn.graph)
Reported by Pylint.
Line: 136
Column: 17
for activation in activations:
def test_basic(x):
y = x + 1
activation(y, inplace=True)
return y
fn = torch.jit.script(test_basic)
self.run_pass("inline", fn.graph)
self.run_pass("constant_propagation", fn.graph)
Reported by Pylint.
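These findings match Pylint's cell-var-from-loop pattern (an assumption): test_basic closes over the loop variable activation, so every closure sees whatever activation last held when it is eventually called. The usual fix binds the current value at definition time; a sketch for the in-place variant:
for activation in activations:
    def test_basic(x, activation=activation):  # bind the loop variable eagerly
        y = x + 1
        activation(y, inplace=True)
        return y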
Line: 1
Column: 1
import os
import sys
from itertools import product
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing import FileCheck
import unittest
Reported by Pylint.
Line: 9
Column: 1
import torch.nn as nn
import torch.nn.functional as F
from torch.testing import FileCheck
import unittest
try:
import torchvision
HAS_TORCHVISION = True
except ImportError:
Reported by Pylint.