The following issues were found:
torch/quantization/fx/_equalize.py
119 issues
Line: 8
Column: 1
from torch.fx import GraphModule
from torch.fx.graph import Node
from .utils import (
    WEIGHT_INDEX_DICT,
    get_new_attr_name_with_prefix,
    maybe_get_next_module,
    _parent_name,
)
Reported by Pylint.
Line: 14
Column: 1
    maybe_get_next_module,
    _parent_name,
)
from ..observer import (
    PerChannelMinMaxObserver,
    _with_args,
    ObserverBase,
)
from ..utils import check_min_max_valid
Reported by Pylint.
Line: 19
Column: 1
    _with_args,
    ObserverBase,
)
from ..utils import check_min_max_valid
from collections import namedtuple
from typing import Dict, Any, List, Tuple, Optional
import warnings
Reported by Pylint.
Line: 53
Column: 30
    to calculate the equalization scale.
    """
    def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine,
                 quant_min=None, quant_max=None, factory_kwargs=None) -> None:
        super(_InputEqualizationObserver, self).__init__()
        if qscheme not in {torch.per_tensor_affine, torch.per_tensor_symmetric}:
            raise TypeError("Input qscheme must be per-tensor")
Reported by Pylint.
Line: 53
Column: 52
    to calculate the equalization scale.
    """
    def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine,
                 quant_min=None, quant_max=None, factory_kwargs=None) -> None:
        super(_InputEqualizationObserver, self).__init__()
        if qscheme not in {torch.per_tensor_affine, torch.per_tensor_symmetric}:
            raise TypeError("Input qscheme must be per-tensor")
Reported by Pylint.
Line: 57
Column: 53
                 quant_min=None, quant_max=None, factory_kwargs=None) -> None:
        super(_InputEqualizationObserver, self).__init__()
        if qscheme not in {torch.per_tensor_affine, torch.per_tensor_symmetric}:
            raise TypeError("Input qscheme must be per-tensor")
        self.dtype = dtype
        self.qscheme = qscheme
Reported by Pylint.
Line: 57
Column: 28
                 quant_min=None, quant_max=None, factory_kwargs=None) -> None:
        super(_InputEqualizationObserver, self).__init__()
        if qscheme not in {torch.per_tensor_affine, torch.per_tensor_symmetric}:
            raise TypeError("Input qscheme must be per-tensor")
        self.dtype = dtype
        self.qscheme = qscheme
Reported by Pylint.
Line: 69
Column: 35
                                                  quant_max=quant_max,
                                                  factory_kwargs=factory_kwargs)
        self.equalization_scale = torch.tensor(1)
        self.equalization_shape: List[int] = []
    def forward(self, x_orig):
        if not (x_orig.ndim >= 2 and x_orig.ndim <= 5):
            raise ValueError("InputEqualizationObserver only supports Linear and Conv layers")
Reported by Pylint.
Line: 88
Column: 73
    def set_equalization_scale(self, equalization_scale):
        # Reshape the equalization scale along axis=1 so that it can be
        # multiplied with the input along axis=1
        if equalization_scale.nelement() == 1 and equalization_scale == torch.tensor(1):
            return
        self.equalization_scale = torch.reshape(equalization_scale, self.equalization_shape)
    def calculate_scaled_minmax(self):
        r""" Returns the scaled min/max inputs
Reported by Pylint.
Line: 90
Column: 35
        # multiplied with the input along axis=1
        if equalization_scale.nelement() == 1 and equalization_scale == torch.tensor(1):
            return
        self.equalization_scale = torch.reshape(equalization_scale, self.equalization_shape)
    def calculate_scaled_minmax(self):
        r""" Returns the scaled min/max inputs
        """
        if self.equalization_scale.nelement() == 1 and self.equalization_scale == torch.tensor(1):
Reported by Pylint.
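The report gives positions but no Pylint message IDs. The column-1 findings above all sit on import lines where standard-library imports (collections, typing, warnings) follow relative imports, which matches Pylint's import-ordering checks; the mid-line columns land on torch attributes (torch.quint8, torch.per_tensor_affine, torch.tensor), which Pylint often cannot resolve on the compiled torch module and reports as false positives. A minimal sketch of a reordered import block, assuming the import-order diagnosis (the `import torch` line is not shown in the excerpt but is implied by the code):

import warnings
from collections import namedtuple
from typing import Any, Dict, List, Optional, Tuple

import torch
from torch.fx import GraphModule
from torch.fx.graph import Node

from .utils import (
    WEIGHT_INDEX_DICT,
    get_new_attr_name_with_prefix,
    maybe_get_next_module,
    _parent_name,
)

For the torch.* attribute warnings, Pylint's generated-members setting (for example, an entry covering torch in the project's Pylint configuration) is the usual mitigation rather than changing the code.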
test/jit/test_hooks.py
119 issues
Line: 6
Column: 1
import unittest
from typing import Tuple
import torch
from jit.test_hooks_modules import (
    ModuleDirectforwardSubmodCall, ModuleForwardSingleInput,
    ModuleForwardTupleInput, create_forward_tuple_input,
    create_module_forward_multiple_inputs, create_module_forward_single_input,
    create_module_hook_return_nothing,
Reported by Pylint.
Line: 26
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == "__main__":
    raise RuntimeError(
        "This test file is not meant to be run directly, use:\n\n"
        "\tpython test/test_jit.py TESTNAME\n\n"
Reported by Pylint.
Line: 133
Column: 9
        m.submodule.register_forward_pre_hook(prehook)
        def prehook(self, input: Tuple[str]) -> Tuple[str]:
            return "This is the second hook"
        m.submodule.register_forward_pre_hook(prehook)
        with self.assertRaisesRegex(
Reported by Pylint.
Line: 152
Column: 9
        m.submodule.register_forward_hook(hook)
        def hook(self, input: Tuple[str]):
            return "This is the second hook"
        m.submodule.register_forward_hook(hook)
        with self.assertRaisesRegex(
Reported by Pylint.
Line: 110
Column: 23
        # Hooks can't have the same name as methods.
        m = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
        def foo(self, input: Tuple[str]) -> Tuple[str]:
            assert self.name == "inner_mod_name"
            assert input[0] == "a_outermod"
            return ("pre_hook_override_name",)
        m.submodule.register_forward_pre_hook(foo)
Reported by Pylint.
Line: 128
Column: 27
        # Test edge case of two hooks sharing name but not python definition
        m = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
        def prehook(self, input: Tuple[str]) -> Tuple[str]:
            return "This is the first hook"
        m.submodule.register_forward_pre_hook(prehook)
        def prehook(self, input: Tuple[str]) -> Tuple[str]:
Reported by Pylint.
Line: 128
Column: 21
        # Test edge case of two hooks sharing name but not python definition
        m = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
        def prehook(self, input: Tuple[str]) -> Tuple[str]:
            return "This is the first hook"
        m.submodule.register_forward_pre_hook(prehook)
        def prehook(self, input: Tuple[str]) -> Tuple[str]:
Reported by Pylint.
Line: 133
Column: 27
        m.submodule.register_forward_pre_hook(prehook)
        def prehook(self, input: Tuple[str]) -> Tuple[str]:
            return "This is the second hook"
        m.submodule.register_forward_pre_hook(prehook)
        with self.assertRaisesRegex(
Reported by Pylint.
Line: 133
Column: 21
        m.submodule.register_forward_pre_hook(prehook)
        def prehook(self, input: Tuple[str]) -> Tuple[str]:
            return "This is the second hook"
        m.submodule.register_forward_pre_hook(prehook)
        with self.assertRaisesRegex(
Reported by Pylint.
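No message IDs are given, but the pattern is clear from the excerpts: `prehook`/`hook` are deliberately defined twice (the test exercises hooks that share a name), and the parameter `input` shadows the Python builtin. Since both are intentional in a test, a targeted pragma is a plausible fix; renaming would be the alternative where the shadowing is accidental. A sketch, assuming the findings are function-redefined and redefined-builtin:

        def prehook(self, input: Tuple[str]) -> Tuple[str]:  # pylint: disable=function-redefined,redefined-builtin
            return "This is the second hook"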
tools/stats/print_test_stats.py
118 issues
Line: 19
Column: 1
                    Set, Tuple, cast)
from xml.dom import minidom
from typing_extensions import TypedDict
from tools.stats.s3_stat_parser import (newify_case, get_S3_object_from_bucket, get_test_stats_summaries_for_job,
                                        Report, Status, Commit, HAVE_BOTO3, Version2Case, VersionedReport,
                                        Version1Report, Version2Report, ReportMetaMeta)
from tools.stats.scribe import send_to_scribe
Reported by Pylint.
Line: 51
Column: 3
    cases: List[CaseDiff]
# TODO: consolidate this with the get_cases function from
# tools/stats/test_history.py
# Here we translate to a three-layer format (file -> suite -> case)
# rather than a two-layer format (suite -> case) because as mentioned in
# a comment in the body of this function, if we consolidate suites that
Reported by Pylint.
Line: 118
Column: 5
def display_stat(
    x: Stat,
    format: Tuple[Tuple[int, int], Tuple[int, int]],
) -> str:
    spread_len = format[1][0] + 1 + format[1][1]
    spread = x['spread']
    if spread is not None:
        spread_str = f' ± {spread:{spread_len}.{format[1][1]}f}s'
Reported by Pylint.
Line: 182
Column: 5
def matching_test_times(
    *,
    base_reports: Dict[Commit, List[SimplerReport]],
    filename: str,
    suite_name: str,
    case_name: str,
    status: Status,
) -> List[float]:
    times: List[float] = []
Reported by Pylint.
Line: 183
Column: 5
    *,
    base_reports: Dict[Commit, List[SimplerReport]],
    filename: str,
    suite_name: str,
    case_name: str,
    status: Status,
) -> List[float]:
    times: List[float] = []
    for reports in base_reports.values():
Reported by Pylint.
Line: 226
Column: 9
    modified_suites: List[SuiteDiff] = []
    added_suites: List[SuiteDiff] = []
    for filename, suite_name in sorted(all_suites):
        case_diffs: List[CaseDiff] = []
        head_suite = head_report.get(filename, {}).get(suite_name)
        base_cases: Dict[str, Status] = dict(sorted(set.intersection(*[
            {
                (n, case['status'])
Reported by Pylint.
Line: 226
Column: 19
    modified_suites: List[SuiteDiff] = []
    added_suites: List[SuiteDiff] = []
    for filename, suite_name in sorted(all_suites):
        case_diffs: List[CaseDiff] = []
        head_suite = head_report.get(filename, {}).get(suite_name)
        base_cases: Dict[str, Status] = dict(sorted(set.intersection(*[
            {
                (n, case['status'])
Reported by Pylint.
Line: 233
Column: 31
            {
                (n, case['status'])
                for n, case
                in report.get(filename, {}).get(suite_name, {}).items()
            }
            for report in base_report
        ] or [set()])))
        case_stats: Dict[str, Stat] = {}
        if head_suite:
Reported by Pylint.
Line: 233
Column: 49
            {
                (n, case['status'])
                for n, case
                in report.get(filename, {}).get(suite_name, {}).items()
            }
            for report in base_report
        ] or [set()])))
        case_stats: Dict[str, Stat] = {}
        if head_suite:
Reported by Pylint.
Line: 593
Column: 22
        rc += f' skipped: {self.skipped_count}'
        return f'TestSuite({rc})'
    def append(self, test_case: TestCase) -> None:
        self.test_cases[test_case.name] = test_case
        self.total_time += test_case.time
        self.failed_count += 1 if test_case.failed else 0
        self.skipped_count += 1 if test_case.skipped else 0
        self.errored_count += 1 if test_case.errored else 0
Reported by Pylint.
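The findings here cluster on a TODO comment (commonly a fixme warning), on function signatures, and on names reused between scopes. In particular, the `format` parameter of display_stat shadows the builtin of the same name; a rename avoids the warning without a pragma. A sketch assuming that diagnosis (`fmt` is a hypothetical replacement name, and the Stat fields shown are inferred from the excerpt, not confirmed by the report):

from typing import Optional, Tuple
from typing_extensions import TypedDict

class Stat(TypedDict):
    center: float
    spread: Optional[float]

def display_stat(
    x: Stat,
    fmt: Tuple[Tuple[int, int], Tuple[int, int]],  # renamed from `format` to avoid shadowing the builtin
) -> str:
    spread = x['spread']
    spread_len = fmt[1][0] + 1 + fmt[1][1]
    spread_str = f' ± {spread:{spread_len}.{fmt[1][1]}f}s' if spread is not None else ''
    center_len = fmt[0][0] + 1 + fmt[0][1]
    return f"{x['center']:{center_len}.{fmt[0][1]}f}s{spread_str}"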
torch/optim/_multi_tensor/_functional.py
118 issues
Line: 10
Column: 16
def _make_sparse(grad, grad_indices, values):
    size = grad.size()
    if grad_indices.numel() == 0 or values.numel() == 0:
        return torch.empty_like(grad)
    return torch.sparse_coo_tensor(grad_indices, values, size)
def adagrad(
    params: List[Tensor],
Reported by Pylint.
Line: 11
Column: 12
    size = grad.size()
    if grad_indices.numel() == 0 or values.numel() == 0:
        return torch.empty_like(grad)
    return torch.sparse_coo_tensor(grad_indices, values, size)
def adagrad(
    params: List[Tensor],
    grads: List[Tensor],
Reported by Pylint.
Line: 36
Column: 9
            raise RuntimeError(
                "weight_decay option is not compatible with sparse gradients"
            )
        torch._foreach_add_(grads, params, alpha=weight_decay)
    minus_clr = [-lr / (1 + (step - 1) * lr_decay) for step in state_steps]
    if has_sparse_grad:
        # sparse is not supported by multi_tensor. Fall back to optim.adagrad
Reported by Pylint.
Line: 59
Column: 9
                    alpha=minus_clr[i],
                )
    else:
        torch._foreach_addcmul_(state_sums, grads, grads, value=1)
        std = torch._foreach_add(torch._foreach_sqrt(state_sums), eps)
        torch._foreach_addcdiv_(
            params, torch._foreach_mul(grads, minus_clr), std
        )
Reported by Pylint.
Line: 60
Column: 15
                )
    else:
        torch._foreach_addcmul_(state_sums, grads, grads, value=1)
        std = torch._foreach_add(torch._foreach_sqrt(state_sums), eps)
        torch._foreach_addcdiv_(
            params, torch._foreach_mul(grads, minus_clr), std
        )
Reported by Pylint.
Line: 60
Column: 34
                )
    else:
        torch._foreach_addcmul_(state_sums, grads, grads, value=1)
        std = torch._foreach_add(torch._foreach_sqrt(state_sums), eps)
        torch._foreach_addcdiv_(
            params, torch._foreach_mul(grads, minus_clr), std
        )
Reported by Pylint.
Line: 61
Column: 9
    else:
        torch._foreach_addcmul_(state_sums, grads, grads, value=1)
        std = torch._foreach_add(torch._foreach_sqrt(state_sums), eps)
        torch._foreach_addcdiv_(
            params, torch._foreach_mul(grads, minus_clr), std
        )
def adamax(params: List[Tensor],
Reported by Pylint.
Line: 62
Column: 21
        torch._foreach_addcmul_(state_sums, grads, grads, value=1)
        std = torch._foreach_add(torch._foreach_sqrt(state_sums), eps)
        torch._foreach_addcdiv_(
            params, torch._foreach_mul(grads, minus_clr), std
        )
def adamax(params: List[Tensor],
           grads: List[Tensor],
Reported by Pylint.
Line: 83
Column: 9
"""
if weight_decay != 0:
torch._foreach_add_(grads, params, alpha=weight_decay)
# Update biased first moment estimate.
torch._foreach_mul_(exp_avgs, beta1)
torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1)
Reported by Pylint.
Line: 86
Column: 5
        torch._foreach_add_(grads, params, alpha=weight_decay)
    # Update biased first moment estimate.
    torch._foreach_mul_(exp_avgs, beta1)
    torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1)
    # Update the exponentially weighted infinity norm.
    torch._foreach_mul_(exp_infs, beta2)
Reported by Pylint.
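Most of the flagged columns fall on underscore-prefixed calls (torch._foreach_add_, torch._foreach_addcmul_, and friends), which Pylint treats as protected-access even though these foreach primitives are used deliberately inside torch.optim. A runnable sketch of the usual mitigation, keeping the call and scoping the pragma to the line that needs it:

import torch

params = [torch.ones(3)]
grads = [torch.full((3,), 0.5)]
# The leading underscore marks a private API, but this module ships inside
# torch itself, so the access is intentional:
torch._foreach_add_(grads, params, alpha=0.1)  # pylint: disable=protected-access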
benchmarks/tensorexpr/broadcast.py
116 issues
Line: 1
Column: 1
from . import benchmark
import itertools
import numpy as np
import torch
class BroadcastMulBench(benchmark.Benchmark):
    def __init__(self, mode, device, dtype, case, M, N, K):
        super().__init__(mode, device, dtype)
Reported by Pylint.
Line: 4
Column: 1
from . import benchmark
import itertools
import numpy as np
import torch
class BroadcastMulBench(benchmark.Benchmark):
    def __init__(self, mode, device, dtype, case, M, N, K):
        super().__init__(mode, device, dtype)
Reported by Pylint.
Line: 151
Column: 3
# benchmark.register_benchmark_class(BroadcastColBench)
# benchmark.register_benchmark_class(BroadcastThreeArgs)
# TODO: merge this with elementwise bench
# A template class for elementwise operations.
# A derived class will override the class instance to customize its behavior.
class BroadcastBench(benchmark.Benchmark):
    # List of customization class variables.
    op_str = None
Reported by Pylint.
Line: 252
Column: 13
["div", lambda a, b: a / (b + 1e-4)],
[
"pow",
lambda a, b: torch.pow(a, b),
lambda a, b: np.power(a, b),
], # no fuson triggered
["max", lambda a, b: torch.max(a, b), lambda a, b: np.maximum(a, b)],
["min", lambda a, b: torch.min(a, b), lambda a, b: np.minimum(a, b)],
]
Reported by Pylint.
Line: 253
Column: 13
    [
        "pow",
        lambda a, b: torch.pow(a, b),
        lambda a, b: np.power(a, b),
    ],  # no fusion triggered
    ["max", lambda a, b: torch.max(a, b), lambda a, b: np.maximum(a, b)],
    ["min", lambda a, b: torch.min(a, b), lambda a, b: np.minimum(a, b)],
]
Reported by Pylint.
Line: 255
Column: 47
        lambda a, b: torch.pow(a, b),
        lambda a, b: np.power(a, b),
    ],  # no fusion triggered
    ["max", lambda a, b: torch.max(a, b), lambda a, b: np.maximum(a, b)],
    ["min", lambda a, b: torch.min(a, b), lambda a, b: np.minimum(a, b)],
]
unary_op_list = [
    ["erf", lambda x: torch.erf(x), lambda x: np.erf(x)],
Reported by Pylint.
Line: 255
Column: 17
        lambda a, b: torch.pow(a, b),
        lambda a, b: np.power(a, b),
    ],  # no fusion triggered
    ["max", lambda a, b: torch.max(a, b), lambda a, b: np.maximum(a, b)],
    ["min", lambda a, b: torch.min(a, b), lambda a, b: np.minimum(a, b)],
]
unary_op_list = [
    ["erf", lambda x: torch.erf(x), lambda x: np.erf(x)],
Reported by Pylint.
Line: 256
Column: 17
        lambda a, b: np.power(a, b),
    ],  # no fusion triggered
    ["max", lambda a, b: torch.max(a, b), lambda a, b: np.maximum(a, b)],
    ["min", lambda a, b: torch.min(a, b), lambda a, b: np.minimum(a, b)],
]
unary_op_list = [
    ["erf", lambda x: torch.erf(x), lambda x: np.erf(x)],
    ["exp", lambda x: torch.exp(x), lambda x: np.exp(x)],
Reported by Pylint.
Line: 256
Column: 47
        lambda a, b: np.power(a, b),
    ],  # no fusion triggered
    ["max", lambda a, b: torch.max(a, b), lambda a, b: np.maximum(a, b)],
    ["min", lambda a, b: torch.min(a, b), lambda a, b: np.minimum(a, b)],
]
unary_op_list = [
    ["erf", lambda x: torch.erf(x), lambda x: np.erf(x)],
    ["exp", lambda x: torch.exp(x), lambda x: np.exp(x)],
Reported by Pylint.
Line: 260
Column: 17
]
unary_op_list = [
    ["erf", lambda x: torch.erf(x), lambda x: np.erf(x)],
    ["exp", lambda x: torch.exp(x), lambda x: np.exp(x)],
    ["sin", lambda x: torch.sin(x), lambda x: np.sin(x)],
    ["cos", lambda x: torch.cos(x), lambda x: np.cos(x)],
]
Reported by Pylint.
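The columns at lines 252-260 land on lambdas of the form `lambda a, b: torch.max(a, b)`, which only forward their arguments; Pylint flags such wrappers as unnecessary lambdas. Passing the callables directly is equivalent and shorter. A sketch, assuming that diagnosis (note that the excerpt's `np.erf` would fail at call time regardless of lint, since NumPy has no erf; scipy.special.erf is the usual substitute, so that entry is omitted here):

import numpy as np
import torch

binary_op_list = [
    ["max", torch.max, np.maximum],
    ["min", torch.min, np.minimum],
]
unary_op_list = [
    ["exp", torch.exp, np.exp],
    ["sin", torch.sin, np.sin],
    ["cos", torch.cos, np.cos],
]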
torch/cuda/amp/grad_scaler.py
116 issues
Line: 6
Column: 1
import warnings
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple
from .common import amp_definitely_not_available
class _MultiDeviceReplicator(object):
"""
Lazily serves copies of a tensor to requested devices. Copies are cached per-device.
Reported by Pylint.
Line: 16
Column: 40
    def __init__(self, master_tensor: torch.Tensor) -> None:
        assert master_tensor.is_cuda or master_tensor.device.type == 'xla'
        self.master = master_tensor
        self._per_device_tensors: Dict[torch.device, torch.Tensor] = {}
    def get(self, device) -> torch.Tensor:
        retval = self._per_device_tensors.get(device, None)
        if retval is None:
            retval = self.master.to(device=device, non_blocking=True, copy=True)
Reported by Pylint.
Line: 143
Column: 23
    def _lazy_init_scale_growth_tracker(self, dev):
        assert self._growth_tracker is None, "_growth_tracker initialized before _scale"
        self._scale = torch.full((1,), self._init_scale, dtype=torch.float32, device=dev)
        self._growth_tracker = torch.full((1,), self._init_growth_tracker, dtype=torch.int32, device=dev)
    def scale(self, outputs):
        """
        Multiplies ('scales') a tensor or list of tensors by the scale factor.
Reported by Pylint.
Line: 143
Column: 64
    def _lazy_init_scale_growth_tracker(self, dev):
        assert self._growth_tracker is None, "_growth_tracker initialized before _scale"
        self._scale = torch.full((1,), self._init_scale, dtype=torch.float32, device=dev)
        self._growth_tracker = torch.full((1,), self._init_growth_tracker, dtype=torch.int32, device=dev)
    def scale(self, outputs):
        """
        Multiplies ('scales') a tensor or list of tensors by the scale factor.
Reported by Pylint.
Line: 144
Column: 32
    def _lazy_init_scale_growth_tracker(self, dev):
        assert self._growth_tracker is None, "_growth_tracker initialized before _scale"
        self._scale = torch.full((1,), self._init_scale, dtype=torch.float32, device=dev)
        self._growth_tracker = torch.full((1,), self._init_growth_tracker, dtype=torch.int32, device=dev)
    def scale(self, outputs):
        """
        Multiplies ('scales') a tensor or list of tensors by the scale factor.
Reported by Pylint.
Line: 144
Column: 82
    def _lazy_init_scale_growth_tracker(self, dev):
        assert self._growth_tracker is None, "_growth_tracker initialized before _scale"
        self._scale = torch.full((1,), self._init_scale, dtype=torch.float32, device=dev)
        self._growth_tracker = torch.full((1,), self._init_growth_tracker, dtype=torch.int32, device=dev)
    def scale(self, outputs):
        """
        Multiplies ('scales') a tensor or list of tensors by the scale factor.
Reported by Pylint.
Line: 206
Column: 65
                for param in group["params"]:
                    if param.grad is None:
                        continue
                    if (not allow_fp16) and param.grad.dtype == torch.float16:
                        raise ValueError("Attempting to unscale FP16 gradients.")
                    if param.grad.is_sparse:
                        # is_coalesced() == False means the sparse grad has values with duplicate indices.
                        # coalesce() deduplicates indices and adds all values that have the same index.
                        # For scaled fp16 values, there's a good chance coalescing will cause overflow,
Reported by Pylint.
Line: 213
Column: 48
                        # coalesce() deduplicates indices and adds all values that have the same index.
                        # For scaled fp16 values, there's a good chance coalescing will cause overflow,
                        # so we should check the coalesced _values().
                        if param.grad.dtype is torch.float16:
                            param.grad = param.grad.coalesce()
                        to_unscale = param.grad._values()
                    else:
                        to_unscale = param.grad
Reported by Pylint.
Line: 224
Column: 21
            for device, per_dtype_grads in per_device_and_dtype_grads.items():
                for grads in per_dtype_grads.values():
                    torch._amp_foreach_non_finite_check_and_unscale_(grads,
                                                                     per_device_found_inf.get(device),
                                                                     per_device_inv_scale.get(device))
        return per_device_found_inf._per_device_tensors
Reported by Pylint.
Line: 277
Column: 21
        # FP32 division can be imprecise for certain compile options, so we carry out the reciprocal in FP64.
        assert self._scale is not None
        inv_scale = self._scale.double().reciprocal().float()
        found_inf = torch.full((1,), 0.0, dtype=torch.float32, device=self._scale.device)
        optimizer_state["found_inf_per_device"] = self._unscale_grads_(optimizer, inv_scale, found_inf, False)
        optimizer_state["stage"] = OptState.UNSCALED
    def _maybe_opt_step(self, optimizer, optimizer_state, *args, **kwargs):
Reported by Pylint.
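Two families of findings are visible here: columns landing on torch attributes (torch.full, torch.float16, torch.Tensor) that Pylint often cannot see on the compiled extension module, and genuine protected-access hits on internals used on purpose (param.grad._values(), torch._amp_foreach_non_finite_check_and_unscale_, per_device_found_inf._per_device_tensors). For the former, a generated-members configuration covering torch is the common fix; for the latter, a scoped pragma preserves the intent. A small runnable sketch of the protected-access case:

import torch

# Coalesce a sparse tensor and read its values, as grad_scaler does with
# sparse gradients; the underscore method is internal but used deliberately:
i = torch.tensor([[0, 1], [1, 0]])
v = torch.tensor([2.0, 3.0])
sparse = torch.sparse_coo_tensor(i, v, (2, 2)).coalesce()
vals = sparse._values()  # pylint: disable=protected-access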
test/onnx/test_models.py
115 issues
Line: 1
Column: 1
from torchvision.models.alexnet import alexnet
from torchvision.models.inception import inception_v3
from torchvision.models.densenet import densenet121
from torchvision.models.resnet import resnet50
from torchvision.models.vgg import vgg16, vgg16_bn, vgg19, vgg19_bn
from torchvision.models.googlenet import googlenet
from torchvision.models.mnasnet import mnasnet1_0
from torchvision.models.mobilenet import mobilenet_v2
from torchvision.models import shufflenet_v2_x1_0
Reported by Pylint.
Line: 2
Column: 1
from torchvision.models.alexnet import alexnet
from torchvision.models.inception import inception_v3
from torchvision.models.densenet import densenet121
from torchvision.models.resnet import resnet50
from torchvision.models.vgg import vgg16, vgg16_bn, vgg19, vgg19_bn
from torchvision.models.googlenet import googlenet
from torchvision.models.mnasnet import mnasnet1_0
from torchvision.models.mobilenet import mobilenet_v2
from torchvision.models import shufflenet_v2_x1_0
Reported by Pylint.
Line: 3
Column: 1
from torchvision.models.alexnet import alexnet
from torchvision.models.inception import inception_v3
from torchvision.models.densenet import densenet121
from torchvision.models.resnet import resnet50
from torchvision.models.vgg import vgg16, vgg16_bn, vgg19, vgg19_bn
from torchvision.models.googlenet import googlenet
from torchvision.models.mnasnet import mnasnet1_0
from torchvision.models.mobilenet import mobilenet_v2
from torchvision.models import shufflenet_v2_x1_0
Reported by Pylint.
Line: 4
Column: 1
from torchvision.models.alexnet import alexnet
from torchvision.models.inception import inception_v3
from torchvision.models.densenet import densenet121
from torchvision.models.resnet import resnet50
from torchvision.models.vgg import vgg16, vgg16_bn, vgg19, vgg19_bn
from torchvision.models.googlenet import googlenet
from torchvision.models.mnasnet import mnasnet1_0
from torchvision.models.mobilenet import mobilenet_v2
from torchvision.models import shufflenet_v2_x1_0
Reported by Pylint.
Line: 5
Column: 1
from torchvision.models.inception import inception_v3
from torchvision.models.densenet import densenet121
from torchvision.models.resnet import resnet50
from torchvision.models.vgg import vgg16, vgg16_bn, vgg19, vgg19_bn
from torchvision.models.googlenet import googlenet
from torchvision.models.mnasnet import mnasnet1_0
from torchvision.models.mobilenet import mobilenet_v2
from torchvision.models import shufflenet_v2_x1_0
from torchvision.models.segmentation.segmentation import fcn_resnet101, deeplabv3_resnet101
Reported by Pylint.
Line: 6
Column: 1
from torchvision.models.densenet import densenet121
from torchvision.models.resnet import resnet50
from torchvision.models.vgg import vgg16, vgg16_bn, vgg19, vgg19_bn
from torchvision.models.googlenet import googlenet
from torchvision.models.mnasnet import mnasnet1_0
from torchvision.models.mobilenet import mobilenet_v2
from torchvision.models import shufflenet_v2_x1_0
from torchvision.models.segmentation.segmentation import fcn_resnet101, deeplabv3_resnet101
from torchvision.models.video import r3d_18, mc3_18, r2plus1d_18
Reported by Pylint.
Line: 7
Column: 1
from torchvision.models.resnet import resnet50
from torchvision.models.vgg import vgg16, vgg16_bn, vgg19, vgg19_bn
from torchvision.models.googlenet import googlenet
from torchvision.models.mnasnet import mnasnet1_0
from torchvision.models.mobilenet import mobilenet_v2
from torchvision.models import shufflenet_v2_x1_0
from torchvision.models.segmentation.segmentation import fcn_resnet101, deeplabv3_resnet101
from torchvision.models.video import r3d_18, mc3_18, r2plus1d_18
Reported by Pylint.
Line: 8
Column: 1
from torchvision.models.vgg import vgg16, vgg16_bn, vgg19, vgg19_bn
from torchvision.models.googlenet import googlenet
from torchvision.models.mnasnet import mnasnet1_0
from torchvision.models.mobilenet import mobilenet_v2
from torchvision.models import shufflenet_v2_x1_0
from torchvision.models.segmentation.segmentation import fcn_resnet101, deeplabv3_resnet101
from torchvision.models.video import r3d_18, mc3_18, r2plus1d_18
from model_defs.mnist import MNIST
Reported by Pylint.
Line: 9
Column: 1
from torchvision.models.googlenet import googlenet
from torchvision.models.mnasnet import mnasnet1_0
from torchvision.models.mobilenet import mobilenet_v2
from torchvision.models import shufflenet_v2_x1_0
from torchvision.models.segmentation.segmentation import fcn_resnet101, deeplabv3_resnet101
from torchvision.models.video import r3d_18, mc3_18, r2plus1d_18
from model_defs.mnist import MNIST
from model_defs.squeezenet import SqueezeNet
Reported by Pylint.
Line: 10
Column: 1
from torchvision.models.mnasnet import mnasnet1_0
from torchvision.models.mobilenet import mobilenet_v2
from torchvision.models import shufflenet_v2_x1_0
from torchvision.models.segmentation.segmentation import fcn_resnet101, deeplabv3_resnet101
from torchvision.models.video import r3d_18, mc3_18, r2plus1d_18
from model_defs.mnist import MNIST
from model_defs.squeezenet import SqueezeNet
from model_defs.super_resolution import SuperResolutionNet
Reported by Pylint.
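Every finding in this file sits at column 1 of an import line, so these are most likely import-group warnings (third-party torchvision imports mixed with the local model_defs helpers) or unresolved imports if torchvision is absent from the lint environment; the report does not say which. A sketch of the grouping fix, assuming the former (only a representative subset of the imports is shown):

from torchvision.models import shufflenet_v2_x1_0
from torchvision.models.alexnet import alexnet
from torchvision.models.video import r3d_18, mc3_18, r2plus1d_18

from model_defs.mnist import MNIST
from model_defs.squeezenet import SqueezeNet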
test/jit/test_cuda.py
115 issues
Line: 6
Column: 1
import gc
import unittest
import torch
from typing import NamedTuple
from torch.testing import FileCheck
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import skipIfRocm, skipCUDANonDefaultStreamIf
Reported by Pylint.
Line: 8
Column: 1
import torch
from typing import NamedTuple
from torch.testing import FileCheck
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import skipIfRocm, skipCUDANonDefaultStreamIf
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
Reported by Pylint.
Line: 9
Column: 1
import torch
from typing import NamedTuple
from torch.testing import FileCheck
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import skipIfRocm, skipCUDANonDefaultStreamIf
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
Reported by Pylint.
Line: 10
Column: 1
from typing import NamedTuple
from torch.testing import FileCheck
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import skipIfRocm, skipCUDANonDefaultStreamIf
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
Reported by Pylint.
Line: 45
Column: 5
"""
A suite of tests for the CUDA API in TorchScript.
"""
def setUp(self):
super(TestCUDA, self).setUp()
def tearDown(self):
gc.collect()
torch.cuda.empty_cache()
Reported by Pylint.
Line: 450
Column: 13
            start_event = torch.cuda.Event(True, False, False)
            stream.record_event(start_event)
            tensor1 = torch.rand(1000000000, 1000000000, device="cuda")
            tensor2 = torch.mm(tensor1, tensor1).to("cuda")
            stream.record_event(event)
            event.synchronize()
            is_again_true_event_query = event.query()
            if not (is_true_event_query and is_again_true_event_query):
Reported by Pylint.
Line: 465
Column: 13
        # computed on the stream. The stream.query should be true once the synchronization is done
        @torch.jit.script
        def test_stream_synchronize() -> float:
            device_index = torch.cuda.current_device()
            s = torch.cuda.Stream()
            e_tik = torch.cuda.Event(True, False, False)
            e_tok = torch.cuda.Event(True, False, False)
            e_tik.record(s)
Reported by Pylint.
Line: 473
Column: 17
            e_tik.record(s)
            tensor1 = torch.rand(1000000000, 1000000000, device="cuda")
            with torch.cuda.stream(s):
                tensor2 = torch.mm(tensor1, tensor1).to("cuda")
            s.synchronize()
            e_tok.record(s)
            e_tok.synchronize()
            if not s.query():
Reported by Pylint.
Line: 498
Column: 17
            e_tik.record(s)
            tensor1 = torch.rand(1000000000, 1000000000, device="cuda")
            with torch.cuda.stream(s):
                tensor = torch.mm(tensor1, tensor1).to("cuda")
            s.record_event(e_tok)
            e_tok.synchronize()
            s.synchronize()
            if not s.query():
Reported by Pylint.
Line: 528
Column: 17
            e_tik.record(s0)
            tensor1 = torch.rand(1000000000, 1000000000, device="cuda")
            with torch.cuda.stream(s0):
                tensor2 = torch.mm(tensor1, tensor1).cuda()
            e_sync = torch.cuda.Event(True, False, False)
            e_sync.record(torch.cuda.current_stream(device))
            e_sync.wait(s1)
            with torch.cuda.stream(s1):
                tensor3 = torch.rand(1000000000, 1000000000, device="cuda")
Reported by Pylint.
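Beyond the import findings, two patterns stand out: a setUp that only delegates to super() (Pylint calls this useless-super-delegation) and locals such as tensor2/tensor3 that are assigned purely so the matmul runs on the stream being timed (unused-variable). A sketch of both fixes, assuming those diagnoses; the override is simply dropped, and the unused binding is replaced with the conventional throwaway name, which Pylint's default dummy-variable pattern accepts:

class TestCUDA(JitTestCase):
    # setUp removed: a method containing only super().setUp() adds nothing.
    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

# inside the scripted timing functions:
_ = torch.mm(tensor1, tensor1).to("cuda")  # launch the kernel without keeping the result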
torch/quantization/fx/convert.py
114 issues
Line: 14
Column: 1
    Node,
)
from torch.fx.node import Argument
from .quantization_types import Pattern
from ..qconfig import QConfigAny
from .match_utils import (
    find_matches,
)
from .graph_module import (
Reported by Pylint.
Line: 15
Column: 1
)
from torch.fx.node import Argument
from .quantization_types import Pattern
from ..qconfig import QConfigAny
from .match_utils import (
    find_matches,
)
from .graph_module import (
    is_observed_module,
Reported by Pylint.
Line: 16
Column: 1
from torch.fx.node import Argument
from .quantization_types import Pattern
from ..qconfig import QConfigAny
from .match_utils import (
    find_matches,
)
from .graph_module import (
    is_observed_module,
    is_observed_standalone_module,
Reported by Pylint.
Line: 19
Column: 1
from .match_utils import (
    find_matches,
)
from .graph_module import (
    is_observed_module,
    is_observed_standalone_module,
    QuantizedGraphModule,
)
from .quantization_patterns import (
Reported by Pylint.
Line: 24
Column: 1
    is_observed_standalone_module,
    QuantizedGraphModule,
)
from .quantization_patterns import (
    QuantizeHandler,
)
from ._equalize import update_obs_for_equalization, convert_eq_obs
from .utils import (
    is_get_tensor_info_node,
Reported by Pylint.
Line: 27
Column: 1
from .quantization_patterns import (
    QuantizeHandler,
)
from ._equalize import update_obs_for_equalization, convert_eq_obs
from .utils import (
    is_get_tensor_info_node,
    node_return_type_is_int,
    quantize_node,
    get_new_attr_name_with_prefix,
Reported by Pylint.
Line: 28
Column: 1
    QuantizeHandler,
)
from ._equalize import update_obs_for_equalization, convert_eq_obs
from .utils import (
    is_get_tensor_info_node,
    node_return_type_is_int,
    quantize_node,
    get_new_attr_name_with_prefix,
    collect_producer_nodes,
Reported by Pylint.
Line: 39
Column: 1
    WEIGHT_INDEX_DICT,
)
from ..quantize import (
    _remove_qconfig,
    is_activation_post_process,
)
from ..utils import (
    activation_is_statically_quantized,
Reported by Pylint.
Line: 43
Column: 1
    _remove_qconfig,
    is_activation_post_process,
)
from ..utils import (
    activation_is_statically_quantized,
    activation_dtype,
)
# weight prepacking ops
Reported by Pylint.
Line: 199
Column: 34
    run_weight_observers(model)
    quantized_graph = Graph()
    env: Dict[str, Dict[Optional[torch.dtype], Node]] = defaultdict(lambda: defaultdict(Node))  # type: ignore[arg-type]
    graph_inputs: List[str] = []
    for node in model.graph.nodes:
        if node.op == 'placeholder':
            graph_inputs.append(node.name)
Reported by Pylint.
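As in _equalize.py, every finding here is at column 1 of an import line: the file interleaves absolute torch imports with package-relative ones across several parenthesized groups. A sketch of one conventional arrangement, assuming the warnings concern import order (absolute imports first, then relative, each group alphabetized; only names visible in the excerpts are shown):

from torch.fx.node import Argument

from ..qconfig import QConfigAny
from ..quantize import _remove_qconfig, is_activation_post_process
from ..utils import activation_dtype, activation_is_statically_quantized
from ._equalize import convert_eq_obs, update_obs_for_equalization
from .graph_module import (
    QuantizedGraphModule,
    is_observed_module,
    is_observed_standalone_module,
)
from .match_utils import find_matches
from .quantization_types import Pattern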
torch/nn/modules/conv.py
114 issues
Line: 8
Column: 1
import torch
from torch import Tensor
from torch.nn.parameter import Parameter, UninitializedParameter
from .. import functional as F
from .. import init
from .lazy import LazyModuleMixin
from .module import Module
from .utils import _single, _pair, _triple, _reverse_repeat_tuple
from torch._torch_docs import reproducibility_notes
Reported by Pylint.
Line: 9
Column: 1
from torch import Tensor
from torch.nn.parameter import Parameter, UninitializedParameter
from .. import functional as F
from .. import init
from .lazy import LazyModuleMixin
from .module import Module
from .utils import _single, _pair, _triple, _reverse_repeat_tuple
from torch._torch_docs import reproducibility_notes
Reported by Pylint.
Line: 10
Column: 1
from torch.nn.parameter import Parameter, UninitializedParameter
from .. import functional as F
from .. import init
from .lazy import LazyModuleMixin
from .module import Module
from .utils import _single, _pair, _triple, _reverse_repeat_tuple
from torch._torch_docs import reproducibility_notes
from ..common_types import _size_1_t, _size_2_t, _size_3_t
Reported by Pylint.
Line: 11
Column: 1
from .. import functional as F
from .. import init
from .lazy import LazyModuleMixin
from .module import Module
from .utils import _single, _pair, _triple, _reverse_repeat_tuple
from torch._torch_docs import reproducibility_notes
from ..common_types import _size_1_t, _size_2_t, _size_3_t
from typing import Optional, List, Tuple, Union
Reported by Pylint.
Line: 12
Column: 1
from .. import init
from .lazy import LazyModuleMixin
from .module import Module
from .utils import _single, _pair, _triple, _reverse_repeat_tuple
from torch._torch_docs import reproducibility_notes
from ..common_types import _size_1_t, _size_2_t, _size_3_t
from typing import Optional, List, Tuple, Union
Reported by Pylint.
Line: 15
Column: 1
from .utils import _single, _pair, _triple, _reverse_repeat_tuple
from torch._torch_docs import reproducibility_notes
from ..common_types import _size_1_t, _size_2_t, _size_3_t
from typing import Optional, List, Tuple, Union
convolution_notes = \
{"groups_note": r"""* :attr:`groups` controls the connections between inputs and outputs.
:attr:`in_channels` and :attr:`out_channels` must both be divisible by
Reported by Pylint.
Line: 128
Column: 37
        self._reversed_padding_repeated_twice = _reverse_repeat_tuple(self.padding, 2)
        if transposed:
            self.weight = Parameter(torch.empty(
                (in_channels, out_channels // groups, *kernel_size), **factory_kwargs))
        else:
            self.weight = Parameter(torch.empty(
                (out_channels, in_channels // groups, *kernel_size), **factory_kwargs))
        if bias:
Reported by Pylint.
Line: 131
Column: 37
            self.weight = Parameter(torch.empty(
                (in_channels, out_channels // groups, *kernel_size), **factory_kwargs))
        else:
            self.weight = Parameter(torch.empty(
                (out_channels, in_channels // groups, *kernel_size), **factory_kwargs))
        if bias:
            self.bias = Parameter(torch.empty(out_channels, **factory_kwargs))
        else:
            self.register_parameter('bias', None)
Reported by Pylint.
Line: 134
Column: 35
            self.weight = Parameter(torch.empty(
                (out_channels, in_channels // groups, *kernel_size), **factory_kwargs))
        if bias:
            self.bias = Parameter(torch.empty(out_channels, **factory_kwargs))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
Reported by Pylint.
Line: 50
Column: 29
                     'out_channels', 'kernel_size']
    __annotations__ = {'bias': Optional[torch.Tensor]}
    def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor:
        ...
    _in_channels: int
    _reversed_padding_repeated_twice: List[int]
    out_channels: int
Reported by Pylint.
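The Line-50 column points at the `input` parameter of _conv_forward, which shadows the Python builtin; the Line-128/131/134 columns land on torch.empty, another spot where Pylint often cannot resolve members of the compiled torch module. Renaming `input` would break keyword callers and the documented signature across the nn API, so a scoped pragma is the conservative fix. A sketch, assuming the redefined-builtin diagnosis:

    def _conv_forward(self, input: Tensor, weight: Tensor,
                      bias: Optional[Tensor]) -> Tensor:  # pylint: disable=redefined-builtin
        ...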