The following issues were found:
tools/codegen/api/translate.py
18 issues
Line: 89
Column: 3
# While we're at it, do some simple forward inference, looking through
# constructors.
# TODO: My kingdom for a pattern matcher
# https://www.python.org/dev/peps/pep-0634/
# TODO: This could get us in recomputation trouble if b.expr is nontrivial
t = b.type
if isinstance(t, ConstRefCType) and isinstance(t.elem, OptionalCType) and \
isinstance(t.elem.elem, BaseCType) and str(t.elem.elem.type) == 'at::Tensor':
Reported by Pylint.
Line: 91
Column: 3
# constructors.
# TODO: My kingdom for a pattern matcher
# https://www.python.org/dev/peps/pep-0634/
# TODO: This could get us in recomputation trouble if b.expr is nontrivial
t = b.type
if isinstance(t, ConstRefCType) and isinstance(t.elem, OptionalCType) and \
isinstance(t.elem.elem, BaseCType) and str(t.elem.elem.type) == 'at::Tensor':
ctx[NamedCType(t.elem.elem.name, ConstRefCType(BaseCType(tensorT)))] = \
f'({b.expr}.has_value() ? *{b.expr} : at::Tensor())'
Reported by Pylint.
Line: 1
Column: 1
from typing import Dict, Sequence, List, NoReturn, Union
from tools.codegen.api.types import (BaseCType, Binding, ConstRefCType,
Expr, MutRefCType, OptionalCType,
NamedCType, SpecialArgName, tensorT,
memoryFormatT, tensorOptionsT, scalarTypeT,
boolT, deviceT, layoutT, optionalTensorRefT,
scalarT, optionalScalarRefT)
# This file implements a small program synthesis engine that implements
Reported by Pylint.
Line: 39
Column: 1
options_ctype = NamedCType("options", ConstRefCType(BaseCType(tensorOptionsT)))
class UnsatError(RuntimeError):
pass
# Given a set of in-scope bindings and a set of target bindings, synthesize
# a list of expressions that uses only the in-scope bindings (bindings) that
# have all of the types of goals. You may want to use this function if
Reported by Pylint.
Line: 59
Column: 1
# 'goals', an (ordered) list of NamedCType goals is sufficient. If you are doing
# something more complicated, e.g., tracking the set of bindings in a context,
# you may find using these smaller types more convenient.
def translate(
bindings: Sequence[Union[Expr, Binding]],
goals: Sequence[Union[NamedCType, Binding]],
*, method: bool = False
) -> List[Expr]:
Reported by Pylint.
Line: 59
Column: 1
# 'goals', an (ordered) list of NamedCType goals is sufficient. If you are doing
# something more complicated, e.g., tracking the set of bindings in a context,
# you may find using these smaller types more convenient.
def translate(
bindings: Sequence[Union[Expr, Binding]],
goals: Sequence[Union[NamedCType, Binding]],
*, method: bool = False
) -> List[Expr]:
Reported by Pylint.
Line: 66
Column: 9
) -> List[Expr]:
binding_exprs: List[Expr] = []
for b in bindings:
if isinstance(b, Binding):
binding_exprs.append(Expr(
expr=b.name,
type=b.nctype,
))
Reported by Pylint.
Line: 76
Column: 9
binding_exprs.append(b)
goal_ctypes: List[NamedCType] = []
for g in goals:
if isinstance(g, Binding):
goal_ctypes.append(g.nctype)
else:
goal_ctypes.append(g)
Reported by Pylint.
Line: 84
Column: 9
# Add all the bindings to the context
ctx: Dict[NamedCType, str] = {}
for b in binding_exprs:
ctx[b.type] = b.expr
# While we're at it, do some simple forward inference, looking through
# constructors.
# TODO: My kingdom for a pattern matcher
Reported by Pylint.
Line: 92
Column: 9
# TODO: My kingdom for a pattern matcher
# https://www.python.org/dev/peps/pep-0634/
# TODO: This could get us in recomputation trouble if b.expr is nontrivial
t = b.type
if isinstance(t, ConstRefCType) and isinstance(t.elem, OptionalCType) and \
isinstance(t.elem.elem, BaseCType) and str(t.elem.elem.type) == 'at::Tensor':
ctx[NamedCType(t.elem.elem.name, ConstRefCType(BaseCType(tensorT)))] = \
f'({b.expr}.has_value() ? *{b.expr} : at::Tensor())'
Reported by Pylint.
tools/stats/export_slow_tests.py
18 issues
Line: 43
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b310-urllib-urlopen
def get_test_infra_slow_tests() -> Dict[str, float]:
url = "https://raw.githubusercontent.com/pytorch/test-infra/main/stats/slow-tests.json"
contents = urlopen(url, timeout=1).read().decode('utf-8')
return cast(Dict[str, float], json.loads(contents))
def too_similar(calculated_times: Dict[str, float], other_times: Dict[str, float], threshold: float) -> bool:
# check that their keys are the same
Reported by Bandit.
Line: 1
Column: 1
#!/usr/bin/env python3
import argparse
import json
import os
import statistics
from collections import defaultdict
from tools.stats.s3_stat_parser import get_previous_reports_for_branch, Report, Version2Report
from typing import cast, DefaultDict, Dict, List, Any
Reported by Pylint.
Line: 9
Column: 1
import statistics
from collections import defaultdict
from tools.stats.s3_stat_parser import get_previous_reports_for_branch, Report, Version2Report
from typing import cast, DefaultDict, Dict, List, Any
from urllib.request import urlopen
SLOW_TESTS_FILE = '.pytorch-slow-tests.json'
SLOW_TEST_CASE_THRESHOLD_SEC = 60.0
RELATIVE_DIFFERENCE_THRESHOLD = 0.1
Reported by Pylint.
Line: 10
Column: 1
from collections import defaultdict
from tools.stats.s3_stat_parser import get_previous_reports_for_branch, Report, Version2Report
from typing import cast, DefaultDict, Dict, List, Any
from urllib.request import urlopen
SLOW_TESTS_FILE = '.pytorch-slow-tests.json'
SLOW_TEST_CASE_THRESHOLD_SEC = 60.0
RELATIVE_DIFFERENCE_THRESHOLD = 0.1
Reported by Pylint.
Line: 16
Column: 1
SLOW_TEST_CASE_THRESHOLD_SEC = 60.0
RELATIVE_DIFFERENCE_THRESHOLD = 0.1
def get_test_case_times() -> Dict[str, float]:
reports: List[Report] = get_previous_reports_for_branch('origin/viable/strict', "")
# an entry will be like ("test_doc_examples (__main__.TestTypeHints)" -> [values]))
test_names_to_times: DefaultDict[str, List[float]] = defaultdict(list)
for report in reports:
if report.get('format_version', 1) != 2: # type: ignore[misc]
Reported by Pylint.
Line: 28
Column: 1
for suitename, test_suite in test_file['suites'].items():
for casename, test_case in test_suite['cases'].items():
# The below attaches a __main__ as that matches the format of test.__class__ in
# common_utils.py (where this data will be used), and also matches what the output
# of a running test would look like.
name = f'{casename} (__main__.{suitename})'
succeeded: bool = test_case['status'] is None
if succeeded:
test_names_to_times[name].append(test_case['seconds'])
Reported by Pylint.
Line: 37
Column: 1
return {test_case: statistics.mean(times) for test_case, times in test_names_to_times.items()}
def filter_slow_tests(test_cases_dict: Dict[str, float]) -> Dict[str, float]:
return {test_case: time for test_case, time in test_cases_dict.items() if time >= SLOW_TEST_CASE_THRESHOLD_SEC}
def get_test_infra_slow_tests() -> Dict[str, float]:
url = "https://raw.githubusercontent.com/pytorch/test-infra/main/stats/slow-tests.json"
Reported by Pylint.
Line: 38
Column: 1
def filter_slow_tests(test_cases_dict: Dict[str, float]) -> Dict[str, float]:
return {test_case: time for test_case, time in test_cases_dict.items() if time >= SLOW_TEST_CASE_THRESHOLD_SEC}
def get_test_infra_slow_tests() -> Dict[str, float]:
url = "https://raw.githubusercontent.com/pytorch/test-infra/main/stats/slow-tests.json"
contents = urlopen(url, timeout=1).read().decode('utf-8')
Reported by Pylint.
Line: 41
Column: 1
return {test_case: time for test_case, time in test_cases_dict.items() if time >= SLOW_TEST_CASE_THRESHOLD_SEC}
def get_test_infra_slow_tests() -> Dict[str, float]:
url = "https://raw.githubusercontent.com/pytorch/test-infra/main/stats/slow-tests.json"
contents = urlopen(url, timeout=1).read().decode('utf-8')
return cast(Dict[str, float], json.loads(contents))
Reported by Pylint.
Line: 47
Column: 1
return cast(Dict[str, float], json.loads(contents))
def too_similar(calculated_times: Dict[str, float], other_times: Dict[str, float], threshold: float) -> bool:
# check that their keys are the same
if calculated_times.keys() != other_times.keys():
return False
for test_case, test_time in calculated_times.items():
Reported by Pylint.
tools/code_coverage/package/oss/init.py
18 issues
Line: 5
Column: 1
import os
from typing import List, Optional, Tuple, cast
from ..util.setting import (
JSON_FOLDER_BASE_DIR,
LOG_DIR,
CompilerType,
Option,
Test,
Reported by Pylint.
Line: 14
Column: 1
TestList,
TestType,
)
from ..util.utils import (
clean_up,
create_folder,
print_log,
raise_no_test_found_exception,
remove_file,
Reported by Pylint.
Line: 22
Column: 1
remove_file,
remove_folder,
)
from ..util.utils_init import add_arguments_utils, create_folders, get_options
from .utils import (
clean_up_gcda,
detect_compiler_type,
get_llvm_tool_path,
get_oss_binary_folder,
Reported by Pylint.
Line: 23
Column: 1
remove_folder,
)
from ..util.utils_init import add_arguments_utils, create_folders, get_options
from .utils import (
clean_up_gcda,
detect_compiler_type,
get_llvm_tool_path,
get_oss_binary_folder,
get_pytorch_folder,
Reported by Pylint.
Line: 1
Column: 1
import argparse
import os
from typing import List, Optional, Tuple, cast
from ..util.setting import (
JSON_FOLDER_BASE_DIR,
LOG_DIR,
CompilerType,
Option,
Reported by Pylint.
Line: 41
Column: 1
}
def initialization() -> Tuple[Option, TestList, List[str]]:
# create folder if not exists
create_folders()
# add arguments
parser = argparse.ArgumentParser()
parser = add_arguments_utils(parser)
Reported by Pylint.
Line: 67
Column: 1
return (options, test_list, interested_folders)
def add_arguments_oss(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
parser.add_argument(
"--run-only",
help="only run certain test(s), for example: atest test_nn.py.",
nargs="*",
default=None,
Reported by Pylint.
Line: 78
Column: 1
return parser
def parse_arguments(
parser: argparse.ArgumentParser,
) -> Tuple[Option, Optional[List[str]], Optional[List[str]], Optional[bool]]:
# parse args
args = parser.parse_args()
# get option
Reported by Pylint.
Line: 88
Column: 1
return (options, args.interest_only, args.run_only, args.clean)
def get_test_list_by_type(
run_only: Optional[List[str]], test_type: TestType
) -> TestList:
test_list: TestList = []
binary_folder = get_oss_binary_folder(test_type)
g = os.walk(binary_folder)
Reported by Pylint.
Line: 93
Column: 5
) -> TestList:
test_list: TestList = []
binary_folder = get_oss_binary_folder(test_type)
g = os.walk(binary_folder)
for _, _, file_list in g:
for file_name in file_list:
if run_only is not None and file_name not in run_only:
continue
# target pattern in oss is used in printing report -- which tests we have run
Reported by Pylint.
test/onnx/model_defs/word_language_model.py
18 issues
Line: 4
Column: 1
# The model is from here:
# https://github.com/pytorch/examples/blob/master/word_language_model/model.py
import torch
import torch.nn as nn
from torch import Tensor
from typing import Tuple, Optional
class RNNModel(nn.Module):
Reported by Pylint.
Line: 5
Column: 1
# https://github.com/pytorch/examples/blob/master/word_language_model/model.py
import torch
import torch.nn as nn
from torch import Tensor
from typing import Tuple, Optional
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
Reported by Pylint.
Line: 6
Column: 1
import torch
import torch.nn as nn
from torch import Tensor
from typing import Tuple, Optional
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
Reported by Pylint.
Line: 60
Column: 23
self.decoder.bias.data.fill_(0)
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, input, hidden):
emb = self.drop(self.encoder(input))
output, hidden = self.rnn(emb, hidden)
output = self.drop(output)
decoded = self.decoder(output.view(output.size(0) * output.size(1), output.size(2)))
self.hidden = RNNModel.repackage_hidden(hidden)
Reported by Pylint.
Line: 83
Column: 23
"""Detach hidden states from their history."""
return h.detach()
def forward(self, input: Tensor, hidden: Tensor):
emb = self.drop(self.encoder(input))
output, hidden = self.rnn(emb, hidden)
output = self.drop(output)
decoded = self.decoder(output.view(output.size(0) * output.size(1), output.size(2)))
self.hidden = RNNModelWithTensorHidden.repackage_hidden(hidden)
Reported by Pylint.
Line: 98
Column: 23
"""Detach hidden states from their history."""
return (h[0].detach(), h[1].detach())
def forward(self, input: Tensor, hidden: Optional[Tuple[Tensor, Tensor]] = None):
emb = self.drop(self.encoder(input))
output, hidden = self.rnn(emb, hidden)
output = self.drop(output)
decoded = self.decoder(output.view(output.size(0) * output.size(1), output.size(2)))
self.hidden = self.repackage_hidden(tuple(hidden))
Reported by Pylint.
Line: 1
Column: 1
# The model is from here:
# https://github.com/pytorch/examples/blob/master/word_language_model/model.py
import torch
import torch.nn as nn
from torch import Tensor
from typing import Tuple, Optional
class RNNModel(nn.Module):
Reported by Pylint.
Line: 7
Column: 1
import torch
import torch.nn as nn
from torch import Tensor
from typing import Tuple, Optional
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers,
Reported by Pylint.
Line: 9
Column: 1
from torch import Tensor
from typing import Tuple, Optional
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers,
dropout=0.5, tie_weights=False, batchsize=2):
super(RNNModel, self).__init__()
Reported by Pylint.
Line: 12
Column: 5
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers,
dropout=0.5, tie_weights=False, batchsize=2):
super(RNNModel, self).__init__()
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
if rnn_type in ['LSTM', 'GRU']:
Reported by Pylint.
torch/distributions/distribution.py
18 issues
Line: 34
Column: 62
raise ValueError
Distribution._validate_args = value
def __init__(self, batch_shape=torch.Size(), event_shape=torch.Size(), validate_args=None):
self._batch_shape = batch_shape
self._event_shape = event_shape
if validate_args is not None:
self._validate_args = validate_args
if self._validate_args:
Reported by Pylint.
Line: 34
Column: 36
raise ValueError
Distribution._validate_args = value
def __init__(self, batch_shape=torch.Size(), event_shape=torch.Size(), validate_args=None):
self._batch_shape = batch_shape
self._event_shape = event_shape
if validate_args is not None:
self._validate_args = validate_args
if self._validate_args:
Reported by Pylint.
Line: 140
Column: 35
"""
return self.variance.sqrt()
def sample(self, sample_shape=torch.Size()):
"""
Generates a sample_shape shaped sample or sample_shape shaped batch of
samples if the distribution parameters are batched.
"""
with torch.no_grad():
Reported by Pylint.
Line: 148
Column: 36
with torch.no_grad():
return self.rsample(sample_shape)
def rsample(self, sample_shape=torch.Size()):
"""
Generates a sample_shape shaped reparameterized sample or sample_shape
shaped batch of reparameterized samples if the distribution parameters
are batched.
"""
Reported by Pylint.
Line: 162
Column: 28
parameters are batched.
"""
warnings.warn('sample_n will be deprecated. Use .sample((n,)) instead', UserWarning)
return self.sample(torch.Size((n,)))
def log_prob(self, value):
"""
Returns the log of the probability density/mass function evaluated at
`value`.
Reported by Pylint.
Line: 234
Column: 16
Returns:
Tensor of shape batch_shape.
"""
return torch.exp(self.entropy())
def _extended_shape(self, sample_shape=torch.Size()):
"""
Returns the size of the sample returned by the distribution, given
a `sample_shape`. Note, that the batch and event shapes of a distribution
Reported by Pylint.
Line: 236
Column: 44
"""
return torch.exp(self.entropy())
def _extended_shape(self, sample_shape=torch.Size()):
"""
Returns the size of the sample returned by the distribution, given
a `sample_shape`. Note, that the batch and event shapes of a distribution
instance are fixed at the time of construction. If this is empty, the
returned shape is upcast to (1,).
Reported by Pylint.
Line: 246
Column: 41
Args:
sample_shape (torch.Size): the size of the sample to be drawn.
"""
if not isinstance(sample_shape, torch.Size):
sample_shape = torch.Size(sample_shape)
return sample_shape + self._batch_shape + self._event_shape
def _validate_sample(self, value):
"""
Reported by Pylint.
Line: 247
Column: 28
sample_shape (torch.Size): the size of the sample to be drawn.
"""
if not isinstance(sample_shape, torch.Size):
sample_shape = torch.Size(sample_shape)
return sample_shape + self._batch_shape + self._event_shape
def _validate_sample(self, value):
"""
Argument validation for distribution methods such as `log_prob`,
Reported by Pylint.
Line: 1
Column: 1
import torch
import warnings
from torch.distributions import constraints
from torch.distributions.utils import lazy_property
from typing import Dict, Optional, Any
class Distribution(object):
r"""
Reported by Pylint.
caffe2/python/operator_test/arg_ops_test.py
18 issues
Line: 6
Column: 1
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 10
Column: 1
import numpy as np
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
class TestArgOps(serial.SerializedTestCase):
Reported by Pylint.
Line: 1
Column: 1
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core
Reported by Pylint.
Line: 15
Column: 1
import caffe2.python.serialized_test.serialized_test_util as serial
class TestArgOps(serial.SerializedTestCase):
@given(
X=hu.tensor(dtype=np.float32), axis=st.integers(-1, 5),
keepdims=st.booleans(), **hu.gcs)
@settings(deadline=None)
def test_argmax(self, X, axis, keepdims, gc, dc):
Reported by Pylint.
Line: 20
Column: 5
X=hu.tensor(dtype=np.float32), axis=st.integers(-1, 5),
keepdims=st.booleans(), **hu.gcs)
@settings(deadline=None)
def test_argmax(self, X, axis, keepdims, gc, dc):
if axis >= len(X.shape):
axis %= len(X.shape)
op = core.CreateOperator(
"ArgMax", ["X"], ["Indices"], axis=axis, keepdims=keepdims,
device_option=gc)
Reported by Pylint.
Line: 20
Column: 5
X=hu.tensor(dtype=np.float32), axis=st.integers(-1, 5),
keepdims=st.booleans(), **hu.gcs)
@settings(deadline=None)
def test_argmax(self, X, axis, keepdims, gc, dc):
if axis >= len(X.shape):
axis %= len(X.shape)
op = core.CreateOperator(
"ArgMax", ["X"], ["Indices"], axis=axis, keepdims=keepdims,
device_option=gc)
Reported by Pylint.
Line: 20
Column: 5
X=hu.tensor(dtype=np.float32), axis=st.integers(-1, 5),
keepdims=st.booleans(), **hu.gcs)
@settings(deadline=None)
def test_argmax(self, X, axis, keepdims, gc, dc):
if axis >= len(X.shape):
axis %= len(X.shape)
op = core.CreateOperator(
"ArgMax", ["X"], ["Indices"], axis=axis, keepdims=keepdims,
device_option=gc)
Reported by Pylint.
Line: 20
Column: 5
X=hu.tensor(dtype=np.float32), axis=st.integers(-1, 5),
keepdims=st.booleans(), **hu.gcs)
@settings(deadline=None)
def test_argmax(self, X, axis, keepdims, gc, dc):
if axis >= len(X.shape):
axis %= len(X.shape)
op = core.CreateOperator(
"ArgMax", ["X"], ["Indices"], axis=axis, keepdims=keepdims,
device_option=gc)
Reported by Pylint.
Line: 20
Column: 5
X=hu.tensor(dtype=np.float32), axis=st.integers(-1, 5),
keepdims=st.booleans(), **hu.gcs)
@settings(deadline=None)
def test_argmax(self, X, axis, keepdims, gc, dc):
if axis >= len(X.shape):
axis %= len(X.shape)
op = core.CreateOperator(
"ArgMax", ["X"], ["Indices"], axis=axis, keepdims=keepdims,
device_option=gc)
Reported by Pylint.
Line: 23
Column: 9
def test_argmax(self, X, axis, keepdims, gc, dc):
if axis >= len(X.shape):
axis %= len(X.shape)
op = core.CreateOperator(
"ArgMax", ["X"], ["Indices"], axis=axis, keepdims=keepdims,
device_option=gc)
def argmax_ref(X):
indices = np.argmax(X, axis=axis)
Reported by Pylint.
test/jit/test_tensor_creation_ops.py
18 issues
Line: 4
Column: 1
import os
import sys
import torch
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
Reported by Pylint.
Line: 9
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
Reported by Pylint.
Line: 1
Column: 1
import os
import sys
import torch
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
Reported by Pylint.
Line: 9
Column: 1
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
Reported by Pylint.
Line: 21
Column: 5
A suite of tests for ops that create tensors.
"""
def test_randperm_default_dtype(self):
def randperm(x: int):
perm = torch.randperm(x)
# Have to perform assertion here because TorchScript returns dtypes
# as integers, which are not comparable against eager torch.dtype.
assert perm.dtype == torch.int64
Reported by Pylint.
Line: 22
Column: 9
"""
def test_randperm_default_dtype(self):
def randperm(x: int):
perm = torch.randperm(x)
# Have to perform assertion here because TorchScript returns dtypes
# as integers, which are not comparable against eager torch.dtype.
assert perm.dtype == torch.int64
Reported by Pylint.
Line: 26
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
perm = torch.randperm(x)
# Have to perform assertion here because TorchScript returns dtypes
# as integers, which are not comparable against eager torch.dtype.
assert perm.dtype == torch.int64
self.checkScript(randperm, (3, ))
def test_randperm_specifed_dtype(self):
def randperm(x: int):
Reported by Bandit.
Line: 30
Column: 5
self.checkScript(randperm, (3, ))
def test_randperm_specifed_dtype(self):
def randperm(x: int):
perm = torch.randperm(x, dtype=torch.float)
# Have to perform assertion here because TorchScript returns dtypes
# as integers, which are not comparable against eager torch.dtype.
assert perm.dtype == torch.float
Reported by Pylint.
Line: 31
Column: 9
self.checkScript(randperm, (3, ))
def test_randperm_specifed_dtype(self):
def randperm(x: int):
perm = torch.randperm(x, dtype=torch.float)
# Have to perform assertion here because TorchScript returns dtypes
# as integers, which are not comparable against eager torch.dtype.
assert perm.dtype == torch.float
Reported by Pylint.
Line: 35
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
perm = torch.randperm(x, dtype=torch.float)
# Have to perform assertion here because TorchScript returns dtypes
# as integers, which are not comparable against eager torch.dtype.
assert perm.dtype == torch.float
self.checkScript(randperm, (3, ))
def test_triu_indices_default_dtype(self):
def triu_indices(rows: int, cols: int):
Reported by Bandit.
caffe2/python/operator_test/dropout_op_test.py
18 issues
Line: 6
Column: 1
from hypothesis import assume, given, settings
import hypothesis.strategies as st
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core
Reported by Pylint.
Line: 7
Column: 1
from hypothesis import assume, given, settings
import hypothesis.strategies as st
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 25
Column: 3
**hu.gcs)
def test_dropout_is_test(self, X, in_place, ratio, engine, gc, dc):
"""Test with is_test=True for a deterministic reference impl."""
# TODO(lukeyeager): enable this path when the GPU path is fixed
if in_place:
# Skip if trying in-place on GPU
assume(not (gc.device_type in {caffe2_pb2.CUDA, caffe2_pb2.HIP} and engine == ''))
# If in-place on CPU, don't compare with GPU
dc = dc[:1]
Reported by Pylint.
Line: 54
Column: 3
@settings(deadline=10000)
def test_dropout_ratio0(self, X, in_place, output_mask, engine, gc, dc):
"""Test with ratio=0 for a deterministic reference impl."""
# TODO(lukeyeager): enable this path when the op is fixed
if in_place:
# Skip if trying in-place on GPU
assume(gc.device_type not in {caffe2_pb2.CUDA, caffe2_pb2.HIP})
# If in-place on CPU, don't compare with GPU
dc = dc[:1]
Reported by Pylint.
Line: 1
Column: 1
from hypothesis import assume, given, settings
import hypothesis.strategies as st
import numpy as np
Reported by Pylint.
Line: 16
Column: 1
import caffe2.python.serialized_test.serialized_test_util as serial
class TestDropout(serial.SerializedTestCase):
@serial.given(X=hu.tensor(),
in_place=st.booleans(),
ratio=st.floats(0, 0.999),
engine=st.sampled_from(["", "CUDNN"]),
Reported by Pylint.
Line: 23
Column: 5
ratio=st.floats(0, 0.999),
engine=st.sampled_from(["", "CUDNN"]),
**hu.gcs)
def test_dropout_is_test(self, X, in_place, ratio, engine, gc, dc):
"""Test with is_test=True for a deterministic reference impl."""
# TODO(lukeyeager): enable this path when the GPU path is fixed
if in_place:
# Skip if trying in-place on GPU
assume(not (gc.device_type in {caffe2_pb2.CUDA, caffe2_pb2.HIP} and engine == ''))
Reported by Pylint.
Line: 23
Column: 5
ratio=st.floats(0, 0.999),
engine=st.sampled_from(["", "CUDNN"]),
**hu.gcs)
def test_dropout_is_test(self, X, in_place, ratio, engine, gc, dc):
"""Test with is_test=True for a deterministic reference impl."""
# TODO(lukeyeager): enable this path when the GPU path is fixed
if in_place:
# Skip if trying in-place on GPU
assume(not (gc.device_type in {caffe2_pb2.CUDA, caffe2_pb2.HIP} and engine == ''))
Reported by Pylint.
Line: 23
Column: 5
ratio=st.floats(0, 0.999),
engine=st.sampled_from(["", "CUDNN"]),
**hu.gcs)
def test_dropout_is_test(self, X, in_place, ratio, engine, gc, dc):
"""Test with is_test=True for a deterministic reference impl."""
# TODO(lukeyeager): enable this path when the GPU path is fixed
if in_place:
# Skip if trying in-place on GPU
assume(not (gc.device_type in {caffe2_pb2.CUDA, caffe2_pb2.HIP} and engine == ''))
Reported by Pylint.
Line: 23
Column: 5
ratio=st.floats(0, 0.999),
engine=st.sampled_from(["", "CUDNN"]),
**hu.gcs)
def test_dropout_is_test(self, X, in_place, ratio, engine, gc, dc):
"""Test with is_test=True for a deterministic reference impl."""
# TODO(lukeyeager): enable this path when the GPU path is fixed
if in_place:
# Skip if trying in-place on GPU
assume(not (gc.device_type in {caffe2_pb2.CUDA, caffe2_pb2.HIP} and engine == ''))
Reported by Pylint.
caffe2/python/observer_test.py
18 issues
Line: 8
Column: 1
import numpy as np
import unittest
from hypothesis import given, settings
import hypothesis.strategies as st
from caffe2.python import brew, core, model_helper, rnn_cell
import caffe2.python.workspace as ws
Reported by Pylint.
Line: 9
Column: 1
import numpy as np
import unittest
from hypothesis import given, settings
import hypothesis.strategies as st
from caffe2.python import brew, core, model_helper, rnn_cell
import caffe2.python.workspace as ws
Reported by Pylint.
Line: 84
Column: 37
)
init_blobs.extend([hidden_init, cell_init])
output, last_hidden, _, last_state = rnn_cell.LSTM(
model=model,
input_blob="input",
seq_lengths="seq_lengths",
initial_states=init_blobs,
dim_in=input_dim,
Reported by Pylint.
Line: 84
Column: 21
)
init_blobs.extend([hidden_init, cell_init])
output, last_hidden, _, last_state = rnn_cell.LSTM(
model=model,
input_blob="input",
seq_lengths="seq_lengths",
initial_states=init_blobs,
dim_in=input_dim,
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
import unittest
from hypothesis import given, settings
import hypothesis.strategies as st
Reported by Pylint.
Line: 7
Column: 1
import numpy as np
import unittest
from hypothesis import given, settings
import hypothesis.strategies as st
from caffe2.python import brew, core, model_helper, rnn_cell
import caffe2.python.workspace as ws
Reported by Pylint.
Line: 15
Column: 1
import caffe2.python.workspace as ws
class TestObservers(unittest.TestCase):
def setUp(self):
core.GlobalInit(["python", "caffe2"])
ws.ResetWorkspace()
self.model = model_helper.ModelHelper()
brew.fc(self.model, "data", "y",
Reported by Pylint.
Line: 30
Column: 5
ws.RunNetOnce(self.model.param_init_net)
ws.CreateNet(self.model.net)
def testObserver(self):
ob = self.model.net.AddObserver("TimeObserver")
ws.RunNet(self.model.net)
print(ob.average_time())
num = self.model.net.NumObservers()
self.model.net.RemoveObserver(ob)
Reported by Pylint.
Line: 30
Column: 5
ws.RunNetOnce(self.model.param_init_net)
ws.CreateNet(self.model.net)
def testObserver(self):
ob = self.model.net.AddObserver("TimeObserver")
ws.RunNet(self.model.net)
print(ob.average_time())
num = self.model.net.NumObservers()
self.model.net.RemoveObserver(ob)
Reported by Pylint.
Line: 31
Column: 9
ws.CreateNet(self.model.net)
def testObserver(self):
ob = self.model.net.AddObserver("TimeObserver")
ws.RunNet(self.model.net)
print(ob.average_time())
num = self.model.net.NumObservers()
self.model.net.RemoveObserver(ob)
assert(self.model.net.NumObservers() + 1 == num)
Reported by Pylint.
caffe2/python/operator_test/given_tensor_byte_string_to_uint8_fill_op_test.py
18 issues
Line: 7
Column: 1
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import numpy as np
import unittest
Reported by Pylint.
Line: 28
Column: 1
values=[X.tobytes()],
)
def constant_fill(*args, **kw):
return [X]
self.assertReferenceChecks(gc, op, [], constant_fill)
self.assertDeviceChecks(dc, op, [], [0])
Reported by Pylint.
Line: 28
Column: 1
values=[X.tobytes()],
)
def constant_fill(*args, **kw):
return [X]
self.assertReferenceChecks(gc, op, [], constant_fill)
self.assertDeviceChecks(dc, op, [], [0])
Reported by Pylint.
Line: 45
Column: 1
values=[X.tobytes()],
)
def constant_fill(*args, **kw):
return [X]
self.assertReferenceChecks(gc, op, [], constant_fill)
self.assertDeviceChecks(dc, op, [], [0])
Reported by Pylint.
Line: 45
Column: 1
values=[X.tobytes()],
)
def constant_fill(*args, **kw):
return [X]
self.assertReferenceChecks(gc, op, [], constant_fill)
self.assertDeviceChecks(dc, op, [], [0])
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import numpy as np
Reported by Pylint.
Line: 11
Column: 1
import caffe2.python.hypothesis_test_util as hu
import numpy as np
import unittest
class TestGivenTensorByteStringToUInt8FillOps(hu.HypothesisTestCase):
@given(X=hu.tensor(min_dim=1, max_dim=4, dtype=np.int32),
**hu.gcs)
Reported by Pylint.
Line: 14
Column: 1
import unittest
class TestGivenTensorByteStringToUInt8FillOps(hu.HypothesisTestCase):
@given(X=hu.tensor(min_dim=1, max_dim=4, dtype=np.int32),
**hu.gcs)
def test_given_tensor_byte_string_to_uint8_fill(self, X, gc, dc):
X = X.astype(np.uint8)
print('X: ', str(X))
Reported by Pylint.
Line: 17
Column: 5
class TestGivenTensorByteStringToUInt8FillOps(hu.HypothesisTestCase):
@given(X=hu.tensor(min_dim=1, max_dim=4, dtype=np.int32),
**hu.gcs)
def test_given_tensor_byte_string_to_uint8_fill(self, X, gc, dc):
X = X.astype(np.uint8)
print('X: ', str(X))
op = core.CreateOperator(
"GivenTensorByteStringToUInt8Fill",
[], ["Y"],
Reported by Pylint.
Line: 17
Column: 5
class TestGivenTensorByteStringToUInt8FillOps(hu.HypothesisTestCase):
@given(X=hu.tensor(min_dim=1, max_dim=4, dtype=np.int32),
**hu.gcs)
def test_given_tensor_byte_string_to_uint8_fill(self, X, gc, dc):
X = X.astype(np.uint8)
print('X: ', str(X))
op = core.CreateOperator(
"GivenTensorByteStringToUInt8Fill",
[], ["Y"],
Reported by Pylint.