The following issues were found:
torch/distributed/rpc/server_process_global_profiler.py
19 issues
Line: 8
Column: 1
import torch
from torch.autograd.profiler_legacy import profile
from . import (
_disable_server_process_global_profiler,
_enable_server_process_global_profiler,
)
Reported by Pylint.
Line: 171
Column: 9
)
self.function_events._build_tree()
self.process_global_function_events = process_global_function_events
return False
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/python3
import itertools
import torch
from torch.autograd.profiler_legacy import profile
from . import (
_disable_server_process_global_profiler,
Reported by Pylint.
Line: 14
Column: 1
)
class _server_process_global_profile(profile):
"""
It has the same API as ``torch.autograd.profiler.profile`` class,
except that it enables profiling on all threads running RPC server request callbacks.
Context manager that manages autograd profiler state and holds a summary of results.
Reported by Pylint.
Line: 70
Column: 1
>>> inner_profile_rref.rpc_sync().__exit__(None, None, None)
>>> outer_profile_rref.rpc_sync().__exit__(None, None, None)
>>> print(inner_profile_rref.rpc_sync().key_averages())
--------- --------------- --------------- --------------- --------------- --------------- ---------------
Name Self CPU total % Self CPU total CPU total % CPU total CPU time avg Number of Calls
--------- --------------- --------------- --------------- --------------- --------------- ---------------
sub 85.06% 76.275us 100.00% 89.667us 89.667us 1
empty 14.94% 13.392us 14.94% 13.392us 13.392us 1
--------- --------------- --------------- --------------- --------------- --------------- ---------------
Reported by Pylint.
Line: 71
Column: 1
>>> outer_profile_rref.rpc_sync().__exit__(None, None, None)
>>> print(inner_profile_rref.rpc_sync().key_averages())
--------- --------------- --------------- --------------- --------------- --------------- ---------------
Name Self CPU total % Self CPU total CPU total % CPU total CPU time avg Number of Calls
--------- --------------- --------------- --------------- --------------- --------------- ---------------
sub 85.06% 76.275us 100.00% 89.667us 89.667us 1
empty 14.94% 13.392us 14.94% 13.392us 13.392us 1
--------- --------------- --------------- --------------- --------------- --------------- ---------------
Self CPU time total: 89.667us
Reported by Pylint.
Line: 72
Column: 1
>>> print(inner_profile_rref.rpc_sync().key_averages())
--------- --------------- --------------- --------------- --------------- --------------- ---------------
Name Self CPU total % Self CPU total CPU total % CPU total CPU time avg Number of Calls
--------- --------------- --------------- --------------- --------------- --------------- ---------------
sub 85.06% 76.275us 100.00% 89.667us 89.667us 1
empty 14.94% 13.392us 14.94% 13.392us 13.392us 1
--------- --------------- --------------- --------------- --------------- --------------- ---------------
Self CPU time total: 89.667us
>>> print(outer_profile_rref.rpc_sync().key_averages())
Reported by Pylint.
Line: 73
Column: 1
--------- --------------- --------------- --------------- --------------- --------------- ---------------
Name Self CPU total % Self CPU total CPU total % CPU total CPU time avg Number of Calls
--------- --------------- --------------- --------------- --------------- --------------- ---------------
sub 85.06% 76.275us 100.00% 89.667us 89.667us 1
empty 14.94% 13.392us 14.94% 13.392us 13.392us 1
--------- --------------- --------------- --------------- --------------- --------------- ---------------
Self CPU time total: 89.667us
>>> print(outer_profile_rref.rpc_sync().key_averages())
--------- --------------- --------------- --------------- --------------- --------------- ---------------
Reported by Pylint.
Line: 74
Column: 1
Name Self CPU total % Self CPU total CPU total % CPU total CPU time avg Number of Calls
--------- --------------- --------------- --------------- --------------- --------------- ---------------
sub 85.06% 76.275us 100.00% 89.667us 89.667us 1
empty 14.94% 13.392us 14.94% 13.392us 13.392us 1
--------- --------------- --------------- --------------- --------------- --------------- ---------------
Self CPU time total: 89.667us
>>> print(outer_profile_rref.rpc_sync().key_averages())
--------- --------------- --------------- --------------- --------------- --------------- ---------------
Name Self CPU total % Self CPU total CPU total % CPU total CPU time avg Number of Calls
Reported by Pylint.
Line: 75
Column: 1
--------- --------------- --------------- --------------- --------------- --------------- ---------------
sub 85.06% 76.275us 100.00% 89.667us 89.667us 1
empty 14.94% 13.392us 14.94% 13.392us 13.392us 1
--------- --------------- --------------- --------------- --------------- --------------- ---------------
Self CPU time total: 89.667us
>>> print(outer_profile_rref.rpc_sync().key_averages())
--------- --------------- --------------- --------------- --------------- --------------- ---------------
Name Self CPU total % Self CPU total CPU total % CPU total CPU time avg Number of Calls
--------- --------------- --------------- --------------- --------------- --------------- ---------------
Reported by Pylint.
third_party/build_bundled.py
19 issues
Line: 11
Column: 15
def collect_license(current):
collected = {}
for root, dirs, files in os.walk(current):
license = list(licenses & set(files))
if license:
name = root.split('/')[-1]
license_file = os.path.join(root, license[0])
try:
Reported by Pylint.
Line: 12
Column: 9
def collect_license(current):
collected = {}
for root, dirs, files in os.walk(current):
license = list(licenses & set(files))
if license:
name = root.split('/')[-1]
license_file = os.path.join(root, license[0])
try:
ident = identify_license(license_file)
Reported by Pylint.
Line: 71
Column: 21
t = t.replace('``', '"').replace("''", '"')
return t
with open(f) as fid:
txt = fid.read()
if not exception and 'exception' in txt:
license = identify_license(f, 'exception')
return license + ' with exception'
txt = squeeze(txt)
Reported by Pylint.
Line: 74
Column: 13
with open(f) as fid:
txt = fid.read()
if not exception and 'exception' in txt:
license = identify_license(f, 'exception')
return license + ' with exception'
txt = squeeze(txt)
if 'ApacheLicense' in txt:
# Hmm, do we need to check the text?
return 'Apache-2.0'
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/env python3
import os
mydir = os.path.dirname(__file__)
licenses = {'LICENSE', 'LICENSE.txt', 'LICENSE.rst', 'COPYING.BSD'}
def collect_license(current):
Reported by Pylint.
Line: 9
Column: 1
licenses = {'LICENSE', 'LICENSE.txt', 'LICENSE.rst', 'COPYING.BSD'}
def collect_license(current):
collected = {}
for root, dirs, files in os.walk(current):
license = list(licenses & set(files))
if license:
name = root.split('/')[-1]
Reported by Pylint.
Line: 39
Column: 1
return collected
def create_bundled(d, outstream):
"""Write the information to an open outstream"""
collected = collect_license(d)
sorted_keys = sorted(collected.keys())
outstream.write('The Pytorch repository and source distributions bundle '
'several libraries that are \n')
Reported by Pylint.
Line: 47
Column: 9
'several libraries that are \n')
outstream.write('compatibly licensed. We list these here.\n\n')
for k in sorted_keys:
c = collected[k]
files = ',\n '.join(c['Files'])
license_file = ',\n '.join(c['License_file'])
outstream.write(f"Name: {c['Name']}\n")
outstream.write(f"License: {c['License']}\n")
outstream.write(f"Files: {files}\n")
Reported by Pylint.
Line: 55
Column: 32
outstream.write(f"Files: {files}\n")
outstream.write(' For details, see ')
outstream.write(license_file)
outstream.write('\n\n')
def identify_license(f, exception=''):
"""
Read f and try to identify the license type
Reported by Pylint.
Line: 58
Column: 1
outstream.write('\n\n')
def identify_license(f, exception=''):
"""
Read f and try to identify the license type
This is __very__ rough and probably not legally binding, it is specific for
this repo.
"""
Reported by Pylint.
torch/fx/experimental/const_fold.py
19 issues
Line: 214
Column: 36
setattr(
mod_traced,
const_output_name,
torch.nn.Parameter(torch.randn(1)),
)
with split.submod_1.graph.inserting_before(node):
node.replace_all_uses_with(split.submod_1.graph.get_attr(const_output_name))
split.submod_1.graph.erase_node(node)
ph_idx += 1
Reported by Pylint.
Line: 16
Column: 1
return x if isinstance(x, tuple) else (x,)
class FoldedGraphModule(torch.fx.GraphModule):
"""
FoldedGraphModule is a GraphModule which also contains another
`const_subgraph_module` representing a subgraph which has all const attr
inputs and which can be run once before running the main standard
`graph`. The `const_output_names` are the ordered list names of attrs which
Reported by Pylint.
Line: 176
Column: 21
else:
if (
node.op == "call_function"
and node.target == operator.__getitem__
and node.args[0].target == "submod_0"
):
const_output_name = const_output_names[node.args[1]]
# Now map from the index of the constant into calling submod_1 and map
Reported by Pylint.
Line: 1
Column: 1
import operator
from typing import Dict, Set, List, Optional
import torch.fx
from torch.fx.passes.split_module import split_module
import re
def _make_tuple(x):
Reported by Pylint.
Line: 6
Column: 1
import torch.fx
from torch.fx.passes.split_module import split_module
import re
def _make_tuple(x):
"""
Helper to convert x into a one item tuple if it's not a tuple already.
Reported by Pylint.
Line: 9
Column: 1
import re
def _make_tuple(x):
"""
Helper to convert x into a one item tuple if it's not a tuple already.
"""
return x if isinstance(x, tuple) else (x,)
Reported by Pylint.
Line: 47
Column: 5
self.run_folding()
return super().__call__(*args)
def run_folding(self):
# If there's no const subgraph module or attr output names to use, return
# early as there is no const folding to perform.
if self.const_subgraph_module is None or self.const_output_names is None:
return
Reported by Pylint.
Line: 53
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
if self.const_subgraph_module is None or self.const_output_names is None:
return
assert not self.has_folding_been_run
self.has_folding_been_run = True
# Actually run const folding subgraph. We _make_tuple here because
# single attr const fold subgraphs output a single Tensor while
# multiple outputs are returned as Tuple[Tensor,].
Reported by Bandit.
Line: 63
Column: 9
# Look for output node from const folding subgraph and set attrs on the
# module with the results.
for i in range(len(folded_attrs)):
setattr(
self, self.const_output_names[i], torch.nn.Parameter(folded_attrs[i])
)
Reported by Pylint.
Line: 69
Column: 1
)
def split_const_subgraphs(
module: torch.nn.Module,
) -> FoldedGraphModule:
"""
Looks through `module` for any nodes that have all constant attribute inputs
and separates them out into their own constant subgraph, and returns a
Reported by Pylint.
tools/codegen/api/native.py
19 issues
Line: 23
Column: 5
# (no translation is needed from dispatcher API to native API).
def name(func: FunctionSchema) -> str:
name = str(func.name.name)
# TODO: delete this!
if func.is_out_fn():
name += '_out'
if func.name.overload_name:
name += f'_{func.name.overload_name}'
Reported by Pylint.
Line: 24
Column: 3
def name(func: FunctionSchema) -> str:
name = str(func.name.name)
# TODO: delete this!
if func.is_out_fn():
name += '_out'
if func.name.overload_name:
name += f'_{func.name.overload_name}'
return name
Reported by Pylint.
Line: 76
Column: 3
default = None
if should_default:
default = '{}'
# TODO: Not sure why the arguments assigned here are for
# TensorOptionsArguments and not the constituent pieces. It seems
# to matter
return [
Binding(
nctype=NamedCType('dtype', OptionalCType(BaseCType(scalarTypeT))),
Reported by Pylint.
Line: 1
Column: 1
from tools.codegen.model import (Argument, FunctionSchema, Return,
SelfArgument, TensorOptionsArguments, Type,
assert_never)
from tools.codegen.api.types import (ArgName, BaseCType, Binding,
ConstRefCType, NamedCType, CType, MutRefCType, ListCType,
OptionalCType, tensorT, scalarT, layoutT,
deviceT, boolT, scalarTypeT)
from tools.codegen.api import cpp
Reported by Pylint.
Line: 12
Column: 1
from tools.codegen.api import cpp
from tools.codegen import local
from typing import Union, Sequence, List, Optional
# This file describes the translation of JIT schema to the native functions API.
# This looks a lot like the C++ API (which makes historical sense, because the
# idea was you wrote native functions to implement functions in the C++ API),
# but over time we have evolved the C++ API without actually changing our
Reported by Pylint.
Line: 22
Column: 1
# line up as closely as possible, since this results in the least overhead
# (no translation is needed from dispatcher API to native API).
def name(func: FunctionSchema) -> str:
name = str(func.name.name)
# TODO: delete this!
if func.is_out_fn():
name += '_out'
if func.name.overload_name:
Reported by Pylint.
Line: 31
Column: 1
name += f'_{func.name.overload_name}'
return name
def argumenttype_type(t: Type, *, mutable: bool, binds: ArgName) -> NamedCType:
if str(t) == 'Tensor?':
tensor_type: OptionalCType = OptionalCType(BaseCType(tensorT))
if mutable and not local.use_const_ref_for_mutable_tensors():
return NamedCType(binds, MutRefCType(tensor_type))
else:
Reported by Pylint.
Line: 31
Column: 1
name += f'_{func.name.overload_name}'
return name
def argumenttype_type(t: Type, *, mutable: bool, binds: ArgName) -> NamedCType:
if str(t) == 'Tensor?':
tensor_type: OptionalCType = OptionalCType(BaseCType(tensorT))
if mutable and not local.use_const_ref_for_mutable_tensors():
return NamedCType(binds, MutRefCType(tensor_type))
else:
Reported by Pylint.
Line: 34
Column: 9
def argumenttype_type(t: Type, *, mutable: bool, binds: ArgName) -> NamedCType:
if str(t) == 'Tensor?':
tensor_type: OptionalCType = OptionalCType(BaseCType(tensorT))
if mutable and not local.use_const_ref_for_mutable_tensors():
return NamedCType(binds, MutRefCType(tensor_type))
else:
return NamedCType(binds, ConstRefCType(tensor_type))
elif str(t) == 'Tensor?[]':
return NamedCType(binds, ConstRefCType(ListCType(OptionalCType(BaseCType(tensorT)))))
Reported by Pylint.
Line: 46
Column: 1
return NamedCType(binds, ConstRefCType(OptionalCType(BaseCType(scalarT))))
return cpp.argumenttype_type(t, mutable=mutable, binds=binds)
def returns_type(rs: Sequence[Return]) -> CType:
return cpp.returns_type(rs)
def argument_type(a: Argument, *, binds: ArgName) -> NamedCType:
return argumenttype_type(a.type, mutable=a.is_write, binds=binds)
Reported by Pylint.
tools/test/test_translate_annotations.py
19 issues
Line: 4
Column: 1
import re
import unittest
from tools.linter.translate_annotations import parse_annotation, parse_diff, translate
flake8_regex \
= r'^(?P<filename>.*?):(?P<lineNumber>\d+):(?P<columnNumber>\d+): (?P<errorCode>\w+\d+) (?P<errorDesc>.*)'
clang_tidy_regex \
= r'^(?P<filename>.*?):(?P<lineNumber>\d+):(?P<columnNumber>\d+): (?P<errorDesc>.*?) \[(?P<errorCode>.*)\]'
Reported by Pylint.
Line: 1
Column: 1
import re
import unittest
from tools.linter.translate_annotations import parse_annotation, parse_diff, translate
flake8_regex \
= r'^(?P<filename>.*?):(?P<lineNumber>\d+):(?P<columnNumber>\d+): (?P<errorCode>\w+\d+) (?P<errorDesc>.*)'
clang_tidy_regex \
= r'^(?P<filename>.*?):(?P<lineNumber>\d+):(?P<columnNumber>\d+): (?P<errorDesc>.*?) \[(?P<errorCode>.*)\]'
Reported by Pylint.
Line: 6
Column: 1
from tools.linter.translate_annotations import parse_annotation, parse_diff, translate
flake8_regex \
= r'^(?P<filename>.*?):(?P<lineNumber>\d+):(?P<columnNumber>\d+): (?P<errorCode>\w+\d+) (?P<errorDesc>.*)'
clang_tidy_regex \
= r'^(?P<filename>.*?):(?P<lineNumber>\d+):(?P<columnNumber>\d+): (?P<errorDesc>.*?) \[(?P<errorCode>.*)\]'
# in the below example patch, note that the filenames differ, so the
Reported by Pylint.
Line: 7
Column: 1
from tools.linter.translate_annotations import parse_annotation, parse_diff, translate
flake8_regex \
= r'^(?P<filename>.*?):(?P<lineNumber>\d+):(?P<columnNumber>\d+): (?P<errorCode>\w+\d+) (?P<errorDesc>.*)'
clang_tidy_regex \
= r'^(?P<filename>.*?):(?P<lineNumber>\d+):(?P<columnNumber>\d+): (?P<errorDesc>.*?) \[(?P<errorCode>.*)\]'
# in the below example patch, note that the filenames differ, so the
# translation should reflect that as well as the line numbers
Reported by Pylint.
Line: 8
Column: 1
flake8_regex \
= r'^(?P<filename>.*?):(?P<lineNumber>\d+):(?P<columnNumber>\d+): (?P<errorCode>\w+\d+) (?P<errorDesc>.*)'
clang_tidy_regex \
= r'^(?P<filename>.*?):(?P<lineNumber>\d+):(?P<columnNumber>\d+): (?P<errorDesc>.*?) \[(?P<errorCode>.*)\]'
# in the below example patch, note that the filenames differ, so the
# translation should reflect that as well as the line numbers
Reported by Pylint.
Line: 9
Column: 1
flake8_regex \
= r'^(?P<filename>.*?):(?P<lineNumber>\d+):(?P<columnNumber>\d+): (?P<errorCode>\w+\d+) (?P<errorDesc>.*)'
clang_tidy_regex \
= r'^(?P<filename>.*?):(?P<lineNumber>\d+):(?P<columnNumber>\d+): (?P<errorDesc>.*?) \[(?P<errorCode>.*)\]'
# in the below example patch, note that the filenames differ, so the
# translation should reflect that as well as the line numbers
# $ git clone -b 1.0.2 https://github.com/cscorley/whatthepatch.git
Reported by Pylint.
Line: 17
Column: 1
# $ git clone -b 1.0.2 https://github.com/cscorley/whatthepatch.git
# $ cd whatthepatch/tests/casefiles
# $ git diff --no-index --unified=0 lao tzu
lao_tzu_diff = '''
diff --git a/lao b/tzu
index 635ef2c..5af88a8 100644
--- a/lao
+++ b/tzu
@@ -1,2 +0,0 @@
Reported by Pylint.
Line: 35
Column: 1
+The door of all subtleties!
'''.lstrip()
sparser_diff = '''
diff --git a/foo.txt b/bar.txt
index 27a6dad..6fae323 100644
--- a/foo.txt
+++ b/bar.txt
@@ -4,3 +4,2 @@ lines
Reported by Pylint.
Line: 51
Column: 1
-even more
'''.lstrip()
new_file_diff = '''
diff --git a/torch/csrc/jit/tensorexpr/operators/conv2d.h b/torch/csrc/jit/tensorexpr/operators/conv2d.h
new file mode 100644
index 0000000000..a81eeae346
--- /dev/null
+++ b/torch/csrc/jit/tensorexpr/operators/conv2d.h
Reported by Pylint.
Line: 80
Column: 1
'''.lstrip()
# fun fact, this example fools VS Code's diff syntax highlighter
haskell_diff = '''
diff --git a/hello.hs b/hello.hs
index ffb8d4ad14..0872ac9db6 100644
--- a/hello.hs
+++ b/hello.hs
@@ -1 +1 @@
Reported by Pylint.
test/test_type_info.py
18 issues
Line: 1
Column: 1
from torch.testing._internal.common_utils import TestCase, run_tests, TEST_NUMPY, load_tests
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
import torch
import unittest
Reported by Pylint.
Line: 7
Column: 1
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
import torch
import unittest
if TEST_NUMPY:
import numpy as np
Reported by Pylint.
Line: 5
Column: 1
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
import torch
import unittest
if TEST_NUMPY:
Reported by Pylint.
Line: 1
Column: 1
from torch.testing._internal.common_utils import TestCase, run_tests, TEST_NUMPY, load_tests
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
import torch
import unittest
Reported by Pylint.
Line: 7
Column: 1
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
import torch
import unittest
if TEST_NUMPY:
import numpy as np
Reported by Pylint.
Line: 8
Column: 1
load_tests = load_tests
import torch
import unittest
if TEST_NUMPY:
import numpy as np
Reported by Pylint.
Line: 8
Column: 1
load_tests = load_tests
import torch
import unittest
if TEST_NUMPY:
import numpy as np
Reported by Pylint.
Line: 14
Column: 1
import numpy as np
class TestDTypeInfo(TestCase):
def test_invalid_input(self):
for dtype in [torch.float16, torch.float32, torch.float64, torch.bfloat16, torch.complex64, torch.complex128, torch.bool]:
with self.assertRaises(TypeError):
_ = torch.iinfo(dtype)
Reported by Pylint.
Line: 16
Column: 5
class TestDTypeInfo(TestCase):
def test_invalid_input(self):
for dtype in [torch.float16, torch.float32, torch.float64, torch.bfloat16, torch.complex64, torch.complex128, torch.bool]:
with self.assertRaises(TypeError):
_ = torch.iinfo(dtype)
for dtype in [torch.int64, torch.int32, torch.int16, torch.int8, torch.uint8, torch.bool]:
Reported by Pylint.
Line: 17
Column: 1
class TestDTypeInfo(TestCase):
def test_invalid_input(self):
for dtype in [torch.float16, torch.float32, torch.float64, torch.bfloat16, torch.complex64, torch.complex128, torch.bool]:
with self.assertRaises(TypeError):
_ = torch.iinfo(dtype)
for dtype in [torch.int64, torch.int32, torch.int16, torch.int8, torch.uint8, torch.bool]:
with self.assertRaises(TypeError):
Reported by Pylint.
tools/generate_torch_version.py
18 issues
Line: 8
Column: 13
from setuptools import distutils # type: ignore[import]
from typing import Optional, Union
def get_sha(pytorch_root: Union[str, Path]) -> str:
try:
return subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=pytorch_root).decode('ascii').strip()
except Exception:
return 'Unknown'
Reported by Pylint.
Line: 11
Column: 12
def get_sha(pytorch_root: Union[str, Path]) -> str:
try:
return subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=pytorch_root).decode('ascii').strip()
except Exception:
return 'Unknown'
def get_torch_version(sha: Optional[str] = None) -> str:
pytorch_root = Path(__file__).parent.parent
version = open(pytorch_root / 'version.txt', 'r').read().strip()
Reported by Pylint.
Line: 14
Column: 23
except Exception:
return 'Unknown'
def get_torch_version(sha: Optional[str] = None) -> str:
pytorch_root = Path(__file__).parent.parent
version = open(pytorch_root / 'version.txt', 'r').read().strip()
if os.getenv('PYTORCH_BUILD_VERSION'):
assert os.getenv('PYTORCH_BUILD_NUMBER') is not None
Reported by Pylint.
Line: 15
Column: 5
return 'Unknown'
def get_torch_version(sha: Optional[str] = None) -> str:
pytorch_root = Path(__file__).parent.parent
version = open(pytorch_root / 'version.txt', 'r').read().strip()
if os.getenv('PYTORCH_BUILD_VERSION'):
assert os.getenv('PYTORCH_BUILD_NUMBER') is not None
build_number = int(os.getenv('PYTORCH_BUILD_NUMBER', ""))
Reported by Pylint.
Line: 16
Column: 5
def get_torch_version(sha: Optional[str] = None) -> str:
pytorch_root = Path(__file__).parent.parent
version = open(pytorch_root / 'version.txt', 'r').read().strip()
if os.getenv('PYTORCH_BUILD_VERSION'):
assert os.getenv('PYTORCH_BUILD_NUMBER') is not None
build_number = int(os.getenv('PYTORCH_BUILD_NUMBER', ""))
version = os.getenv('PYTORCH_BUILD_VERSION', "")
Reported by Pylint.
Line: 1
Column: 1
import argparse
import os
import subprocess
from pathlib import Path
from setuptools import distutils # type: ignore[import]
from typing import Optional, Union
def get_sha(pytorch_root: Union[str, Path]) -> str:
try:
Reported by Pylint.
Line: 3
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b404-import-subprocess
import argparse
import os
import subprocess
from pathlib import Path
from setuptools import distutils # type: ignore[import]
from typing import Optional, Union
def get_sha(pytorch_root: Union[str, Path]) -> str:
try:
Reported by Bandit.
Line: 6
Column: 1
import subprocess
from pathlib import Path
from setuptools import distutils # type: ignore[import]
from typing import Optional, Union
def get_sha(pytorch_root: Union[str, Path]) -> str:
try:
return subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=pytorch_root).decode('ascii').strip()
except Exception:
Reported by Pylint.
Line: 8
Column: 1
from setuptools import distutils # type: ignore[import]
from typing import Optional, Union
def get_sha(pytorch_root: Union[str, Path]) -> str:
try:
return subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=pytorch_root).decode('ascii').strip()
except Exception:
return 'Unknown'
Reported by Pylint.
Line: 10
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b603_subprocess_without_shell_equals_true.html
def get_sha(pytorch_root: Union[str, Path]) -> str:
try:
return subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=pytorch_root).decode('ascii').strip()
except Exception:
return 'Unknown'
def get_torch_version(sha: Optional[str] = None) -> str:
pytorch_root = Path(__file__).parent.parent
Reported by Bandit.
torch/csrc/utils/byte_order.cpp
18 issues
Line: 185
CWE codes:
908
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
union { uint32_t x; float f; };
x = (order == THP_BIG_ENDIAN ? decodeUInt32BE(src) : decodeUInt32LE(src));
dst[i] = f;
src += sizeof(float);
}
}
void THP_decodeDoubleBuffer(double* dst, const uint8_t* src, THPByteOrder order, size_t len)
Reported by Cppcheck.
Line: 196
CWE codes:
908
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
union { uint64_t x; double d; };
x = (order == THP_BIG_ENDIAN ? decodeUInt64BE(src) : decodeUInt64LE(src));
dst[i] = d;
src += sizeof(double);
}
}
void THP_decodeComplexFloatBuffer(c10::complex<float>* dst, const uint8_t* src, THPByteOrder order, size_t len)
Reported by Cppcheck.
Line: 18
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
{
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
uint16_t output;
memcpy(&output, ptr, sizeof(uint16_t));
#if defined(_MSC_VER) && !defined(_DEBUG)
output = _byteswap_ushort(output);
#elif defined(__llvm__) || defined(__GNUC__) && !defined(__ICC)
output = __builtin_bswap16(output);
#else
Reported by FlawFinder.
Line: 28
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
uint16_t Lo = output << 8;
output = Hi | Lo;
#endif
memcpy(ptr, &output, sizeof(uint16_t));
}
static inline void swapBytes32(void *ptr)
{
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
Reported by FlawFinder.
Line: 35
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
{
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
uint32_t output;
memcpy(&output, ptr, sizeof(uint32_t));
#if defined(_MSC_VER) && !defined(_DEBUG)
output = _byteswap_ulong(output);
#elif defined(__llvm__) || defined(__GNUC__) && !defined(__ICC)
output = __builtin_bswap32(output);
#else
Reported by FlawFinder.
Line: 47
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
uint32_t Byte3 = output & 0xFF000000;
output = (Byte0 << 24) | (Byte1 << 8) | (Byte2 >> 8) | (Byte3 >> 24);
#endif
memcpy(ptr, &output, sizeof(uint32_t));
}
static inline void swapBytes64(void *ptr)
{
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
Reported by FlawFinder.
Line: 54
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
{
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
uint64_t output;
memcpy(&output, ptr, sizeof(uint64_t));
#if defined(_MSC_VER)
output = _byteswap_uint64(output);
#elif defined(__llvm__) || defined(__GNUC__) && !defined(__ICC)
output = __builtin_bswap64(output);
#else
Reported by FlawFinder.
Line: 71
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
output = (Byte0 << (7*8)) | (Byte1 << (5*8)) | (Byte2 << (3*8)) | (Byte3 << (1*8)) |
(Byte7 >> (7*8)) | (Byte6 >> (5*8)) | (Byte5 >> (3*8)) | (Byte4 >> (1*8));
#endif
memcpy(ptr, &output, sizeof(uint64_t));
}
static inline uint16_t decodeUInt16LE(const uint8_t *data) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
uint16_t output;
Reported by FlawFinder.
Line: 77
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
static inline uint16_t decodeUInt16LE(const uint8_t *data) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
uint16_t output;
memcpy(&output, data, sizeof(uint16_t));
return output;
}
static inline uint16_t decodeUInt16BE(const uint8_t *data) {
uint16_t output = decodeUInt16LE(data);
Reported by FlawFinder.
Line: 90
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
static inline uint32_t decodeUInt32LE(const uint8_t *data) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
uint32_t output;
memcpy(&output, data, sizeof(uint32_t));
return output;
}
static inline uint32_t decodeUInt32BE(const uint8_t *data) {
uint32_t output = decodeUInt32LE(data);
Reported by FlawFinder.
test/test_type_hints.py
18 issues
Line: 2
Column: 1
import unittest
from torch.testing._internal.common_utils import TestCase, run_tests, set_cwd
import tempfile
import torch
import doctest
import os
import inspect
from pathlib import Path
Reported by Pylint.
Line: 4
Column: 1
import unittest
from torch.testing._internal.common_utils import TestCase, run_tests, set_cwd
import tempfile
import torch
import doctest
import os
import inspect
from pathlib import Path
Reported by Pylint.
Line: 35
Column: 5
blocklist = {
"_np",
}
allexamples = ""
example_file_lines = [
"import torch",
"import torch.nn.functional as F",
"import math",
Reported by Pylint.
Line: 123
Column: 3
except OSError:
raise unittest.SkipTest('cannot symlink') from None
repo_rootdir = Path(__file__).resolve().parent.parent
# TODO: Would be better not to chdir here, this affects the
# entire process!
with set_cwd(str(repo_rootdir)):
(stdout, stderr, result) = mypy.api.run([
'--cache-dir=.mypy_cache/doc',
'--no-strict-optional', # needed because of torch.lu_unpack, see gh-36584
Reported by Pylint.
Line: 1
Column: 1
import unittest
from torch.testing._internal.common_utils import TestCase, run_tests, set_cwd
import tempfile
import torch
import doctest
import os
import inspect
from pathlib import Path
Reported by Pylint.
Line: 3
Column: 1
import unittest
from torch.testing._internal.common_utils import TestCase, run_tests, set_cwd
import tempfile
import torch
import doctest
import os
import inspect
from pathlib import Path
Reported by Pylint.
Line: 5
Column: 1
from torch.testing._internal.common_utils import TestCase, run_tests, set_cwd
import tempfile
import torch
import doctest
import os
import inspect
from pathlib import Path
try:
Reported by Pylint.
Line: 6
Column: 1
import tempfile
import torch
import doctest
import os
import inspect
from pathlib import Path
try:
import mypy.api
Reported by Pylint.
Line: 7
Column: 1
import torch
import doctest
import os
import inspect
from pathlib import Path
try:
import mypy.api
HAVE_MYPY = True
Reported by Pylint.
Line: 8
Column: 1
import doctest
import os
import inspect
from pathlib import Path
try:
import mypy.api
HAVE_MYPY = True
except ImportError:
Reported by Pylint.
torch/fx/experimental/fx2trt/converters/convolution.py
18 issues
Line: 2
Column: 1
import torch
import tensorrt as trt
from torch.fx.experimental.fx2trt.fx2trt import tensorrt_converter
from .helper_functions import extend_attr_to_tuple, mark_as_int8_layer, to_numpy, get_dyn_range
def common_conv(network, mod, dimension, input_val, layer_name, is_quantized):
if mod.padding_mode != "zeros":
raise RuntimeError(f"Only support padding mode: zeros, got {mod.padding_mode}.")
Reported by Pylint.
Line: 5
Column: 1
import tensorrt as trt
from torch.fx.experimental.fx2trt.fx2trt import tensorrt_converter
from .helper_functions import extend_attr_to_tuple, mark_as_int8_layer, to_numpy, get_dyn_range
def common_conv(network, mod, dimension, input_val, layer_name, is_quantized):
if mod.padding_mode != "zeros":
raise RuntimeError(f"Only support padding mode: zeros, got {mod.padding_mode}.")
Reported by Pylint.
Line: 34
Column: 76
if is_quantized:
# Assume the dtype of activation is torch.quint8
mark_as_int8_layer(layer, get_dyn_range(mod.scale, mod.zero_point, torch.quint8))
return layer.get_output(0)
def common_conv_relu(network, mod, dimension, input_val, layer_name, is_quantized):
Reported by Pylint.
Line: 39
Column: 36
return layer.get_output(0)
def common_conv_relu(network, mod, dimension, input_val, layer_name, is_quantized):
conv_output = common_conv(
network,
mod,
dimension=2,
input_val=input_val,
Reported by Pylint.
Line: 73
Column: 45
@tensorrt_converter(torch.nn.quantized.modules.conv.Conv2d)
def quantized_conv2d(network, submod, args, kwargs, layer_name):
input_val = args[0]
if not isinstance(input_val, trt.tensorrt.ITensor):
raise RuntimeError(f'Quantized Conv2d received input {input_val} that is not part '
'of the TensorRT region!')
Reported by Pylint.
Line: 84
Column: 50
@tensorrt_converter(torch.nn.intrinsic.quantized.modules.ConvReLU2d)
def quantized_conv_relu2d(network, submod, args, kwargs, layer_name):
input_val = args[0]
if not isinstance(input_val, trt.tensorrt.ITensor):
raise RuntimeError(f'Quantized ConvReLU2d received input {input_val} that is not part '
'of the TensorRT region!')
Reported by Pylint.
Line: 1
Column: 1
import torch
import tensorrt as trt
from torch.fx.experimental.fx2trt.fx2trt import tensorrt_converter
from .helper_functions import extend_attr_to_tuple, mark_as_int8_layer, to_numpy, get_dyn_range
def common_conv(network, mod, dimension, input_val, layer_name, is_quantized):
if mod.padding_mode != "zeros":
raise RuntimeError(f"Only support padding mode: zeros, got {mod.padding_mode}.")
Reported by Pylint.
Line: 7
Column: 1
from .helper_functions import extend_attr_to_tuple, mark_as_int8_layer, to_numpy, get_dyn_range
def common_conv(network, mod, dimension, input_val, layer_name, is_quantized):
if mod.padding_mode != "zeros":
raise RuntimeError(f"Only support padding mode: zeros, got {mod.padding_mode}.")
kernel_size = extend_attr_to_tuple(mod, "kernel_size", dimension)
stride = extend_attr_to_tuple(mod, "stride", dimension)
Reported by Pylint.
Line: 7
Column: 1
from .helper_functions import extend_attr_to_tuple, mark_as_int8_layer, to_numpy, get_dyn_range
def common_conv(network, mod, dimension, input_val, layer_name, is_quantized):
if mod.padding_mode != "zeros":
raise RuntimeError(f"Only support padding mode: zeros, got {mod.padding_mode}.")
kernel_size = extend_attr_to_tuple(mod, "kernel_size", dimension)
stride = extend_attr_to_tuple(mod, "stride", dimension)
Reported by Pylint.
Line: 39
Column: 1
return layer.get_output(0)
def common_conv_relu(network, mod, dimension, input_val, layer_name, is_quantized):
conv_output = common_conv(
network,
mod,
dimension=2,
input_val=input_val,
Reported by Pylint.