The following issues were found:
third_party/miniz-2.0.8/examples/example2.c
11 issues
Line: 59
Column: 5
CWE codes:
120
Suggestion:
Use sprintf_s, snprintf, or vsnprintf
for (i = (N - 1); i >= 0; --i)
{
sprintf(archive_filename, "%u.txt", i);
sprintf(data, "%u %s %u", (N - 1) - i, s_pTest_str, i);
// Add a new file to the archive. Note this is an IN-PLACE operation, so if it fails your archive is probably hosed (its central directory may not be complete) but it should be recoverable using zip -F or -FF. So use caution with this guy.
// A more robust way to add a file to an archive would be to read it into memory, perform the operation, then write a new archive out to a temp file and then delete/rename the files.
// Or, write a new archive to disk to a temp file, then delete/rename the files. For this test this API is fine.
status = mz_zip_add_mem_to_archive_file_in_place(s_Test_archive_filename, archive_filename, data, 2, s_pComment, (uint16)strlen(s_pComment), MZ_BEST_COMPRESSION);
Reported by FlawFinder.
Line: 131
Column: 7
CWE codes:
120
Suggestion:
Use sprintf_s, snprintf, or vsnprintf
for (i = 0; i < N; i++)
{
sprintf(archive_filename, "%u.txt", i);
sprintf(data, "%u %s %u", (N - 1) - i, s_pTest_str, i);
// Try to extract all the files to the heap.
p = mz_zip_reader_extract_file_to_heap(&zip_archive, archive_filename, &uncomp_size, 0);
if (!p)
{
Reported by FlawFinder.
Line: 42
Column: 3
CWE codes:
119
120
Suggestion:
Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length
mz_zip_archive zip_archive;
void *p;
const int N = 50;
char data[2048];
char archive_filename[64];
static const char *s_Test_archive_filename = "__mz_example2_test__.zip";
assert((strlen(s_pTest_str) + 64) < sizeof(data));
Reported by FlawFinder.
Line: 43
Column: 3
CWE codes:
119
120
Suggestion:
Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length
void *p;
const int N = 50;
char data[2048];
char archive_filename[64];
static const char *s_Test_archive_filename = "__mz_example2_test__.zip";
assert((strlen(s_pTest_str) + 64) < sizeof(data));
printf("miniz.c version: %s\n", MZ_VERSION);
Reported by FlawFinder.
Line: 58
Column: 5
CWE codes:
120
Suggestion:
Use sprintf_s, snprintf, or vsnprintf
// Append a bunch of text files to the test archive
for (i = (N - 1); i >= 0; --i)
{
sprintf(archive_filename, "%u.txt", i);
sprintf(data, "%u %s %u", (N - 1) - i, s_pTest_str, i);
// Add a new file to the archive. Note this is an IN-PLACE operation, so if it fails your archive is probably hosed (its central directory may not be complete) but it should be recoverable using zip -F or -FF. So use caution with this guy.
// A more robust way to add a file to an archive would be to read it into memory, perform the operation, then write a new archive out to a temp file and then delete/rename the files.
// Or, write a new archive to disk to a temp file, then delete/rename the files. For this test this API is fine.
Reported by FlawFinder.
Line: 130
Column: 7
CWE codes:
120
Suggestion:
Use sprintf_s, snprintf, or vsnprintf
for (i = 0; i < N; i++)
{
sprintf(archive_filename, "%u.txt", i);
sprintf(data, "%u %s %u", (N - 1) - i, s_pTest_str, i);
// Try to extract all the files to the heap.
p = mz_zip_reader_extract_file_to_heap(&zip_archive, archive_filename, &uncomp_size, 0);
if (!p)
Reported by FlawFinder.
Line: 46
Column: 11
CWE codes:
126
char archive_filename[64];
static const char *s_Test_archive_filename = "__mz_example2_test__.zip";
assert((strlen(s_pTest_str) + 64) < sizeof(data));
printf("miniz.c version: %s\n", MZ_VERSION);
(void)argc, (void)argv;
Reported by FlawFinder.
Line: 64
Column: 126
CWE codes:
126
// Add a new file to the archive. Note this is an IN-PLACE operation, so if it fails your archive is probably hosed (its central directory may not be complete) but it should be recoverable using zip -F or -FF. So use caution with this guy.
// A more robust way to add a file to an archive would be to read it into memory, perform the operation, then write a new archive out to a temp file and then delete/rename the files.
// Or, write a new archive to disk to a temp file, then delete/rename the files. For this test this API is fine.
status = mz_zip_add_mem_to_archive_file_in_place(s_Test_archive_filename, archive_filename, data, 2, s_pComment, (uint16)strlen(s_pComment), MZ_BEST_COMPRESSION);
if (!status)
{
printf("mz_zip_add_mem_to_archive_file_in_place failed!\n");
return EXIT_FAILURE;
}
Reported by FlawFinder.
Line: 73
Column: 122
CWE codes:
126
}
// Add a directory entry for testing
status = mz_zip_add_mem_to_archive_file_in_place(s_Test_archive_filename, "directory/", NULL, 0, "no comment", (uint16)strlen("no comment"), MZ_BEST_COMPRESSION);
if (!status)
{
printf("mz_zip_add_mem_to_archive_file_in_place failed!\n");
return EXIT_FAILURE;
}
Reported by FlawFinder.
Line: 143
Column: 28
CWE codes:
126
}
// Make sure the extraction really succeeded.
if ((uncomp_size != (strlen(data) + 1)) || (memcmp(p, data, strlen(data))))
{
printf("mz_zip_reader_extract_file_to_heap() failed to extract the proper data\n");
mz_free(p);
mz_zip_reader_end(&zip_archive);
return EXIT_FAILURE;
Reported by FlawFinder.
torch/fx/experimental/fx2trt/converters/mul.py
11 issues
Line: 3
Column: 1
import operator
import torch
import tensorrt as trt
from torch.fx.experimental.fx2trt.fx2trt import tensorrt_converter
from .helper_functions import get_dyn_range, mark_as_int8_layer
@tensorrt_converter(torch.mul)
@tensorrt_converter(operator.mul)
Reported by Pylint.
Line: 6
Column: 1
import tensorrt as trt
from torch.fx.experimental.fx2trt.fx2trt import tensorrt_converter
from .helper_functions import get_dyn_range, mark_as_int8_layer
@tensorrt_converter(torch.mul)
@tensorrt_converter(operator.mul)
def mul(network, target, args, kwargs, layer_name):
# operator.mul
Reported by Pylint.
Line: 8
Column: 21
from .helper_functions import get_dyn_range, mark_as_int8_layer
@tensorrt_converter(torch.mul)
@tensorrt_converter(operator.mul)
def mul(network, target, args, kwargs, layer_name):
# operator.mul
if len(kwargs) == 0:
lhs_val, rhs_val = args
Reported by Pylint.
Line: 37
Column: 70
layer = network.add_elementwise(lhs_val, rhs_val, trt.ElementWiseOperation.PROD)
layer.name = layer_name
dyn_range = get_dyn_range(kwargs["scale"], kwargs["zero_point"], torch.quint8)
mark_as_int8_layer(layer, dyn_range)
return layer.get_output(0)
Reported by Pylint.
Line: 10
Column: 18
@tensorrt_converter(torch.mul)
@tensorrt_converter(operator.mul)
def mul(network, target, args, kwargs, layer_name):
# operator.mul
if len(kwargs) == 0:
lhs_val, rhs_val = args
else:
# torch.mul
Reported by Pylint.
Line: 28
Column: 28
@tensorrt_converter(torch.ops.quantized.mul)
def quantized_mul(network, target, args, kwargs, layer_name):
assert len(args) == 0
lhs_val, rhs_val = kwargs["qa"], kwargs["qb"]
if not all(isinstance(i, trt.tensorrt.ITensor) for i in [lhs_val, rhs_val]):
raise RuntimeError('Quantized mul received an input that is not part of the TensorRT region!')
Reported by Pylint.
Line: 1
Column: 1
import operator
import torch
import tensorrt as trt
from torch.fx.experimental.fx2trt.fx2trt import tensorrt_converter
from .helper_functions import get_dyn_range, mark_as_int8_layer
@tensorrt_converter(torch.mul)
@tensorrt_converter(operator.mul)
Reported by Pylint.
Line: 10
Column: 1
@tensorrt_converter(torch.mul)
@tensorrt_converter(operator.mul)
def mul(network, target, args, kwargs, layer_name):
# operator.mul
if len(kwargs) == 0:
lhs_val, rhs_val = args
else:
# torch.mul
Reported by Pylint.
Line: 28
Column: 1
@tensorrt_converter(torch.ops.quantized.mul)
def quantized_mul(network, target, args, kwargs, layer_name):
assert len(args) == 0
lhs_val, rhs_val = kwargs["qa"], kwargs["qb"]
if not all(isinstance(i, trt.tensorrt.ITensor) for i in [lhs_val, rhs_val]):
raise RuntimeError('Quantized mul received an input that is not part of the TensorRT region!')
Reported by Pylint.
Line: 29
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
@tensorrt_converter(torch.ops.quantized.mul)
def quantized_mul(network, target, args, kwargs, layer_name):
assert len(args) == 0
lhs_val, rhs_val = kwargs["qa"], kwargs["qb"]
if not all(isinstance(i, trt.tensorrt.ITensor) for i in [lhs_val, rhs_val]):
raise RuntimeError('Quantized mul received an input that is not part of the TensorRT region!')
Reported by Bandit.
tools/test/test_test_history.py
11 issues
Line: 7
Column: 1
import unittest
from typing import List, Optional
from tools.stats import test_history
from typing_extensions import TypedDict
class Example(TypedDict):
cmd: str
Reported by Pylint.
Line: 8
Column: 1
from typing import List, Optional
from tools.stats import test_history
from typing_extensions import TypedDict
class Example(TypedDict):
cmd: str
args: List[str]
Reported by Pylint.
Line: 32
Column: 32
return {
'cmd': cmd,
'args': shlex.split(''.join(args)),
'lines': block[i + 1:]
}
return None
def parse_description(description: str) -> List[Example]:
Reported by Pylint.
Line: 1
Column: 1
import itertools
import re
import shlex
import unittest
from typing import List, Optional
from tools.stats import test_history
from typing_extensions import TypedDict
Reported by Pylint.
Line: 11
Column: 1
from typing_extensions import TypedDict
class Example(TypedDict):
cmd: str
args: List[str]
lines: List[str]
Reported by Pylint.
Line: 11
Column: 1
from typing_extensions import TypedDict
class Example(TypedDict):
cmd: str
args: List[str]
lines: List[str]
Reported by Pylint.
Line: 17
Column: 1
lines: List[str]
def parse_block(block: List[str]) -> Optional[Example]:
if block:
match = re.match(r'^\$ ([^ ]+) (.*)$', block[0])
if match:
cmd, first = match.groups()
args = []
Reported by Pylint.
Line: 37
Column: 1
return None
def parse_description(description: str) -> List[Example]:
examples: List[Example] = []
for block in description.split('\n\n'):
matches = [
re.match(r'^ (.*)$', line)
for line in block.splitlines()
Reported by Pylint.
Line: 47
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
if all(matches):
lines = []
for match in matches:
assert match
line, = match.groups()
lines.append(line)
example = parse_block(lines)
if example:
examples.append(example)
Reported by Bandit.
Line: 56
Column: 1
return examples
class TestTestHistory(unittest.TestCase):
maxDiff = None
def test_help_examples(self) -> None:
examples = parse_description(test_history.description())
self.assertEqual(len(examples), 3)
Reported by Pylint.
torch/__config__.py
11 issues
Line: 9
Column: 12
Return a human-readable string with descriptions of the
configuration of PyTorch.
"""
return torch._C._show_config()
# TODO: In principle, we could provide more structured version/config
# information here. For now only CXX_FLAGS is exposed, as Timer
# uses them.
def _cxx_flags():
Reported by Pylint.
Line: 9
Column: 12
Return a human-readable string with descriptions of the
configuration of PyTorch.
"""
return torch._C._show_config()
# TODO: In principle, we could provide more structured version/config
# information here. For now only CXX_FLAGS is exposed, as Timer
# uses them.
def _cxx_flags():
Reported by Pylint.
Line: 11
Column: 3
"""
return torch._C._show_config()
# TODO: In principle, we could provide more structured version/config
# information here. For now only CXX_FLAGS is exposed, as Timer
# uses them.
def _cxx_flags():
"""Returns the CXX_FLAGS used when building PyTorch."""
return torch._C._cxx_flags()
Reported by Pylint.
Line: 16
Column: 12
# uses them.
def _cxx_flags():
"""Returns the CXX_FLAGS used when building PyTorch."""
return torch._C._cxx_flags()
def parallel_info():
r"""Returns detailed string with parallelization settings"""
return torch._C._parallel_info()
Reported by Pylint.
Line: 16
Column: 12
# uses them.
def _cxx_flags():
"""Returns the CXX_FLAGS used when building PyTorch."""
return torch._C._cxx_flags()
def parallel_info():
r"""Returns detailed string with parallelization settings"""
return torch._C._parallel_info()
Reported by Pylint.
Line: 20
Column: 12
def parallel_info():
r"""Returns detailed string with parallelization settings"""
return torch._C._parallel_info()
Reported by Pylint.
Line: 20
Column: 12
def parallel_info():
r"""Returns detailed string with parallelization settings"""
return torch._C._parallel_info()
Reported by Pylint.
Line: 1
Column: 1
import torch
def show():
"""
Return a human-readable string with descriptions of the
configuration of PyTorch.
"""
return torch._C._show_config()
Reported by Pylint.
Line: 9
Column: 12
Return a human-readable string with descriptions of the
configuration of PyTorch.
"""
return torch._C._show_config()
# TODO: In principle, we could provide more structured version/config
# information here. For now only CXX_FLAGS is exposed, as Timer
# uses them.
def _cxx_flags():
Reported by Pylint.
Line: 16
Column: 12
# uses them.
def _cxx_flags():
"""Returns the CXX_FLAGS used when building PyTorch."""
return torch._C._cxx_flags()
def parallel_info():
r"""Returns detailed string with parallelization settings"""
return torch._C._parallel_info()
Reported by Pylint.
torch/_ops.py
11 issues
Line: 105
Column: 16
if sys.executable == "torch_deploy":
return
path = torch._utils_internal.resolve_library_path(path)
with dl_open_guard():
# Import the shared library into the process, thus running its
# static (global) initialization code in order to register custom
# operators with the JIT.
ctypes.CDLL(path)
Reported by Pylint.
Line: 1
Column: 1
import torch._C
import contextlib
import ctypes
import sys
import types
import torch.jit
import torch._utils_internal
Reported by Pylint.
Line: 3
Column: 1
import torch._C
import contextlib
import ctypes
import sys
import types
import torch.jit
import torch._utils_internal
Reported by Pylint.
Line: 4
Column: 1
import torch._C
import contextlib
import ctypes
import sys
import types
import torch.jit
import torch._utils_internal
Reported by Pylint.
Line: 5
Column: 1
import contextlib
import ctypes
import sys
import types
import torch.jit
import torch._utils_internal
Reported by Pylint.
Line: 6
Column: 1
import contextlib
import ctypes
import sys
import types
import torch.jit
import torch._utils_internal
# Query `hasattr` only once.
Reported by Pylint.
Line: 32
Column: 1
# _OpNamespace is a subclass of ModuleType because the torch script
# allows attribute lookups on modules only. Since we want torch.ops.foo.bar()
# to work from script, we need to ensure ops and foo are modules
class _OpNamespace(types.ModuleType):
"""
An op namespace to dynamically bind Operators into Python.
Say a user has created a custom Operator called "my_namespace::my_op". To
call this op, the user will write torch.ops.my_namespace.my_op(...).
Reported by Pylint.
Line: 53
Column: 9
operation will already exist).
"""
def __init__(self, name):
super(_OpNamespace, self).__init__('torch.ops.' + name)
self.name = name
def __getattr__(self, op_name):
# It is not a valid op_name when __file__ is passed in
if op_name == '__file__':
Reported by Pylint.
Line: 63
Column: 9
# Get the op `my_namespace::my_op` if available. This will also check
# for overloads and raise an exception if there are more than one.
qualified_op_name = '{}::{}'.format(self.name, op_name)
op = torch._C._jit_get_operation(qualified_op_name)
# let the script frontend know that op is identical to the builtin op
# with qualified_op_name
torch.jit._builtins._register_builtin(op, qualified_op_name)
setattr(self, op_name, op)
op.__module__ = self.__module__ + "." + self.name
Reported by Pylint.
Line: 75
Column: 9
__file__ = '_ops.py'
def __init__(self):
super(_Ops, self).__init__('torch.ops')
self.loaded_libraries = set()
def __getattr__(self, name):
# Here we are creating `torch.ops.my_namespace`
namespace = _OpNamespace(name)
Reported by Pylint.
tools/build_pytorch_libs.py
11 issues
Line: 6
Column: 1
import shutil
from typing import Dict, Optional
from .setup_helpers.env import IS_64BIT, IS_WINDOWS, check_negative_env_flag
from .setup_helpers.cmake import USE_NINJA, CMake
from setuptools import distutils # type: ignore[import]
def _overlay_windows_vcvars(env: Dict[str, str]) -> Dict[str, str]:
Reported by Pylint.
Line: 7
Column: 1
from typing import Dict, Optional
from .setup_helpers.env import IS_64BIT, IS_WINDOWS, check_negative_env_flag
from .setup_helpers.cmake import USE_NINJA, CMake
from setuptools import distutils # type: ignore[import]
def _overlay_windows_vcvars(env: Dict[str, str]) -> Dict[str, str]:
vc_arch = 'x64' if IS_64BIT else 'x86'
Reported by Pylint.
Line: 13
Column: 30
def _overlay_windows_vcvars(env: Dict[str, str]) -> Dict[str, str]:
vc_arch = 'x64' if IS_64BIT else 'x86'
vc_env: Dict[str, str] = distutils._msvccompiler._get_vc_env(vc_arch)
# Keys in `_get_vc_env` are always lowercase.
# We turn them into uppercase before overlaying vcvars
# because OS environ keys are always uppercase on Windows.
# https://stackoverflow.com/a/7797329
vc_env = {k.upper(): v for k, v in vc_env.items()}
Reported by Pylint.
Line: 13
Column: 30
def _overlay_windows_vcvars(env: Dict[str, str]) -> Dict[str, str]:
vc_arch = 'x64' if IS_64BIT else 'x86'
vc_env: Dict[str, str] = distutils._msvccompiler._get_vc_env(vc_arch)
# Keys in `_get_vc_env` are always lowercase.
# We turn them into uppercase before overlaying vcvars
# because OS environ keys are always uppercase on Windows.
# https://stackoverflow.com/a/7797329
vc_env = {k.upper(): v for k, v in vc_env.items()}
Reported by Pylint.
Line: 27
Column: 3
def _create_build_env() -> Dict[str, str]:
# XXX - our cmake file sometimes looks at the system environment
# and not cmake flags!
# you should NEVER add something to this list. It is bad practice to
# have cmake read the environment
my_env = os.environ.copy()
if 'CUDA_HOME' in my_env: # Keep CUDA_HOME. This env variable is still used in other part.
Reported by Pylint.
Line: 1
Column: 1
import os
from glob import glob
import shutil
from typing import Dict, Optional
from .setup_helpers.env import IS_64BIT, IS_WINDOWS, check_negative_env_flag
from .setup_helpers.cmake import USE_NINJA, CMake
from setuptools import distutils # type: ignore[import]
Reported by Pylint.
Line: 9
Column: 1
from .setup_helpers.env import IS_64BIT, IS_WINDOWS, check_negative_env_flag
from .setup_helpers.cmake import USE_NINJA, CMake
from setuptools import distutils # type: ignore[import]
def _overlay_windows_vcvars(env: Dict[str, str]) -> Dict[str, str]:
vc_arch = 'x64' if IS_64BIT else 'x86'
vc_env: Dict[str, str] = distutils._msvccompiler._get_vc_env(vc_arch)
# Keys in `_get_vc_env` are always lowercase.
Reported by Pylint.
Line: 19
Column: 12
# because OS environ keys are always uppercase on Windows.
# https://stackoverflow.com/a/7797329
vc_env = {k.upper(): v for k, v in vc_env.items()}
for k, v in env.items():
uk = k.upper()
if uk not in vc_env:
vc_env[uk] = v
return vc_env
Reported by Pylint.
Line: 20
Column: 9
# https://stackoverflow.com/a/7797329
vc_env = {k.upper(): v for k, v in vc_env.items()}
for k, v in env.items():
uk = k.upper()
if uk not in vc_env:
vc_env[uk] = v
return vc_env
Reported by Pylint.
Line: 48
Column: 1
return my_env
def build_caffe2(
version: Optional[str],
cmake_python_library: Optional[str],
build_python: bool,
rerun_cmake: bool,
cmake_only: bool,
Reported by Pylint.
tools/codegen/code_template.py
11 issues
Line: 1
Column: 1
import re
from typing import Match, Optional, Sequence, Mapping
# match $identifier or ${identifier} and replace with value in env
# If this identifier is at the beginning of whitespace on a line
# and its value is a list then it is treated as
# block substitution by indenting to that depth and putting each element
# of the list on its own line
# if the identifier is on a line starting with non-whitespace and a list
Reported by Pylint.
Line: 14
Column: 1
# if this list is not empty and ${foo,} will insert one after.
class CodeTemplate:
# Python 2.7.5 has a bug where the leading (^[^\n\S]*)? does not work,
# workaround via appending another [^\n\S]? inside
substitution_str = r'(^[^\n\S]*[^\n\S]?)?\$([^\d\W]\w*|\{,?[^\d\W]\w*\,?})'
Reported by Pylint.
Line: 32
Column: 5
filename: str
@staticmethod
def from_file(filename: str) -> 'CodeTemplate':
with open(filename, 'r') as f:
return CodeTemplate(f.read(), filename)
def __init__(self, pattern: str, filename: str = "") -> None:
self.pattern = pattern
Reported by Pylint.
Line: 33
Column: 37
@staticmethod
def from_file(filename: str) -> 'CodeTemplate':
with open(filename, 'r') as f:
return CodeTemplate(f.read(), filename)
def __init__(self, pattern: str, filename: str = "") -> None:
self.pattern = pattern
self.filename = filename
Reported by Pylint.
Line: 40
Column: 5
self.pattern = pattern
self.filename = filename
def substitute(self, env: Optional[Mapping[str, object]] = None, **kwargs: object) -> str:
if env is None:
env = {}
def lookup(v: str) -> object:
assert env is not None
Reported by Pylint.
Line: 44
Column: 9
if env is None:
env = {}
def lookup(v: str) -> object:
assert env is not None
return kwargs[v] if v in kwargs else env[v]
def indent_lines(indent: str, v: Sequence[object]) -> str:
return "".join([indent + l + "\n" for e in v for l in str(e).splitlines()]).rstrip()
Reported by Pylint.
Line: 45
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
env = {}
def lookup(v: str) -> object:
assert env is not None
return kwargs[v] if v in kwargs else env[v]
def indent_lines(indent: str, v: Sequence[object]) -> str:
return "".join([indent + l + "\n" for e in v for l in str(e).splitlines()]).rstrip()
Reported by Bandit.
Line: 48
Column: 9
assert env is not None
return kwargs[v] if v in kwargs else env[v]
def indent_lines(indent: str, v: Sequence[object]) -> str:
return "".join([indent + l + "\n" for e in v for l in str(e).splitlines()]).rstrip()
def replace(match: Match[str]) -> str:
indent = match.group(1)
key = match.group(2)
Reported by Pylint.
Line: 64
Column: 13
if key[-1] == ',':
comma_after = ', '
key = key[:-1]
v = lookup(key)
if indent is not None:
if not isinstance(v, list):
v = [v]
return indent_lines(indent, v)
elif isinstance(v, list):
Reported by Pylint.
Line: 65
Column: 13
comma_after = ', '
key = key[:-1]
v = lookup(key)
if indent is not None:
if not isinstance(v, list):
v = [v]
return indent_lines(indent, v)
elif isinstance(v, list):
middle = ', '.join([str(x) for x in v])
Reported by Pylint.
torch/fx/_pytree.py
11 issues
Line: 1
Column: 1
from typing import Callable, Any, Tuple, List, Dict, Type, NamedTuple
from torch.utils._pytree import PyTree, TreeSpec, LeafSpec
from collections import namedtuple
FlattenFuncSpec = Callable[[PyTree, TreeSpec], List]
SUPPORTED_NODES: Dict[Type[Any], Any] = {}
def register_pytree_flatten_spec(typ: Any, flatten_fn_spec: FlattenFuncSpec) -> None:
SUPPORTED_NODES[typ] = flatten_fn_spec
Reported by Pylint.
Line: 3
Column: 1
from typing import Callable, Any, Tuple, List, Dict, Type, NamedTuple
from torch.utils._pytree import PyTree, TreeSpec, LeafSpec
from collections import namedtuple
FlattenFuncSpec = Callable[[PyTree, TreeSpec], List]
SUPPORTED_NODES: Dict[Type[Any], Any] = {}
def register_pytree_flatten_spec(typ: Any, flatten_fn_spec: FlattenFuncSpec) -> None:
SUPPORTED_NODES[typ] = flatten_fn_spec
Reported by Pylint.
Line: 8
Column: 1
FlattenFuncSpec = Callable[[PyTree, TreeSpec], List]
SUPPORTED_NODES: Dict[Type[Any], Any] = {}
def register_pytree_flatten_spec(typ: Any, flatten_fn_spec: FlattenFuncSpec) -> None:
SUPPORTED_NODES[typ] = flatten_fn_spec
def tree_flatten_spec(pytree: PyTree, spec: TreeSpec) -> List[Any]:
if isinstance(spec, LeafSpec):
return [pytree]
Reported by Pylint.
Line: 11
Column: 1
def register_pytree_flatten_spec(typ: Any, flatten_fn_spec: FlattenFuncSpec) -> None:
SUPPORTED_NODES[typ] = flatten_fn_spec
def tree_flatten_spec(pytree: PyTree, spec: TreeSpec) -> List[Any]:
if isinstance(spec, LeafSpec):
return [pytree]
if spec.type not in SUPPORTED_NODES:
raise RuntimeError(
f"{type(pytree)} does not have a flatten_fn_spec associated with it. Please register one with"
Reported by Pylint.
Line: 16
Column: 1
return [pytree]
if spec.type not in SUPPORTED_NODES:
raise RuntimeError(
f"{type(pytree)} does not have a flatten_fn_spec associated with it. Please register one with"
"torch.fx._pytree.register_pytree_flatten_spec. If you have serialized your model, make"
"sure that any custom pytrees have been registered before loading it.")
flatten_fn_spec = SUPPORTED_NODES[spec.type]
child_pytrees = flatten_fn_spec(pytree, spec)
result = []
Reported by Pylint.
Line: 17
Column: 1
if spec.type not in SUPPORTED_NODES:
raise RuntimeError(
f"{type(pytree)} does not have a flatten_fn_spec associated with it. Please register one with"
"torch.fx._pytree.register_pytree_flatten_spec. If you have serialized your model, make"
"sure that any custom pytrees have been registered before loading it.")
flatten_fn_spec = SUPPORTED_NODES[spec.type]
child_pytrees = flatten_fn_spec(pytree, spec)
result = []
for child, child_spec in zip(child_pytrees, spec.children_specs):
Reported by Pylint.
Line: 27
Column: 1
result += flat
return result
def _dict_flatten_spec(d: Dict[Any, Any], spec: TreeSpec) -> List[Any]:
return list([d[k] for k in spec.context])
def _list_flatten_spec(d: List[Any], spec: TreeSpec) -> List[Any]:
return [d[i] for i in range(len(spec.children_specs))]
Reported by Pylint.
Line: 28
Column: 12
return result
def _dict_flatten_spec(d: Dict[Any, Any], spec: TreeSpec) -> List[Any]:
return list([d[k] for k in spec.context])
def _list_flatten_spec(d: List[Any], spec: TreeSpec) -> List[Any]:
return [d[i] for i in range(len(spec.children_specs))]
def _tuple_flatten_spec(d: Tuple[Any], spec: TreeSpec) -> List[Any]:
Reported by Pylint.
Line: 30
Column: 1
def _dict_flatten_spec(d: Dict[Any, Any], spec: TreeSpec) -> List[Any]:
return list([d[k] for k in spec.context])
def _list_flatten_spec(d: List[Any], spec: TreeSpec) -> List[Any]:
return [d[i] for i in range(len(spec.children_specs))]
def _tuple_flatten_spec(d: Tuple[Any], spec: TreeSpec) -> List[Any]:
return [d[i] for i in range(len(spec.children_specs))]
Reported by Pylint.
Line: 33
Column: 1
def _list_flatten_spec(d: List[Any], spec: TreeSpec) -> List[Any]:
return [d[i] for i in range(len(spec.children_specs))]
def _tuple_flatten_spec(d: Tuple[Any], spec: TreeSpec) -> List[Any]:
return [d[i] for i in range(len(spec.children_specs))]
def _namedtuple_flatten_spec(d: NamedTuple, spec: TreeSpec) -> List[Any]:
return [d[i] for i in range(len(spec.children_specs))]
Reported by Pylint.
torch/fx/__init__.py
11 issues
Line: 84
Column: 1
repository.
'''
from .graph_module import GraphModule
from ._symbolic_trace import symbolic_trace, Tracer, wrap, PH, ProxyableClassMeta
from .graph import Graph
from .node import Node, map_arg
from .proxy import Proxy
from .interpreter import Interpreter as Interpreter, Transformer as Transformer
Reported by Pylint.
Line: 85
Column: 1
'''
from .graph_module import GraphModule
from ._symbolic_trace import symbolic_trace, Tracer, wrap, PH, ProxyableClassMeta
from .graph import Graph
from .node import Node, map_arg
from .proxy import Proxy
from .interpreter import Interpreter as Interpreter, Transformer as Transformer
from .subgraph_rewriter import replace_pattern
Reported by Pylint.
Line: 86
Column: 1
from .graph_module import GraphModule
from ._symbolic_trace import symbolic_trace, Tracer, wrap, PH, ProxyableClassMeta
from .graph import Graph
from .node import Node, map_arg
from .proxy import Proxy
from .interpreter import Interpreter as Interpreter, Transformer as Transformer
from .subgraph_rewriter import replace_pattern
Reported by Pylint.
Line: 87
Column: 1
from .graph_module import GraphModule
from ._symbolic_trace import symbolic_trace, Tracer, wrap, PH, ProxyableClassMeta
from .graph import Graph
from .node import Node, map_arg
from .proxy import Proxy
from .interpreter import Interpreter as Interpreter, Transformer as Transformer
from .subgraph_rewriter import replace_pattern
Reported by Pylint.
Line: 88
Column: 1
from ._symbolic_trace import symbolic_trace, Tracer, wrap, PH, ProxyableClassMeta
from .graph import Graph
from .node import Node, map_arg
from .proxy import Proxy
from .interpreter import Interpreter as Interpreter, Transformer as Transformer
from .subgraph_rewriter import replace_pattern
Reported by Pylint.
Line: 89
Column: 1
from .graph import Graph
from .node import Node, map_arg
from .proxy import Proxy
from .interpreter import Interpreter as Interpreter, Transformer as Transformer
from .subgraph_rewriter import replace_pattern
Reported by Pylint.
Line: 90
Column: 1
from .node import Node, map_arg
from .proxy import Proxy
from .interpreter import Interpreter as Interpreter, Transformer as Transformer
from .subgraph_rewriter import replace_pattern
Reported by Pylint.
Line: 33
Column: 1
"""
graph(x):
%param : [#users=1] = self.param
%add_1 : [#users=1] = call_function[target=<built-in function add>](args = (%x, %param), kwargs = {})
%linear_1 : [#users=1] = call_module[target=linear](args = (%add_1,), kwargs = {})
%clamp_1 : [#users=1] = call_method[target=clamp](args = (%linear_1,), kwargs = {min: 0.0, max: 1.0})
return clamp_1
"""
Reported by Pylint.
Line: 35
Column: 1
%param : [#users=1] = self.param
%add_1 : [#users=1] = call_function[target=<built-in function add>](args = (%x, %param), kwargs = {})
%linear_1 : [#users=1] = call_module[target=linear](args = (%add_1,), kwargs = {})
%clamp_1 : [#users=1] = call_method[target=clamp](args = (%linear_1,), kwargs = {min: 0.0, max: 1.0})
return clamp_1
"""
# Code generation - valid Python code
print(symbolic_traced.code)
Reported by Pylint.
Line: 89
Column: 1
from .graph import Graph
from .node import Node, map_arg
from .proxy import Proxy
from .interpreter import Interpreter as Interpreter, Transformer as Transformer
from .subgraph_rewriter import replace_pattern
Reported by Pylint.
test/package/common.py
11 issues
Line: 5
Column: 1
import sys
from tempfile import NamedTemporaryFile
import torch.package.package_exporter
from torch.testing._internal.common_utils import IS_WINDOWS, TestCase
class PackageTestCase(TestCase):
def __init__(self, *args, **kwargs):
Reported by Pylint.
Line: 6
Column: 1
from tempfile import NamedTemporaryFile
import torch.package.package_exporter
from torch.testing._internal.common_utils import IS_WINDOWS, TestCase
class PackageTestCase(TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
Reported by Pylint.
Line: 32
Column: 9
self.package_test_dir = os.path.dirname(os.path.realpath(__file__))
self.orig_sys_path = sys.path.copy()
sys.path.append(self.package_test_dir)
torch.package.package_exporter._gate_torchscript_serialization = False
def tearDown(self):
super().tearDown()
sys.path = self.orig_sys_path
Reported by Pylint.
Line: 1
Column: 1
import os
import sys
from tempfile import NamedTemporaryFile
import torch.package.package_exporter
from torch.testing._internal.common_utils import IS_WINDOWS, TestCase
class PackageTestCase(TestCase):
Reported by Pylint.
Line: 9
Column: 1
from torch.testing._internal.common_utils import IS_WINDOWS, TestCase
class PackageTestCase(TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._temporary_files = []
def temp(self):
Reported by Pylint.
Line: 14
Column: 5
super().__init__(*args, **kwargs)
self._temporary_files = []
def temp(self):
t = NamedTemporaryFile()
name = t.name
if IS_WINDOWS:
t.close() # can't read an open file in windows
else:
Reported by Pylint.
Line: 15
Column: 9
self._temporary_files = []
def temp(self):
t = NamedTemporaryFile()
name = t.name
if IS_WINDOWS:
t.close() # can't read an open file in windows
else:
self._temporary_files.append(t)
Reported by Pylint.
Line: 23
Column: 5
self._temporary_files.append(t)
return name
def setUp(self):
"""Add test/package/ to module search path. This ensures that
importing our fake packages via, e.g. `import package_a` will always
work regardless of how we invoke the test.
"""
super().setUp()
Reported by Pylint.
Line: 34
Column: 5
sys.path.append(self.package_test_dir)
torch.package.package_exporter._gate_torchscript_serialization = False
def tearDown(self):
super().tearDown()
sys.path = self.orig_sys_path
# remove any temporary files
for t in self._temporary_files:
Reported by Pylint.
Line: 34
Column: 5
sys.path.append(self.package_test_dir)
torch.package.package_exporter._gate_torchscript_serialization = False
def tearDown(self):
super().tearDown()
sys.path = self.orig_sys_path
# remove any temporary files
for t in self._temporary_files:
Reported by Pylint.