The following issues were found:
pipenv/vendor/vistir/backports/tempfile.py
27 issues
Line: 22
Column: 9
try:
return os.fsencode(path)
except AttributeError:
from ..compat import fs_encode
return fs_encode(path)
def fs_decode(path):
Reported by Pylint.
Line: 31
Column: 9
try:
return os.fsdecode(path)
except AttributeError:
from ..compat import fs_decode
return fs_decode(path)
__all__ = ["finalize", "NamedTemporaryFile"]
Reported by Pylint.
Line: 217
Column: 18
if not wrapper_class_override:
wrapper_class_override = _TemporaryFileWrapper
if os.name == "nt" and delete:
flags |= os.O_TEMPORARY
if sys.version_info < (3, 5):
(fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
else:
(fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)
try:
Reported by Pylint.
Line: 8
Column: 1
import io
import os
import sys
from tempfile import _bin_openflags, _mkstemp_inner, gettempdir
import six
try:
from weakref import finalize
Reported by Pylint.
Line: 8
Column: 1
import io
import os
import sys
from tempfile import _bin_openflags, _mkstemp_inner, gettempdir
import six
try:
from weakref import finalize
Reported by Pylint.
Line: 8
Column: 1
import io
import os
import sys
from tempfile import _bin_openflags, _mkstemp_inner, gettempdir
import six
try:
from weakref import finalize
Reported by Pylint.
Line: 22
Column: 9
try:
return os.fsencode(path)
except AttributeError:
from ..compat import fs_encode
return fs_encode(path)
def fs_decode(path):
Reported by Pylint.
Line: 31
Column: 9
try:
return os.fsdecode(path)
except AttributeError:
from ..compat import fs_decode
return fs_decode(path)
__all__ = ["finalize", "NamedTemporaryFile"]
Reported by Pylint.
Line: 40
Column: 5
try:
from tempfile import _infer_return_type
except ImportError:
def _infer_return_type(*args):
_types = set()
for arg in args:
Reported by Pylint.
Line: 55
Column: 38
return _types.pop()
def _sanitize_params(prefix, suffix, dir):
"""Common parameter processing for most APIs in this module."""
output_type = _infer_return_type(prefix, suffix, dir)
if suffix is None:
suffix = output_type()
if prefix is None:
Reported by Pylint.
pipenv/vendor/wheel/vendored/packaging/tags.py
27 issues
Line: 26
Column: 1
import sysconfig
import warnings
from ._typing import TYPE_CHECKING, cast
if TYPE_CHECKING: # pragma: no cover
from typing import (
Dict,
FrozenSet,
Reported by Pylint.
Line: 89
Column: 42
__slots__ = ["_interpreter", "_abi", "_platform", "_hash"]
def __init__(self, interpreter, abi, platform):
# type: (str, str, str) -> None
self._interpreter = interpreter.lower()
self._abi = abi.lower()
self._platform = platform.lower()
# The __hash__ of every single element in a Set[Tag] will be evaluated each time
Reported by Pylint.
Line: 400
Column: 3
formats.extend(["intel", "fat32", "fat"])
elif cpu_arch == "ppc64":
# TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
if version > (10, 5) or version < (10, 4):
return []
formats.append("fat64")
elif cpu_arch == "ppc":
Reported by Pylint.
Line: 433
Column: 9
if version is None:
version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
else:
version = version
if arch is None:
arch = _mac_arch(cpu_arch)
else:
arch = arch
Reported by Pylint.
Line: 437
Column: 9
if arch is None:
arch = _mac_arch(cpu_arch)
else:
arch = arch
if (10, 0) <= version and version < (11, 0):
# Prior to Mac OS 11, each yearly release of Mac OS bumped the
# "minor" version number. The major version was always 10.
for minor_version in range(version[1], -1, -1):
Reported by Pylint.
Line: 477
Column: 30
# From PEP 513, PEP 600
def _is_manylinux_compatible(name, arch, glibc_version):
# type: (str, str, GlibcVersion) -> bool
sys_glibc = _get_glibc_version()
if sys_glibc < glibc_version:
return False
# Check for presence of _manylinux module.
Reported by Pylint.
Line: 648
Column: 17
fmt, file.read(struct.calcsize(fmt))
) # type: (int, )
except struct.error:
raise _ELFFileHeader._InvalidELFFileHeader()
return result
self.e_ident_magic = unpack(">I")
if self.e_ident_magic != self.ELF_MAGIC_NUMBER:
raise _ELFFileHeader._InvalidELFFileHeader()
Reported by Pylint.
Line: 688
Column: 42
try:
with open(sys.executable, "rb") as f:
elf_header = _ELFFileHeader(f)
except (IOError, OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader):
return None
return elf_header
def _is_linux_armhf():
Reported by Pylint.
Line: 1
Column: 1
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import
import distutils.util
try:
Reported by Pylint.
Line: 76
Column: 1
# be 50 for testing. Once this actually happens, update the dictionary
# with the actual value.
_LAST_GLIBC_MINOR = collections.defaultdict(lambda: 50) # type: Dict[int, int]
glibcVersion = collections.namedtuple("Version", ["major", "minor"])
class Tag(object):
"""
A representation of the tag triple for a wheel.
Reported by Pylint.
pipenv/vendor/yaspin/core.py
27 issues
Line: 21
Column: 1
import time
from typing import List, Set, Union
from termcolor import colored
from pipenv.vendor import colorama
from pipenv.vendor.vistir import cursor
from .base_spinner import Spinner, default_spinner
from .constants import COLOR_ATTRS, COLOR_MAP, SPINNER_ATTRS
Reported by Pylint.
Line: 25
Column: 1
from pipenv.vendor import colorama
from pipenv.vendor.vistir import cursor
from .base_spinner import Spinner, default_spinner
from .constants import COLOR_ATTRS, COLOR_MAP, SPINNER_ATTRS
from .helpers import to_unicode
colorama.init()
Reported by Pylint.
Line: 26
Column: 1
from pipenv.vendor.vistir import cursor
from .base_spinner import Spinner, default_spinner
from .constants import COLOR_ATTRS, COLOR_MAP, SPINNER_ATTRS
from .helpers import to_unicode
colorama.init()
Reported by Pylint.
Line: 27
Column: 1
from .base_spinner import Spinner, default_spinner
from .constants import COLOR_ATTRS, COLOR_MAP, SPINNER_ATTRS
from .helpers import to_unicode
colorama.init()
class Yaspin: # pylint: disable=useless-object-inheritance,too-many-instance-attributes
Reported by Pylint.
Line: 122
Column: 13
def __getattr__(self, name):
# CLI spinners
if name in SPINNER_ATTRS:
from .spinners import Spinners # pylint: disable=import-outside-toplevel
sp = getattr(Spinners, name)
self.spinner = sp
# Color Attributes: "color", "on_color", "attrs"
elif name in COLOR_ATTRS:
Reported by Pylint.
Line: 503
Column: 3
if isinstance(spinner.frames, str):
uframes = spinner.frames
# TODO (pavdmyt): support any type that implements iterable
if isinstance(spinner.frames, (list, tuple)):
# Empty ``spinner.frames`` is handled by ``Yaspin._set_spinner``
if spinner.frames and isinstance(spinner.frames[0], bytes):
uframes_seq = [to_unicode(frame) for frame in spinner.frames]
Reported by Pylint.
Line: 111
Column: 5
self.stop()
return False # nothing is handled
def __call__(self, fn):
@functools.wraps(fn)
def inner(*args, **kwargs):
with self:
return fn(*args, **kwargs)
Reported by Pylint.
Line: 124
Column: 13
if name in SPINNER_ATTRS:
from .spinners import Spinners # pylint: disable=import-outside-toplevel
sp = getattr(Spinners, name)
self.spinner = sp
# Color Attributes: "color", "on_color", "attrs"
elif name in COLOR_ATTRS:
attr_type = COLOR_MAP[name]
# Call appropriate property setters;
Reported by Pylint.
Line: 151
Column: 5
# Properties
#
@property
def spinner(self):
return self._spinner
@spinner.setter
def spinner(self, sp):
self._spinner = self._set_spinner(sp)
Reported by Pylint.
Line: 155
Column: 5
return self._spinner
@spinner.setter
def spinner(self, sp):
self._spinner = self._set_spinner(sp)
self._frames = self._set_frames(self._spinner, self._reversal)
self._interval = self._set_interval(self._spinner)
self._cycle = self._set_cycle(self._frames)
Reported by Pylint.
pipenv/vendor/charset_normalizer/utils.py
26 issues
Line: 15
Column: 1
from encodings.aliases import aliases
from functools import lru_cache
from charset_normalizer.constant import UNICODE_RANGES_COMBINED, UNICODE_SECONDARY_RANGE_KEYWORD, \
RE_POSSIBLE_ENCODING_INDICATION, ENCODING_MARKS, UTF8_MAXIMAL_ALLOCATION, IANA_SUPPORTED_SIMILAR
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_accentuated(character: str) -> bool:
Reported by Pylint.
Line: 1
Column: 1
try:
import unicodedata2 as unicodedata
except ImportError:
import unicodedata
from codecs import IncrementalDecoder
from re import findall
from typing import Optional, Tuple, Union, List, Set
import importlib
Reported by Pylint.
Line: 12
Column: 1
import importlib
from _multibytecodec import MultibyteIncrementalDecoder # type: ignore
from encodings.aliases import aliases
from functools import lru_cache
from charset_normalizer.constant import UNICODE_RANGES_COMBINED, UNICODE_SECONDARY_RANGE_KEYWORD, \
RE_POSSIBLE_ENCODING_INDICATION, ENCODING_MARKS, UTF8_MAXIMAL_ALLOCATION, IANA_SUPPORTED_SIMILAR
Reported by Pylint.
Line: 13
Column: 1
from _multibytecodec import MultibyteIncrementalDecoder # type: ignore
from encodings.aliases import aliases
from functools import lru_cache
from charset_normalizer.constant import UNICODE_RANGES_COMBINED, UNICODE_SECONDARY_RANGE_KEYWORD, \
RE_POSSIBLE_ENCODING_INDICATION, ENCODING_MARKS, UTF8_MAXIMAL_ALLOCATION, IANA_SUPPORTED_SIMILAR
Reported by Pylint.
Line: 20
Column: 1
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_accentuated(character: str) -> bool:
try:
description = unicodedata.name(character) # type: str
except ValueError:
return False
return "WITH GRAVE" in description or "WITH ACUTE" in description or "WITH CEDILLA" in description
Reported by Pylint.
Line: 25
Column: 1
description = unicodedata.name(character) # type: str
except ValueError:
return False
return "WITH GRAVE" in description or "WITH ACUTE" in description or "WITH CEDILLA" in description
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def remove_accent(character: str) -> str:
decomposed = unicodedata.decomposition(character) # type: str
Reported by Pylint.
Line: 29
Column: 1
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def remove_accent(character: str) -> str:
decomposed = unicodedata.decomposition(character) # type: str
if not decomposed:
return character
codes = decomposed.split(" ") # type: List[str]
Reported by Pylint.
Line: 59
Column: 1
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_latin(character: str) -> bool:
try:
description = unicodedata.name(character) # type: str
except ValueError:
return False
return "LATIN" in description
Reported by Pylint.
Line: 68
Column: 1
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_punctuation(character: str) -> bool:
character_category = unicodedata.category(character) # type: str
if "P" in character_category:
return True
Reported by Pylint.
Line: 83
Column: 1
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_symbol(character: str) -> bool:
character_category = unicodedata.category(character) # type: str
if "S" in character_category or "N" in character_category:
return True
Reported by Pylint.
pipenv/vendor/charset_normalizer/cd.py
26 issues
Line: 6
Column: 1
from typing import List, Set, Optional, Tuple, Dict
import importlib
from charset_normalizer.models import CoherenceMatches
from charset_normalizer.utils import unicode_range, is_unicode_range_secondary, is_multi_byte_encoding
from charset_normalizer.md import is_suspiciously_successive_range
from charset_normalizer.assets import FREQUENCIES
from collections import Counter
Reported by Pylint.
Line: 7
Column: 1
import importlib
from charset_normalizer.models import CoherenceMatches
from charset_normalizer.utils import unicode_range, is_unicode_range_secondary, is_multi_byte_encoding
from charset_normalizer.md import is_suspiciously_successive_range
from charset_normalizer.assets import FREQUENCIES
from collections import Counter
Reported by Pylint.
Line: 8
Column: 1
from charset_normalizer.models import CoherenceMatches
from charset_normalizer.utils import unicode_range, is_unicode_range_secondary, is_multi_byte_encoding
from charset_normalizer.md import is_suspiciously_successive_range
from charset_normalizer.assets import FREQUENCIES
from collections import Counter
def encoding_unicode_range(iana_name: str) -> List[str]:
Reported by Pylint.
Line: 9
Column: 1
from charset_normalizer.models import CoherenceMatches
from charset_normalizer.utils import unicode_range, is_unicode_range_secondary, is_multi_byte_encoding
from charset_normalizer.md import is_suspiciously_successive_range
from charset_normalizer.assets import FREQUENCIES
from collections import Counter
def encoding_unicode_range(iana_name: str) -> List[str]:
"""
Reported by Pylint.
Line: 1
Column: 1
from codecs import IncrementalDecoder
from functools import lru_cache
from typing import List, Set, Optional, Tuple, Dict
import importlib
from charset_normalizer.models import CoherenceMatches
from charset_normalizer.utils import unicode_range, is_unicode_range_secondary, is_multi_byte_encoding
from charset_normalizer.md import is_suspiciously_successive_range
from charset_normalizer.assets import FREQUENCIES
Reported by Pylint.
Line: 7
Column: 1
import importlib
from charset_normalizer.models import CoherenceMatches
from charset_normalizer.utils import unicode_range, is_unicode_range_secondary, is_multi_byte_encoding
from charset_normalizer.md import is_suspiciously_successive_range
from charset_normalizer.assets import FREQUENCIES
from collections import Counter
Reported by Pylint.
Line: 10
Column: 1
from charset_normalizer.utils import unicode_range, is_unicode_range_secondary, is_multi_byte_encoding
from charset_normalizer.md import is_suspiciously_successive_range
from charset_normalizer.assets import FREQUENCIES
from collections import Counter
def encoding_unicode_range(iana_name: str) -> List[str]:
"""
Return associated unicode ranges in a single byte code page.
Reported by Pylint.
Line: 20
Column: 1
if is_multi_byte_encoding(iana_name):
raise IOError("Function not supported on multi-byte code page")
decoder = importlib.import_module('encodings.{}'.format(iana_name)).IncrementalDecoder # type: ignore
p = decoder(errors="ignore") # type: IncrementalDecoder
seen_ranges = set() # type: Set[str]
for i in range(48, 255):
Reported by Pylint.
Line: 22
Column: 5
decoder = importlib.import_module('encodings.{}'.format(iana_name)).IncrementalDecoder # type: ignore
p = decoder(errors="ignore") # type: IncrementalDecoder
seen_ranges = set() # type: Set[str]
for i in range(48, 255):
chunk = p.decode(
bytes([i])
Reported by Pylint.
Line: 60
Column: 1
@lru_cache()
def encoding_languages(iana_name: str) -> List[str]:
"""
Single-byte encoding language association. Some code page are heavily linked to particular language(s).
This function does the correspondence.
"""
unicode_ranges = encoding_unicode_range(iana_name) # type: List[str]
primary_range = None # type: Optional[str]
Reported by Pylint.
pipenv/patched/notpip/_internal/operations/prepare.py
26 issues
Line: 50
Column: 5
Callable, List, Optional, Tuple,
)
from mypy_extensions import TypedDict
from pipenv.patched.notpip._internal.distributions import AbstractDistribution
from pipenv.patched.notpip._internal.index.package_finder import PackageFinder
from pipenv.patched.notpip._internal.models.link import Link
from pipenv.patched.notpip._internal.network.download import Downloader
Reported by Pylint.
Line: 20
Column: 1
make_distribution_for_install_requirement,
)
from pipenv.patched.notpip._internal.distributions.installed import InstalledDistribution
from pipenv.patched.notpip._internal.exceptions import (
DirectoryUrlHashUnsupported,
HashMismatch,
HashUnpinned,
InstallationError,
PreviousBuildDirError,
Reported by Pylint.
Line: 192
Column: 19
def _copy_source_tree(source, target):
# type: (str, str) -> None
def ignore(d, names):
# type: (str, List[str]) -> List[str]
# Pulling in those directories can potentially be very slow,
# exclude the following directories if they appear in the top
# level dir (and only it).
# See discussion at https://github.com/pypa/pip/pull/6770
Reported by Pylint.
Line: 411
Column: 3
assert req.link
link = req.link
# TODO: Breakup into smaller functions
if link.scheme == 'file':
path = link.file_path
logger.info('Processing %s', display_path(path))
else:
logger.info('Collecting %s', req.req or req)
Reported by Pylint.
Line: 429
Column: 3
# If a checkout exists, it's unwise to keep going. version
# inconsistencies are logged later, but do not fail the
# installation.
# FIXME: this won't upgrade when there's an existing
# package unpacked in `req.source_dir`
if os.path.exists(os.path.join(req.source_dir, 'setup.py')):
rmtree(req.source_dir)
# Now that we have the real link, we can tell what kind of
Reported by Pylint.
Line: 483
Column: 17
req,
exc,
)
raise InstallationError(
'Could not install requirement {} because of HTTP '
'error {} for URL {}'.format(req, exc, link)
)
# For use in later processing, preserve the file path on the
Reported by Pylint.
Line: 52
Column: 5
from mypy_extensions import TypedDict
from pipenv.patched.notpip._internal.distributions import AbstractDistribution
from pipenv.patched.notpip._internal.index.package_finder import PackageFinder
from pipenv.patched.notpip._internal.models.link import Link
from pipenv.patched.notpip._internal.network.download import Downloader
from pipenv.patched.notpip._internal.req.req_install import InstallRequirement
from pipenv.patched.notpip._internal.req.req_tracker import RequirementTracker
Reported by Pylint.
Line: 99
Column: 1
return abstract_dist
def unpack_vcs_link(link, location):
# type: (Link, str) -> None
vcs_backend = vcs.get_backend_for_scheme(link.scheme)
assert vcs_backend is not None
vcs_backend.unpack(location, url=hide_url(link.url))
Reported by Pylint.
Line: 102
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
def unpack_vcs_link(link, location):
# type: (Link, str) -> None
vcs_backend = vcs.get_backend_for_scheme(link.scheme)
assert vcs_backend is not None
vcs_backend.unpack(location, url=hide_url(link.url))
def _copy_file(filename, location, link):
# type: (str, str, Link) -> None
Reported by Bandit.
Line: 137
Column: 1
logger.info('Saved %s', display_path(download_location))
def unpack_http_url(
link, # type: Link
location, # type: str
downloader, # type: Downloader
download_dir=None, # type: Optional[str]
hashes=None, # type: Optional[Hashes]
Reported by Pylint.
pipenv/vendor/charset_normalizer/md.py
26 issues
Line: 4
Column: 1
from functools import lru_cache
from typing import Optional, List
from charset_normalizer.constant import UNICODE_SECONDARY_RANGE_KEYWORD
from charset_normalizer.utils import is_punctuation, is_symbol, unicode_range, is_accentuated, is_latin, \
remove_accent, is_separator, is_cjk
class MessDetectorPlugin:
Reported by Pylint.
Line: 5
Column: 1
from typing import Optional, List
from charset_normalizer.constant import UNICODE_SECONDARY_RANGE_KEYWORD
from charset_normalizer.utils import is_punctuation, is_symbol, unicode_range, is_accentuated, is_latin, \
remove_accent, is_separator, is_cjk
class MessDetectorPlugin:
"""
Reported by Pylint.
Line: 1
Column: 1
from functools import lru_cache
from typing import Optional, List
from charset_normalizer.constant import UNICODE_SECONDARY_RANGE_KEYWORD
from charset_normalizer.utils import is_punctuation, is_symbol, unicode_range, is_accentuated, is_latin, \
remove_accent, is_separator, is_cjk
class MessDetectorPlugin:
Reported by Pylint.
Line: 5
Column: 1
from typing import Optional, List
from charset_normalizer.constant import UNICODE_SECONDARY_RANGE_KEYWORD
from charset_normalizer.utils import is_punctuation, is_symbol, unicode_range, is_accentuated, is_latin, \
remove_accent, is_separator, is_cjk
class MessDetectorPlugin:
"""
Reported by Pylint.
Line: 43
Column: 1
raise NotImplementedError # pragma: nocover
class TooManySymbolOrPunctuationPlugin(MessDetectorPlugin):
def __init__(self):
self._punctuation_count = 0 # type: int
self._symbol_count = 0 # type: int
self._character_count = 0 # type: int
Reported by Pylint.
Line: 59
Column: 1
def feed(self, character: str) -> None:
self._character_count += 1
if character != self._last_printable_char and character not in ["<", ">", "=", ":", "/", "&", ";", "{", "}", "[", "]"]:
if is_punctuation(character):
self._punctuation_count += 1
elif character.isdigit() is False and is_symbol(character):
self._symbol_count += 2
Reported by Pylint.
Line: 77
Column: 1
if self._character_count == 0:
return 0.
ratio_of_punctuation = (self._punctuation_count + self._symbol_count) / self._character_count # type: float
return ratio_of_punctuation if ratio_of_punctuation >= 0.3 else 0.
class TooManyAccentuatedPlugin(MessDetectorPlugin):
Reported by Pylint.
Line: 82
Column: 1
return ratio_of_punctuation if ratio_of_punctuation >= 0.3 else 0.
class TooManyAccentuatedPlugin(MessDetectorPlugin):
def __init__(self):
self._character_count = 0 # type: int
self._accentuated_count = 0 # type: int
Reported by Pylint.
Line: 109
Column: 1
return ratio_of_accentuation if ratio_of_accentuation >= 0.35 else 0.
class UnprintablePlugin(MessDetectorPlugin):
def __init__(self):
self._unprintable_count = 0 # type: int
self._character_count = 0 # type: int
Reported by Pylint.
Line: 134
Column: 1
return (self._unprintable_count * 8) / self._character_count
class SuspiciousDuplicateAccentPlugin(MessDetectorPlugin):
def __init__(self):
self._successive_count = 0 # type: int
self._character_count = 0 # type: int
Reported by Pylint.
pipenv/vendor/importlib_resources/tests/test_compatibilty_files.py
26 issues
Line: 4
Column: 1
import io
import unittest
import importlib_resources as resources
from importlib_resources._adapters import (
CompatibilityFiles,
wrap_spec,
)
Reported by Pylint.
Line: 6
Column: 1
import importlib_resources as resources
from importlib_resources._adapters import (
CompatibilityFiles,
wrap_spec,
)
from . import util
Reported by Pylint.
Line: 11
Column: 1
wrap_spec,
)
from . import util
class CompatibilityFilesTests(unittest.TestCase):
@property
def package(self):
Reported by Pylint.
Line: 1
Column: 1
import io
import unittest
import importlib_resources as resources
from importlib_resources._adapters import (
CompatibilityFiles,
wrap_spec,
)
Reported by Pylint.
Line: 14
Column: 1
from . import util
class CompatibilityFilesTests(unittest.TestCase):
@property
def package(self):
bytes_data = io.BytesIO(b'Hello, world!')
return util.create_package(
file=bytes_data,
Reported by Pylint.
Line: 16
Column: 5
class CompatibilityFilesTests(unittest.TestCase):
@property
def package(self):
bytes_data = io.BytesIO(b'Hello, world!')
return util.create_package(
file=bytes_data,
path='some_path',
contents=('a', 'b', 'c'),
Reported by Pylint.
Line: 25
Column: 5
)
@property
def files(self):
return resources.files(self.package)
def test_spec_path_iter(self):
self.assertEqual(
sorted(path.name for path in self.files.iterdir()),
Reported by Pylint.
Line: 28
Column: 5
def files(self):
return resources.files(self.package)
def test_spec_path_iter(self):
self.assertEqual(
sorted(path.name for path in self.files.iterdir()),
['a', 'b', 'c'],
)
Reported by Pylint.
Line: 34
Column: 5
['a', 'b', 'c'],
)
def test_child_path_iter(self):
self.assertEqual(list((self.files / 'a').iterdir()), [])
def test_orphan_path_iter(self):
self.assertEqual(list((self.files / 'a' / 'a').iterdir()), [])
self.assertEqual(list((self.files / 'a' / 'a' / 'a').iterdir()), [])
Reported by Pylint.
Line: 37
Column: 5
def test_child_path_iter(self):
self.assertEqual(list((self.files / 'a').iterdir()), [])
def test_orphan_path_iter(self):
self.assertEqual(list((self.files / 'a' / 'a').iterdir()), [])
self.assertEqual(list((self.files / 'a' / 'a' / 'a').iterdir()), [])
def test_spec_path_is(self):
self.assertFalse(self.files.is_file())
Reported by Pylint.
pipenv/vendor/cerberus/utils.py
25 issues
Line: 5
Column: 1
from collections import namedtuple
from cerberus.platform import _int_types, _str_type, Mapping, Sequence, Set
TypeDefinition = namedtuple('TypeDefinition', 'name,included_types,excluded_types')
"""
This class is used to define types that can be used as value in the
Reported by Pylint.
Line: 53
Column: 9
def get_Validator_class():
global Validator
if 'Validator' not in globals():
from cerberus.validator import Validator
return Validator
def mapping_hash(schema):
return hash(mapping_to_frozenset(schema))
Reported by Pylint.
Line: 51
Column: 5
def get_Validator_class():
global Validator
if 'Validator' not in globals():
from cerberus.validator import Validator
return Validator
Reported by Pylint.
Line: 105
Column: 1
raise RuntimeError('This is a readonly class property.')
def validator_factory(name, bases=None, namespace={}):
"""
Dynamically create a :class:`~cerberus.Validator` subclass.
Docstrings of mixin-classes will be added to the resulting class' one if ``__doc__``
is not in :obj:`namespace`.
Reported by Pylint.
Line: 1
Column: 1
from __future__ import absolute_import
from collections import namedtuple
from cerberus.platform import _int_types, _str_type, Mapping, Sequence, Set
TypeDefinition = namedtuple('TypeDefinition', 'name,included_types,excluded_types')
"""
Reported by Pylint.
Line: 20
Column: 1
"""
def compare_paths_lt(x, y):
min_length = min(len(x), len(y))
if x[:min_length] == y[:min_length]:
return len(x) == min_length
Reported by Pylint.
Line: 20
Column: 1
"""
def compare_paths_lt(x, y):
min_length = min(len(x), len(y))
if x[:min_length] == y[:min_length]:
return len(x) == min_length
Reported by Pylint.
Line: 20
Column: 1
"""
def compare_paths_lt(x, y):
min_length = min(len(x), len(y))
if x[:min_length] == y[:min_length]:
return len(x) == min_length
Reported by Pylint.
Line: 27
Column: 12
return len(x) == min_length
for i in range(min_length):
a, b = x[i], y[i]
for _type in (_int_types, _str_type, tuple):
if isinstance(a, _type):
if isinstance(b, _type):
break
Reported by Pylint.
Line: 27
Column: 9
return len(x) == min_length
for i in range(min_length):
a, b = x[i], y[i]
for _type in (_int_types, _str_type, tuple):
if isinstance(a, _type):
if isinstance(b, _type):
break
Reported by Pylint.
pipenv/vendor/vistir/backports/surrogateescape.py
25 issues
Line: 38
Column: 15
_unichr = chr
bytes_chr = lambda code: bytes((code,))
else:
_unichr = unichr # type: ignore
bytes_chr = chr
def surrogateescape_handler(exc):
"""
Reported by Pylint.
Line: 10
Column: 1
# This code is released under the Python license and the BSD 2-clause license
import codecs
import sys
import six
FS_ERRORS = "surrogateescape"
Reported by Pylint.
Line: 65
Column: 9
else:
raise exc
except NotASurrogateError:
raise exc
return (decoded, exc.end)
class NotASurrogateError(Exception):
pass
Reported by Pylint.
Line: 127
Column: 20
return str().join(decoded)
def encodefilename(fn):
if FS_ENCODING == "ascii":
# ASCII encoder of Python 2 expects that the error handler returns a
# Unicode string encodable to ASCII, whereas our surrogateescape error
# handler has to return bytes in 0x80-0xFF range.
encoded = []
Reported by Pylint.
Line: 132
Column: 9
# ASCII encoder of Python 2 expects that the error handler returns a
# Unicode string encodable to ASCII, whereas our surrogateescape error
# handler has to return bytes in 0x80-0xFF range.
encoded = []
for index, ch in enumerate(fn):
code = ord(ch)
if code < 128:
ch = bytes_chr(code)
elif 0xDC80 <= code <= 0xDCFF:
Reported by Pylint.
Line: 167
Column: 20
return fn.encode(FS_ENCODING, FS_ERRORS)
def decodefilename(fn):
return fn.decode(FS_ENCODING, FS_ERRORS)
FS_ENCODING = "ascii"
fn = b("[abc\xff]")
Reported by Pylint.
Line: 20
Column: 1
# FS_ERRORS = 'my_surrogateescape'
def u(text):
if six.PY3:
return text
else:
return text.decode("unicode_escape")
Reported by Pylint.
Line: 20
Column: 1
# FS_ERRORS = 'my_surrogateescape'
def u(text):
if six.PY3:
return text
else:
return text.decode("unicode_escape")
Reported by Pylint.
Line: 21
Column: 5
def u(text):
if six.PY3:
return text
else:
return text.decode("unicode_escape")
Reported by Pylint.
Line: 27
Column: 1
return text.decode("unicode_escape")
def b(data):
if six.PY3:
return data.encode("latin1")
else:
return data
Reported by Pylint.