The following issues were found:
youtube_dl/extractor/msn.py
22 issues
Line: 6
Column: 1
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    determine_ext,
    ExtractorError,
    int_or_none,
Reported by Pylint.
Line: 7
Column: 1
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    determine_ext,
    ExtractorError,
    int_or_none,
    unescapeHTML,
Reported by Pylint.
Line: 8
Column: 1
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    determine_ext,
    ExtractorError,
    int_or_none,
    unescapeHTML,
)
Reported by Pylint.
Line: 1
Column: 1
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    determine_ext,
Reported by Pylint.
Line: 16
Column: 1
)
class MSNIE(InfoExtractor):
    _VALID_URL = r'https?://(?:(?:www|preview)\.)?msn\.com/(?:[^/]+/)+(?P<display_id>[^/]+)/[a-z]{2}-(?P<id>[\da-zA-Z]+)'
    _TESTS = [{
        'url': 'https://www.msn.com/en-in/money/video/7-ways-to-get-rid-of-chest-congestion/vi-BBPxU6d',
        'md5': '087548191d273c5c55d05028f8d2cbcd',
        'info_dict': {
Reported by Pylint.
Line: 16
Column: 1
)
class MSNIE(InfoExtractor):
    _VALID_URL = r'https?://(?:(?:www|preview)\.)?msn\.com/(?:[^/]+/)+(?P<display_id>[^/]+)/[a-z]{2}-(?P<id>[\da-zA-Z]+)'
    _TESTS = [{
        'url': 'https://www.msn.com/en-in/money/video/7-ways-to-get-rid-of-chest-congestion/vi-BBPxU6d',
        'md5': '087548191d273c5c55d05028f8d2cbcd',
        'info_dict': {
Reported by Pylint.
Line: 17
Column: 1
class MSNIE(InfoExtractor):
    _VALID_URL = r'https?://(?:(?:www|preview)\.)?msn\.com/(?:[^/]+/)+(?P<display_id>[^/]+)/[a-z]{2}-(?P<id>[\da-zA-Z]+)'
    _TESTS = [{
        'url': 'https://www.msn.com/en-in/money/video/7-ways-to-get-rid-of-chest-congestion/vi-BBPxU6d',
        'md5': '087548191d273c5c55d05028f8d2cbcd',
        'info_dict': {
            'id': 'BBPxU6d',
Reported by Pylint.
Line: 19
Column: 1
class MSNIE(InfoExtractor):
    _VALID_URL = r'https?://(?:(?:www|preview)\.)?msn\.com/(?:[^/]+/)+(?P<display_id>[^/]+)/[a-z]{2}-(?P<id>[\da-zA-Z]+)'
    _TESTS = [{
        'url': 'https://www.msn.com/en-in/money/video/7-ways-to-get-rid-of-chest-congestion/vi-BBPxU6d',
        'md5': '087548191d273c5c55d05028f8d2cbcd',
        'info_dict': {
            'id': 'BBPxU6d',
            'display_id': '7-ways-to-get-rid-of-chest-congestion',
            'ext': 'mp4',
Reported by Pylint.
Line: 33
Column: 1
        },
    }, {
        # Article, multiple Dailymotion Embeds
        'url': 'https://www.msn.com/en-in/money/sports/hottest-football-wags-greatest-footballers-turned-managers-and-more/ar-BBpc7Nl',
        'info_dict': {
            'id': 'BBpc7Nl',
        },
        'playlist_mincount': 4,
    }, {
Reported by Pylint.
Line: 39
Column: 1
        },
        'playlist_mincount': 4,
    }, {
        'url': 'http://www.msn.com/en-ae/news/offbeat/meet-the-nine-year-old-self-made-millionaire/ar-BBt6ZKf',
        'only_matching': True,
    }, {
        'url': 'http://www.msn.com/en-ae/video/watch/obama-a-lot-of-people-will-be-disappointed/vi-AAhxUMH',
        'only_matching': True,
    }, {
Reported by Pylint.
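Several of the column-1 findings above land on the very long _VALID_URL literal. The report omits the message text, so this is an assumption, but if they are line-length complaints the usual remedy in this codebase is a verbose regex, as mailru.py does later in this report. A minimal sketch (the pattern is unchanged; whitespace is ignored under (?x)):
    _VALID_URL = r'''(?x)
                    https?://(?:(?:www|preview)\.)?msn\.com/
                    (?:[^/]+/)+
                    (?P<display_id>[^/]+)/
                    [a-z]{2}-(?P<id>[\da-zA-Z]+)'''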
youtube_dl/extractor/ciscolive.py
22 issues
Line: 6
Column: 1
import itertools
from .common import InfoExtractor
from ..compat import (
    compat_parse_qs,
    compat_urllib_parse_urlparse,
)
from ..utils import (
Reported by Pylint.
Line: 7
Column: 1
import itertools
from .common import InfoExtractor
from ..compat import (
    compat_parse_qs,
    compat_urllib_parse_urlparse,
)
from ..utils import (
    clean_html,
Reported by Pylint.
Line: 11
Column: 1
    compat_parse_qs,
    compat_urllib_parse_urlparse,
)
from ..utils import (
    clean_html,
    float_or_none,
    int_or_none,
    try_get,
    urlencode_postdata,
Reported by Pylint.
Line: 1
Column: 1
# coding: utf-8
from __future__ import unicode_literals
import itertools
from .common import InfoExtractor
from ..compat import (
    compat_parse_qs,
    compat_urllib_parse_urlparse,
Reported by Pylint.
Line: 20
Column: 1
)
class CiscoLiveBaseIE(InfoExtractor):
    # These appear to be constant across all Cisco Live presentations
    # and are not tied to any user session or event
    RAINFOCUS_API_URL = 'https://events.rainfocus.com/api/%s'
    RAINFOCUS_API_PROFILE_ID = 'Na3vqYdAlJFSxhYTYQGuMbpafMqftalz'
    RAINFOCUS_WIDGET_ID = 'n6l4Lo05R8fiy3RpUBm447dZN8uNWoye'
Reported by Pylint.
Line: 20
Column: 1
)
class CiscoLiveBaseIE(InfoExtractor):
    # These appear to be constant across all Cisco Live presentations
    # and are not tied to any user session or event
    RAINFOCUS_API_URL = 'https://events.rainfocus.com/api/%s'
    RAINFOCUS_API_PROFILE_ID = 'Na3vqYdAlJFSxhYTYQGuMbpafMqftalz'
    RAINFOCUS_WIDGET_ID = 'n6l4Lo05R8fiy3RpUBm447dZN8uNWoye'
Reported by Pylint.
Line: 26
Column: 1
    RAINFOCUS_API_URL = 'https://events.rainfocus.com/api/%s'
    RAINFOCUS_API_PROFILE_ID = 'Na3vqYdAlJFSxhYTYQGuMbpafMqftalz'
    RAINFOCUS_WIDGET_ID = 'n6l4Lo05R8fiy3RpUBm447dZN8uNWoye'
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/5647924234001/SyK2FdqjM_default/index.html?videoId=%s'
    HEADERS = {
        'Origin': 'https://ciscolive.cisco.com',
        'rfApiProfileId': RAINFOCUS_API_PROFILE_ID,
        'rfWidgetId': RAINFOCUS_WIDGET_ID,
Reported by Pylint.
Line: 34
Column: 5
        'rfWidgetId': RAINFOCUS_WIDGET_ID,
    }
    def _call_api(self, ep, rf_id, query, referrer, note=None):
        headers = self.HEADERS.copy()
        headers['Referer'] = referrer
        return self._download_json(
            self.RAINFOCUS_API_URL % ep, rf_id, note=note,
            data=urlencode_postdata(query), headers=headers)
Reported by Pylint.
Line: 34
Column: 5
        'rfWidgetId': RAINFOCUS_WIDGET_ID,
    }
    def _call_api(self, ep, rf_id, query, referrer, note=None):
        headers = self.HEADERS.copy()
        headers['Referer'] = referrer
        return self._download_json(
            self.RAINFOCUS_API_URL % ep, rf_id, note=note,
            data=urlencode_postdata(query), headers=headers)
Reported by Pylint.
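Both findings at line 34, column 5 sit on the _call_api definition, which is consistent with a missing-docstring message (an assumption; the report does not include the message text). If so, a one-line docstring clears it:
    def _call_api(self, ep, rf_id, query, referrer, note=None):
        """POST query to the RainFocus endpoint ep and return the parsed JSON."""
        headers = self.HEADERS.copy()
        headers['Referer'] = referrer
        return self._download_json(
            self.RAINFOCUS_API_URL % ep, rf_id, note=note,
            data=urlencode_postdata(query), headers=headers)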
Line: 67
Column: 1
        }
class CiscoLiveSessionIE(CiscoLiveBaseIE):
    _VALID_URL = r'https?://(?:www\.)?ciscolive(?:\.cisco)?\.com/[^#]*#/session/(?P<id>[^/?&]+)'
    _TESTS = [{
        'url': 'https://ciscolive.cisco.com/on-demand-library/?#/session/1423353499155001FoSs',
        'md5': 'c98acf395ed9c9f766941c70f5352e22',
        'info_dict': {
Reported by Pylint.
youtube_dl/extractor/packtpub.py
22 issues
Line: 6
Column: 1
import json
import re
from .common import InfoExtractor
from ..compat import (
    # compat_str,
    compat_HTTPError,
)
from ..utils import (
Reported by Pylint.
Line: 7
Column: 1
import re
from .common import InfoExtractor
from ..compat import (
    # compat_str,
    compat_HTTPError,
)
from ..utils import (
    clean_html,
Reported by Pylint.
Line: 11
Column: 1
    # compat_str,
    compat_HTTPError,
)
from ..utils import (
    clean_html,
    ExtractorError,
    # remove_end,
    str_or_none,
    strip_or_none,
Reported by Pylint.
Line: 65
Column: 17
        except ExtractorError as e:
            if isinstance(e.cause, compat_HTTPError) and e.cause.code in (400, 401, 404):
                message = self._parse_json(e.cause.read().decode(), None)['message']
                raise ExtractorError(message, expected=True)
            raise
    def _real_extract(self, url):
        course_id, chapter_id, video_id, display_id = re.match(self._VALID_URL, url).groups()
Reported by Pylint.
Line: 83
Column: 3
                self.raise_login_required('This video is locked')
            raise
        # TODO: find a better way to avoid duplicating course requests
        # metadata = self._download_json(
        #     '%s/products/%s/chapters/%s/sections/%s/metadata'
        #     % (self._MAPT_REST, course_id, chapter_id, video_id),
        #     video_id)['data']
Reported by Pylint.
Line: 1
Column: 1
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..compat import (
    # compat_str,
    compat_HTTPError,
Reported by Pylint.
Line: 22
Column: 1
)
class PacktPubBaseIE(InfoExtractor):
    # _PACKT_BASE = 'https://www.packtpub.com'
    _STATIC_PRODUCTS_BASE = 'https://static.packt-cdn.com/products/'
class PacktPubIE(PacktPubBaseIE):
Reported by Pylint.
Line: 22
Column: 1
)
class PacktPubBaseIE(InfoExtractor):
    # _PACKT_BASE = 'https://www.packtpub.com'
    _STATIC_PRODUCTS_BASE = 'https://static.packt-cdn.com/products/'
class PacktPubIE(PacktPubBaseIE):
Reported by Pylint.
Line: 27
Column: 1
    _STATIC_PRODUCTS_BASE = 'https://static.packt-cdn.com/products/'
class PacktPubIE(PacktPubBaseIE):
    _VALID_URL = r'https?://(?:(?:www\.)?packtpub\.com/mapt|subscription\.packtpub\.com)/video/[^/]+/(?P<course_id>\d+)/(?P<chapter_id>[^/]+)/(?P<id>[^/]+)(?:/(?P<display_id>[^/?&#]+))?'
    _TESTS = [{
        'url': 'https://www.packtpub.com/mapt/video/web-development/9781787122215/20528/20530/Project+Intro',
        'md5': '1e74bd6cfd45d7d07666f4684ef58f70',
Reported by Pylint.
Line: 27
Column: 1
    _STATIC_PRODUCTS_BASE = 'https://static.packt-cdn.com/products/'
class PacktPubIE(PacktPubBaseIE):
    _VALID_URL = r'https?://(?:(?:www\.)?packtpub\.com/mapt|subscription\.packtpub\.com)/video/[^/]+/(?P<course_id>\d+)/(?P<chapter_id>[^/]+)/(?P<id>[^/]+)(?:/(?P<display_id>[^/?&#]+))?'
    _TESTS = [{
        'url': 'https://www.packtpub.com/mapt/video/web-development/9781787122215/20528/20530/Project+Intro',
        'md5': '1e74bd6cfd45d7d07666f4684ef58f70',
Reported by Pylint.
youtube_dl/extractor/openload.py
22 issues
Line: 9
Column: 1
import subprocess
import tempfile
from ..compat import (
    compat_urlparse,
    compat_kwargs,
)
from ..utils import (
    check_executable,
Reported by Pylint.
Line: 13
Column: 1
    compat_urlparse,
    compat_kwargs,
)
from ..utils import (
    check_executable,
    encodeArgument,
    ExtractorError,
    get_exe_version,
    is_outdated_version,
Reported by Pylint.
Line: 144
Column: 38
                pass
    def _save_cookies(self, url):
        cookies = cookie_jar_to_list(self.extractor._downloader.cookiejar)
        for cookie in cookies:
            if 'path' not in cookie:
                cookie['path'] = '/'
            if 'domain' not in cookie:
                cookie['domain'] = compat_urlparse.urlparse(url).netloc
Reported by Pylint.
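Line 144, column 38 points into self.extractor._downloader, which matches Pylint's protected-access warning (assumed; the message text is missing). PhantomJSwrapper is deliberately coupled to the extractor's internals, so a targeted suppression is the usual fix rather than a refactor:
    def _save_cookies(self, url):
        # Reaching into a protected member is intentional here: the wrapper
        # shares the extractor's cookie jar by design.
        cookies = cookie_jar_to_list(
            self.extractor._downloader.cookiejar)  # pylint: disable=protected-access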
Line: 161
Column: 13
            cookie['rest'] = {'httpOnly': None}
            if 'expiry' in cookie:
                cookie['expire_time'] = cookie['expiry']
            self.extractor._set_cookie(**compat_kwargs(cookie))
    def get(self, url, html=None, video_id=None, note=None, note2='Executing JS on webpage', headers={}, jscode='saveAndExit();'):
        """
        Downloads webpage (if needed) and executes JS
Reported by Pylint.
Line: 163
Column: 5
                cookie['expire_time'] = cookie['expiry']
            self.extractor._set_cookie(**compat_kwargs(cookie))
    def get(self, url, html=None, video_id=None, note=None, note2='Executing JS on webpage', headers={}, jscode='saveAndExit();'):
        """
        Downloads webpage (if needed) and executes JS
        Params:
            url: website url
Reported by Pylint.
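The get() definition flagged above carries a mutable default argument (headers={}), the classic trigger for Pylint's dangerous-default-value. Assuming that is the message here, the standard fix is a None sentinel so each call gets a fresh dict:
    def get(self, url, html=None, video_id=None, note=None,
            note2='Executing JS on webpage', headers=None, jscode='saveAndExit();'):
        # A shared default dict would leak header mutations across calls.
        if headers is None:
            headers = {}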
Line: 202
Column: 20
        if 'saveAndExit();' not in jscode:
            raise ExtractorError('`saveAndExit();` not found in `jscode`')
        if not html:
            html = self.extractor._download_webpage(url, video_id, note=note, headers=headers)
        with open(self._TMP_FILES['html'].name, 'wb') as f:
            f.write(html.encode('utf-8'))
        self._save_cookies(url)
Reported by Pylint.
Line: 1
Column: 1
# coding: utf-8
from __future__ import unicode_literals
import json
import os
import subprocess
import tempfile
from ..compat import (
Reported by Pylint.
Line: 6
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b404-import-subprocess
import json
import os
import subprocess
import tempfile
from ..compat import (
    compat_urlparse,
    compat_kwargs,
Reported by Bandit.
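Bandit's B404 is advisory: it flags the import itself so that the call sites get audited. The pattern it wants to see, and the one this module's PhantomJS wrapper already follows, is a fixed argument vector with no shell=True. A minimal sketch (the executable name and script path are placeholders):
    import subprocess

    # Arguments are passed as a list, so nothing is interpreted by a shell.
    p = subprocess.Popen(
        ['phantomjs', '--ssl-protocol=any', '/tmp/script.js'],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()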
Line: 23
Column: 1
)
def cookie_to_dict(cookie):
    cookie_dict = {
        'name': cookie.name,
        'value': cookie.value,
    }
    if cookie.port_specified:
Reported by Pylint.
Line: 50
Column: 1
    return cookie_dict
def cookie_jar_to_list(cookie_jar):
    return [cookie_to_dict(cookie) for cookie in cookie_jar]
class PhantomJSwrapper(object):
    """PhantomJS wrapper class
Reported by Pylint.
youtube_dl/extractor/turner.py
22 issues
Line: 6
Column: 1
import re
from .adobepass import AdobePassIE
from ..compat import compat_str
from ..utils import (
    fix_xml_ampersands,
    xpath_text,
    int_or_none,
Reported by Pylint.
Line: 7
Column: 1
import re
from .adobepass import AdobePassIE
from ..compat import compat_str
from ..utils import (
    fix_xml_ampersands,
    xpath_text,
    int_or_none,
    determine_ext,
Reported by Pylint.
Line: 8
Column: 1
from .adobepass import AdobePassIE
from ..compat import compat_str
from ..utils import (
    fix_xml_ampersands,
    xpath_text,
    int_or_none,
    determine_ext,
    float_or_none,
Reported by Pylint.
Line: 53
Column: 5
            self._AKAMAI_SPE_TOKEN_CACHE[secure_path] = token
        return video_url + '?hdnea=' + token
    def _extract_cvp_info(self, data_src, video_id, path_data={}, ap_data={}, fatal=False):
        video_data = self._download_xml(
            data_src, video_id,
            transform_source=lambda s: fix_xml_ampersands(s).strip(),
            fatal=fatal)
        if not video_data:
Reported by Pylint.
Line: 53
Column: 5
            self._AKAMAI_SPE_TOKEN_CACHE[secure_path] = token
        return video_url + '?hdnea=' + token
    def _extract_cvp_info(self, data_src, video_id, path_data={}, ap_data={}, fatal=False):
        video_data = self._download_xml(
            data_src, video_id,
            transform_source=lambda s: fix_xml_ampersands(s).strip(),
            fatal=fatal)
        if not video_data:
Reported by Pylint.
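The _extract_cvp_info signature flagged twice above uses mutable defaults (path_data={}, ap_data={}), the usual trigger for Pylint's dangerous-default-value. Assuming that is the message, the None-sentinel idiom applies here as well:
    def _extract_cvp_info(self, data_src, video_id, path_data=None, ap_data=None, fatal=False):
        # Fresh dicts per call instead of one default object shared by all calls.
        path_data = path_data or {}
        ap_data = ap_data or {}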
Line: 85
Column: 3
            ext = determine_ext(video_url)
            if video_url.startswith('/mp4:protected/'):
                continue
                # TODO Correct extraction for these files
                # protected_path_data = path_data.get('protected')
                # if not protected_path_data or not rtmp_src:
                #     continue
                # protected_path = self._search_regex(
                #     r'/mp4:(.+)\.[a-z0-9]', video_url, 'secure path')
Reported by Pylint.
Line: 1
Column: 1
# coding: utf-8
from __future__ import unicode_literals
import re
from .adobepass import AdobePassIE
from ..compat import compat_str
from ..utils import (
    fix_xml_ampersands,
Reported by Pylint.
Line: 23
Column: 1
)
class TurnerBaseIE(AdobePassIE):
    _AKAMAI_SPE_TOKEN_CACHE = {}
    def _extract_timestamp(self, video_data):
        return int_or_none(xpath_attr(video_data, 'dateCreated', 'uts'))
Reported by Pylint.
Line: 23
Column: 1
)
class TurnerBaseIE(AdobePassIE):
    _AKAMAI_SPE_TOKEN_CACHE = {}
    def _extract_timestamp(self, video_data):
        return int_or_none(xpath_attr(video_data, 'dateCreated', 'uts'))
Reported by Pylint.
Line: 26
Column: 5
class TurnerBaseIE(AdobePassIE):
    _AKAMAI_SPE_TOKEN_CACHE = {}
    def _extract_timestamp(self, video_data):
        return int_or_none(xpath_attr(video_data, 'dateCreated', 'uts'))
    def _add_akamai_spe_token(self, tokenizer_src, video_url, content_id, ap_data, custom_tokenizer_query=None):
        secure_path = self._search_regex(r'https?://[^/]+(.+/)', video_url, 'secure path') + '*'
        token = self._AKAMAI_SPE_TOKEN_CACHE.get(secure_path)
Reported by Pylint.
devscripts/make_lazy_extractors.py
22 issues
Line: 17
Column: 1
if os.path.exists(lazy_extractors_filename):
    os.remove(lazy_extractors_filename)
from youtube_dl.extractor import _ALL_CLASSES
from youtube_dl.extractor.common import InfoExtractor, SearchInfoExtractor
with open('devscripts/lazy_load_template.py', 'rt') as f:
    module_template = f.read()
Reported by Pylint.
Line: 18
Column: 1
    os.remove(lazy_extractors_filename)
from youtube_dl.extractor import _ALL_CLASSES
from youtube_dl.extractor.common import InfoExtractor, SearchInfoExtractor
with open('devscripts/lazy_load_template.py', 'rt') as f:
    module_template = f.read()
module_contents = [
Reported by Pylint.
Line: 49
Column: 23
        return base.__name__
def build_lazy_ie(ie, name):
    valid_url = getattr(ie, '_VALID_URL', None)
    s = ie_template.format(
        name=name,
        bases=', '.join(map(get_base_name, ie.__bases__)),
        valid_url=valid_url,
Reported by Pylint.
Line: 49
Column: 19
        return base.__name__
def build_lazy_ie(ie, name):
    valid_url = getattr(ie, '_VALID_URL', None)
    s = ie_template.format(
        name=name,
        bases=', '.join(map(get_base_name, ie.__bases__)),
        valid_url=valid_url,
Reported by Pylint.
Line: 60
Column: 51
        s += '\n' + getsource(ie.suitable)
    if hasattr(ie, '_make_valid_url'):
        # search extractors
        s += make_valid_template.format(valid_url=ie._make_valid_url())
    return s
# find the correct sorting and add the required base classes so that subclasses
# can be correctly created
Reported by Pylint.
Line: 1
Column: 1
from __future__ import unicode_literals, print_function
from inspect import getsource
import io
import os
from os.path import dirname as dirn
import sys
print('WARNING: Lazy loading extractors is an experimental feature that may not always work', file=sys.stderr)
Reported by Pylint.
Line: 9
Column: 1
from os.path import dirname as dirn
import sys
print('WARNING: Lazy loading extractors is an experimental feature that may not always work', file=sys.stderr)
sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
lazy_extractors_filename = sys.argv[1]
if os.path.exists(lazy_extractors_filename):
Reported by Pylint.
Line: 17
Column: 1
if os.path.exists(lazy_extractors_filename):
    os.remove(lazy_extractors_filename)
from youtube_dl.extractor import _ALL_CLASSES
from youtube_dl.extractor.common import InfoExtractor, SearchInfoExtractor
with open('devscripts/lazy_load_template.py', 'rt') as f:
    module_template = f.read()
Reported by Pylint.
Line: 18
Column: 1
    os.remove(lazy_extractors_filename)
from youtube_dl.extractor import _ALL_CLASSES
from youtube_dl.extractor.common import InfoExtractor, SearchInfoExtractor
with open('devscripts/lazy_load_template.py', 'rt') as f:
    module_template = f.read()
module_contents = [
Reported by Pylint.
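The repeated findings on lines 17-18 sit on imports that appear after executable code, which is consistent with a wrong-import-position message (assumed; the report omits the text). These imports can only run after the sys.path tweak above them, so the conventional remedy is an inline suppression rather than moving them:
    sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))

    from youtube_dl.extractor import _ALL_CLASSES  # noqa: E402  # pylint: disable=wrong-import-position
    from youtube_dl.extractor.common import InfoExtractor, SearchInfoExtractor  # noqa: E402  # pylint: disable=wrong-import-position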
Line: 27
Column: 1
    module_template + '\n' + getsource(InfoExtractor.suitable) + '\n',
    'class LazyLoadSearchExtractor(LazyLoadExtractor):\n    pass\n']
ie_template = '''
class {name}({bases}):
    _VALID_URL = {valid_url!r}
    _module = '{module}'
'''
Reported by Pylint.
youtube_dl/extractor/mailru.py
21 issues
Line: 8
Column: 1
import json
import re
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
from ..utils import (
    int_or_none,
    parse_duration,
    remove_end,
Reported by Pylint.
Line: 9
Column: 1
import re
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
from ..utils import (
    int_or_none,
    parse_duration,
    remove_end,
    try_get,
Reported by Pylint.
Line: 10
Column: 1
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
from ..utils import (
    int_or_none,
    parse_duration,
    remove_end,
    try_get,
)
Reported by Pylint.
Line: 1
Column: 1
# coding: utf-8
from __future__ import unicode_literals
import itertools
import json
import re
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
Reported by Pylint.
Line: 18
Column: 1
)
class MailRuIE(InfoExtractor):
    IE_NAME = 'mailru'
    IE_DESC = 'Видео@Mail.Ru'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:(?:www|m)\.)?my\.mail\.ru/+
Reported by Pylint.
Line: 18
Column: 1
)
class MailRuIE(InfoExtractor):
    IE_NAME = 'mailru'
    IE_DESC = 'Видео@Mail.Ru'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:(?:www|m)\.)?my\.mail\.ru/+
Reported by Pylint.
Line: 99
Column: 5
        }
    ]
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        meta_id = mobj.group('metaid')
        video_id = None
        if meta_id:
Reported by Pylint.
Line: 138
Column: 13
            headers['Cookie'] = 'video_key=%s' % video_key.value
        formats = []
        for f in video_data['videos']:
            video_url = f.get('url')
            if not video_url:
                continue
            format_id = f.get('key')
            height = int_or_none(self._search_regex(
Reported by Pylint.
Line: 182
Column: 1
        }
class MailRuMusicSearchBaseIE(InfoExtractor):
    def _search(self, query, url, audio_id, limit=100, offset=0):
        search = self._download_json(
            'https://my.mail.ru/cgi-bin/my/ajax', audio_id,
            'Downloading songs JSON page %d' % (offset // limit + 1),
            headers={
Reported by Pylint.
Line: 182
Column: 1
        }
class MailRuMusicSearchBaseIE(InfoExtractor):
    def _search(self, query, url, audio_id, limit=100, offset=0):
        search = self._download_json(
            'https://my.mail.ru/cgi-bin/my/ajax', audio_id,
            'Downloading songs JSON page %d' % (offset // limit + 1),
            headers={
Reported by Pylint.
youtube_dl/extractor/vlive.py
21 issues
Line: 7
Column: 1
import itertools
import json
from .naver import NaverBaseIE
from ..compat import (
    compat_HTTPError,
    compat_str,
)
from ..utils import (
Reported by Pylint.
Line: 8
Column: 1
import json
from .naver import NaverBaseIE
from ..compat import (
    compat_HTTPError,
    compat_str,
)
from ..utils import (
    ExtractorError,
Reported by Pylint.
Line: 12
Column: 1
    compat_HTTPError,
    compat_str,
)
from ..utils import (
    ExtractorError,
    int_or_none,
    merge_dicts,
    str_or_none,
    strip_or_none,
Reported by Pylint.
Line: 1
Column: 1
# coding: utf-8
from __future__ import unicode_literals
import itertools
import json
from .naver import NaverBaseIE
from ..compat import (
    compat_HTTPError,
Reported by Pylint.
Line: 23
Column: 1
)
class VLiveBaseIE(NaverBaseIE):
    _APP_ID = '8c6cc7b45d2568fb668be6e05b6e5a3b'
class VLiveIE(VLiveBaseIE):
    IE_NAME = 'vlive'
Reported by Pylint.
Line: 23
Column: 1
)
class VLiveBaseIE(NaverBaseIE):
    _APP_ID = '8c6cc7b45d2568fb668be6e05b6e5a3b'
class VLiveIE(VLiveBaseIE):
    IE_NAME = 'vlive'
Reported by Pylint.
Line: 27
Column: 1
    _APP_ID = '8c6cc7b45d2568fb668be6e05b6e5a3b'
class VLiveIE(VLiveBaseIE):
    IE_NAME = 'vlive'
    _VALID_URL = r'https?://(?:(?:www|m)\.)?vlive\.tv/(?:video|embed)/(?P<id>[0-9]+)'
    _NETRC_MACHINE = 'vlive'
    _TESTS = [{
        'url': 'http://www.vlive.tv/video/1326',
Reported by Pylint.
Line: 27
Column: 1
    _APP_ID = '8c6cc7b45d2568fb668be6e05b6e5a3b'
class VLiveIE(VLiveBaseIE):
    IE_NAME = 'vlive'
    _VALID_URL = r'https?://(?:(?:www|m)\.)?vlive\.tv/(?:video|embed)/(?P<id>[0-9]+)'
    _NETRC_MACHINE = 'vlive'
    _TESTS = [{
        'url': 'http://www.vlive.tv/video/1326',
Reported by Pylint.
Line: 93
Column: 9
            return try_get(
                login_info, lambda x: x['message']['login'], bool) or False
        LOGIN_URL = 'https://www.vlive.tv/auth/email/login'
        self._request_webpage(
            LOGIN_URL, None, note='Downloading login cookies')
        self._download_webpage(
            LOGIN_URL, None, note='Logging in',
Reported by Pylint.
Line: 117
Column: 9
                'https://www.vlive.tv/globalv-web/vam-web/' + path_template % video_id, video_id,
                'Downloading %s JSON metadata' % path_template.split('/')[-1].split('-')[0],
                headers={'Referer': 'https://www.vlive.tv/'}, query=query)
        except ExtractorError as e:
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
                self.raise_login_required(json.loads(e.cause.read().decode('utf-8'))['message'])
            raise
    def _real_extract(self, url):
Reported by Pylint.
youtube_dl/extractor/qqmusic.py
21 issues
Line: 8
Column: 1
import re
import time
from .common import InfoExtractor
from ..utils import (
    clean_html,
    ExtractorError,
    strip_jsonp,
    unescapeHTML,
Reported by Pylint.
Line: 9
Column: 1
import time
from .common import InfoExtractor
from ..utils import (
    clean_html,
    ExtractorError,
    strip_jsonp,
    unescapeHTML,
)
Reported by Pylint.
Line: 1
Column: 1
# coding: utf-8
from __future__ import unicode_literals
import random
import re
import time
from .common import InfoExtractor
from ..utils import (
Reported by Pylint.
Line: 17
Column: 1
)
class QQMusicIE(InfoExtractor):
    IE_NAME = 'qqmusic'
    IE_DESC = 'QQ音乐'
    _VALID_URL = r'https?://y\.qq\.com/n/yqq/song/(?P<id>[0-9A-Za-z]+)\.html'
    _TESTS = [{
        'url': 'https://y.qq.com/n/yqq/song/004295Et37taLD.html',
Reported by Pylint.
Line: 17
Column: 1
)
class QQMusicIE(InfoExtractor):
    IE_NAME = 'qqmusic'
    IE_DESC = 'QQ音乐'
    _VALID_URL = r'https?://y\.qq\.com/n/yqq/song/(?P<id>[0-9A-Za-z]+)\.html'
    _TESTS = [{
        'url': 'https://y.qq.com/n/yqq/song/004295Et37taLD.html',
Reported by Pylint.
Line: 72
Column: 5
    # Reference: m_r_GetRUin() in top_player.js
    # http://imgcache.gtimg.cn/music/portal_v3/y/top_player.js
    @staticmethod
    def m_r_get_ruin():
        curMs = int(time.time() * 1000) % 1000
        return int(round(random.random() * 2147483647) * curMs % 1E10)
    def _real_extract(self, url):
        mid = self._match_id(url)
Reported by Pylint.
Line: 73
Column: 9
    # http://imgcache.gtimg.cn/music/portal_v3/y/top_player.js
    @staticmethod
    def m_r_get_ruin():
        curMs = int(time.time() * 1000) % 1000
        return int(round(random.random() * 2147483647) * curMs % 1E10)
    def _real_extract(self, url):
        mid = self._match_id(url)
Reported by Pylint.
Line: 74
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b311-random
    @staticmethod
    def m_r_get_ruin():
        curMs = int(time.time() * 1000) % 1000
        return int(round(random.random() * 2147483647) * curMs % 1E10)
    def _real_extract(self, url):
        mid = self._match_id(url)
        detail_info_page = self._download_webpage(
Reported by Bandit.
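B311 warns that the random module is not suitable for security or cryptographic purposes. The value generated here only cache-busts a request URL, so the finding is informational; if a CSPRNG were ever needed, random.SystemRandom is a drop-in replacement:
    import random
    import time

    # SystemRandom draws from os.urandom() instead of the Mersenne Twister.
    cur_ms = int(time.time() * 1000) % 1000
    guid = int(round(random.SystemRandom().random() * 2147483647) * cur_ms % 1E10)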
Line: 76
Column: 5
        curMs = int(time.time() * 1000) % 1000
        return int(round(random.random() * 2147483647) * curMs % 1E10)
    def _real_extract(self, url):
        mid = self._match_id(url)
        detail_info_page = self._download_webpage(
            'http://s.plcloud.music.qq.com/fcgi-bin/fcg_yqq_song_detail_info.fcg?songmid=%s&play=0' % mid,
            mid, note='Download song detail info',
Reported by Pylint.
Line: 80
Column: 1
        mid = self._match_id(url)
        detail_info_page = self._download_webpage(
            'http://s.plcloud.music.qq.com/fcgi-bin/fcg_yqq_song_detail_info.fcg?songmid=%s&play=0' % mid,
            mid, note='Download song detail info',
            errnote='Unable to get song detail info', encoding='gbk')
        song_name = self._html_search_regex(
            r"songname:\s*'([^']+)'", detail_info_page, 'song name')
Reported by Pylint.
youtube_dl/extractor/tagesschau.py
21 issues
Line: 6
Column: 1
import re
from .common import InfoExtractor
from ..utils import (
    determine_ext,
    js_to_json,
    parse_iso8601,
    parse_filesize,
Reported by Pylint.
Line: 7
Column: 1
import re
from .common import InfoExtractor
from ..utils import (
    determine_ext,
    js_to_json,
    parse_iso8601,
    parse_filesize,
)
Reported by Pylint.
Line: 225
Column: 13
            format_id = self._search_regex(
                r'.*/[^/.]+\.([^/]+)\.[^/.]+$', link_url, 'format ID',
                default=determine_ext(link_url))
            format = {
                'format_id': format_id,
                'url': l.group('url'),
                'format_name': l.group('name'),
            }
            title = l.group('title')
Reported by Pylint.
Line: 1
Column: 1
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
    determine_ext,
    js_to_json,
Reported by Pylint.
Line: 15
Column: 1
)
class TagesschauPlayerIE(InfoExtractor):
    IE_NAME = 'tagesschau:player'
    _VALID_URL = r'https?://(?:www\.)?tagesschau\.de/multimedia/(?P<kind>audio|video)/(?P=kind)-(?P<id>\d+)~player(?:_[^/?#&]+)?\.html'
    _TESTS = [{
        'url': 'http://www.tagesschau.de/multimedia/video/video-179517~player.html',
Reported by Pylint.
Line: 15
Column: 1
)
class TagesschauPlayerIE(InfoExtractor):
    IE_NAME = 'tagesschau:player'
    _VALID_URL = r'https?://(?:www\.)?tagesschau\.de/multimedia/(?P<kind>audio|video)/(?P=kind)-(?P<id>\d+)~player(?:_[^/?#&]+)?\.html'
    _TESTS = [{
        'url': 'http://www.tagesschau.de/multimedia/video/video-179517~player.html',
Reported by Pylint.
Line: 17
Column: 1
class TagesschauPlayerIE(InfoExtractor):
    IE_NAME = 'tagesschau:player'
    _VALID_URL = r'https?://(?:www\.)?tagesschau\.de/multimedia/(?P<kind>audio|video)/(?P=kind)-(?P<id>\d+)~player(?:_[^/?#&]+)?\.html'
    _TESTS = [{
        'url': 'http://www.tagesschau.de/multimedia/video/video-179517~player.html',
        'md5': '8d09548d5c15debad38bee3a4d15ca21',
        'info_dict': {
Reported by Pylint.
Line: 80
Column: 5
            'formats': formats,
        }
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        # kind = mobj.group('kind').lower()
        # if kind == 'video':
Reported by Pylint.
Line: 106
Column: 13
            quality = media.get('quality')
            kind = media.get('type', '').split('/')[0]
            ext = determine_ext(src)
            f = {
                'url': src,
                'format_id': '%s_%s' % (quality, ext) if quality else ext,
                'ext': ext,
                'vcodec': 'none' if kind == 'audio' else None,
            }
Reported by Pylint.
Line: 127
Column: 1
        }
class TagesschauIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?tagesschau\.de/(?P<path>[^/]+/(?:[^/]+/)*?(?P<id>[^/#?]+?(?:-?[0-9]+)?))(?:~_?[^/#?]+?)?\.html'
    _TESTS = [{
        'url': 'http://www.tagesschau.de/multimedia/video/video-102143.html',
        'md5': 'f7c27a0eff3bfe8c7727e65f8fe1b1e6',
Reported by Pylint.