The following issues were found:
youtube_dl/extractor/leeco.py
32 issues
Line: 9
Column: 1
import re
import time
from .common import InfoExtractor
from ..compat import (
compat_b64decode,
compat_ord,
compat_str,
compat_urllib_parse_urlencode,
Reported by Pylint.
Line: 10
Column: 1
import time
from .common import InfoExtractor
from ..compat import (
compat_b64decode,
compat_ord,
compat_str,
compat_urllib_parse_urlencode,
)
Reported by Pylint.
Line: 16
Column: 1
compat_str,
compat_urllib_parse_urlencode,
)
from ..utils import (
determine_ext,
encode_data_uri,
ExtractorError,
int_or_none,
orderedSet,
Reported by Pylint.
Line: 248
Column: 36
# Currently old domain names are still used in playlists
media_ids = orderedSet(re.findall(
r'<a[^>]+href="http://www\.letv\.com/ptv/vplay/(\d+)\.html', page))
entries = [self.url_result(LeIE._URL_TEMPLATE % media_id, ie='Le')
for media_id in media_ids]
title = self._html_search_meta('keywords', page,
fatal=False).split(',')[0]
description = self._html_search_meta('description', page, fatal=False)
Reported by Pylint.
Line: 300
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b303-md5
salt = 'fbeh5player12c43eccf2bec3300344'
items = ['cf', 'ran', 'uu', 'bver', 'vu']
input_data = ''.join([item + obj[item] for item in items]) + salt
obj['sign'] = hashlib.md5(input_data.encode('utf-8')).hexdigest()
def _get_formats(self, cf, uu, vu, media_id):
def get_play_json(cf, timestamp):
data = {
'cf': cf,
Reported by Bandit.
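Note on the B303 finding above: Bandit flags every hashlib.md5() call, but in leeco.py the digest only signs API request parameters, so switching algorithms is not really an option and the finding is a candidate for inline suppression. A minimal sketch of that approach follows; sign_params, params and keys are illustrative names rather than identifiers from the extractor, and the "# nosec" marker (newer Bandit releases also accept a specific test id such as "# nosec B303") is the assumed suppression mechanism.
import hashlib


def sign_params(params, keys, salt):
    # Hypothetical helper: concatenate the selected parameters in order,
    # append the salt and return the MD5 hex digest. MD5 is used purely
    # as a request signature, not for anything security-sensitive, so
    # the Bandit finding is silenced on this line only.
    payload = ''.join(k + params[k] for k in keys) + salt
    return hashlib.md5(payload.encode('utf-8')).hexdigest()  # nosec


print(sign_params({'cf': 'flash', 'ran': '0.1'}, ['cf', 'ran'],
                  'fbeh5player12c43eccf2bec3300344'))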
Line: 1
Column: 1
# coding: utf-8
from __future__ import unicode_literals
import datetime
import hashlib
import re
import time
from .common import InfoExtractor
Reported by Pylint.
Line: 29
Column: 1
)
class LeIE(InfoExtractor):
IE_DESC = '乐视网'
_VALID_URL = r'https?://(?:www\.le\.com/ptv/vplay|(?:sports\.le|(?:www\.)?lesports)\.com/(?:match|video))/(?P<id>\d+)\.html'
_GEO_COUNTRIES = ['CN']
_URL_TEMPLATE = 'http://www.le.com/ptv/vplay/%s.html'
Reported by Pylint.
Line: 31
Column: 1
class LeIE(InfoExtractor):
IE_DESC = '乐视网'
_VALID_URL = r'https?://(?:www\.le\.com/ptv/vplay|(?:sports\.le|(?:www\.)?lesports)\.com/(?:match|video))/(?P<id>\d+)\.html'
_GEO_COUNTRIES = ['CN']
_URL_TEMPLATE = 'http://www.le.com/ptv/vplay/%s.html'
_TESTS = [{
'url': 'http://www.le.com/ptv/vplay/22005890.html',
Reported by Pylint.
Line: 83
Column: 5
}]
# ror() and calc_time_key() are reversed from a embedded swf file in LetvPlayer.swf
def ror(self, param1, param2):
_loc3_ = 0
while _loc3_ < param2:
param1 = urshift(param1, 1) + ((param1 & 1) << 31)
_loc3_ += 1
return param1
Reported by Pylint.
Line: 83
Column: 5
}]
# ror() and calc_time_key() are reversed from a embedded swf file in LetvPlayer.swf
def ror(self, param1, param2):
_loc3_ = 0
while _loc3_ < param2:
param1 = urshift(param1, 1) + ((param1 & 1) << 31)
_loc3_ += 1
return param1
Reported by Pylint.
test/test_download.py
32 issues
Line: 61
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b303-md5
def _file_md5(fn):
with open(fn, 'rb') as f:
return hashlib.md5(f.read()).hexdigest()
defs = gettestcases()
Reported by Bandit.
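Note on the B303 finding above: _file_md5() only checks downloaded test files against the 'md5' fields of the test cases, so the digest is not security-relevant. On Python 3.9+ the constructor can state that explicitly, which newer Bandit releases take into account; the sketch below assumes the usedforsecurity keyword (absent on older interpreters, hence the fallback) and is not the test suite's actual code.
import hashlib


def _file_md5(fn):
    # Checksum of a downloaded fixture, compared against the test case's
    # expected 'md5' value; the hash is not used for security.
    with open(fn, 'rb') as f:
        data = f.read()
    try:
        # Python 3.9+ accepts usedforsecurity=False, which newer Bandit
        # releases treat as an acceptable use of MD5.
        return hashlib.md5(data, usedforsecurity=False).hexdigest()
    except TypeError:
        # Older interpreters (including Python 2) lack the keyword.
        return hashlib.md5(data).hexdigest()  # nosec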
Line: 92
Column: 15
# Dynamically generate tests
def generator(test_case, tname):
def test_template(self):
ie = youtube_dl.extractor.get_info_extractor(test_case['name'])()
other_ies = [get_info_extractor(ie_key)() for ie_key in test_case.get('add_ie', [])]
is_playlist = any(k.startswith('playlist') for k in test_case)
Reported by Pylint.
Line: 92
Column: 26
# Dynamically generate tests
def generator(test_case, tname):
def test_template(self):
ie = youtube_dl.extractor.get_info_extractor(test_case['name'])()
other_ies = [get_info_extractor(ie_key)() for ie_key in test_case.get('add_ie', [])]
is_playlist = any(k.startswith('playlist') for k in test_case)
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
Reported by Pylint.
Line: 11
Column: 1
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import (
assertGreaterEqual,
expect_warnings,
get_params,
gettestcases,
expect_info_dict,
Reported by Pylint.
Line: 22
Column: 1
)
import hashlib
import io
import json
import socket
import youtube_dl.YoutubeDL
Reported by Pylint.
Line: 23
Column: 1
import hashlib
import io
import json
import socket
import youtube_dl.YoutubeDL
from youtube_dl.compat import (
Reported by Pylint.
Line: 24
Column: 1
import hashlib
import io
import json
import socket
import youtube_dl.YoutubeDL
from youtube_dl.compat import (
compat_http_client,
Reported by Pylint.
Line: 25
Column: 1
import hashlib
import io
import json
import socket
import youtube_dl.YoutubeDL
from youtube_dl.compat import (
compat_http_client,
compat_urllib_error,
Reported by Pylint.
Line: 27
Column: 1
import json
import socket
import youtube_dl.YoutubeDL
from youtube_dl.compat import (
compat_http_client,
compat_urllib_error,
compat_HTTPError,
)
Reported by Pylint.
youtube_dl/extractor/kaltura.py
31 issues
Line: 7
Column: 1
import re
import base64
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
compat_parse_qs,
)
from ..utils import (
Reported by Pylint.
Line: 8
Column: 1
import base64
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
compat_parse_qs,
)
from ..utils import (
clean_html,
Reported by Pylint.
Line: 12
Column: 1
compat_urlparse,
compat_parse_qs,
)
from ..utils import (
clean_html,
ExtractorError,
int_or_none,
unsmuggle_url,
smuggle_url,
Reported by Pylint.
Line: 172
Column: 5
urls.append(url)
return urls
def _kaltura_api_call(self, video_id, actions, service_url=None, *args, **kwargs):
params = actions[0]
if len(actions) > 1:
for i, a in enumerate(actions[1:], start=1):
for k, v in a.items():
params['%d:%s' % (i, k)] = v
Reported by Pylint.
Line: 1
Column: 1
# coding: utf-8
from __future__ import unicode_literals
import re
import base64
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
Reported by Pylint.
Line: 21
Column: 1
)
class KalturaIE(InfoExtractor):
_VALID_URL = r'''(?x)
(?:
kaltura:(?P<partner_id>\d+):(?P<id>[0-9a-z_]+)|
https?://
(:?(?:www|cdnapi(?:sec)?)\.)?kaltura\.com(?::\d+)?/
Reported by Pylint.
Line: 21
Column: 1
)
class KalturaIE(InfoExtractor):
_VALID_URL = r'''(?x)
(?:
kaltura:(?P<partner_id>\d+):(?P<id>[0-9a-z_]+)|
https?://
(:?(?:www|cdnapi(?:sec)?)\.)?kaltura\.com(?::\d+)?/
Reported by Pylint.
Line: 39
Column: 1
'''
_SERVICE_URL = 'http://cdnapi.kaltura.com'
_SERVICE_BASE = '/api_v3/index.php'
# See https://github.com/kaltura/server/blob/master/plugins/content/caption/base/lib/model/enums/CaptionType.php
_CAPTION_TYPES = {
1: 'srt',
2: 'ttml',
3: 'vtt',
}
Reported by Pylint.
Line: 61
Column: 1
},
},
{
'url': 'http://www.kaltura.com/index.php/kwidget/cache_st/1300318621/wid/_269692/uiconf_id/3873291/entry_id/1_1jc2y3e4',
'only_matching': True,
},
{
'url': 'https://cdnapisec.kaltura.com/index.php/kwidget/wid/_557781/uiconf_id/22845202/entry_id/1_plr1syf3',
'only_matching': True,
Reported by Pylint.
Line: 65
Column: 1
'only_matching': True,
},
{
'url': 'https://cdnapisec.kaltura.com/index.php/kwidget/wid/_557781/uiconf_id/22845202/entry_id/1_plr1syf3',
'only_matching': True,
},
{
'url': 'https://cdnapisec.kaltura.com/html5/html5lib/v2.30.2/mwEmbedFrame.php/p/1337/uiconf_id/20540612/entry_id/1_sf5ovm7u?wid=_243342',
'only_matching': True,
Reported by Pylint.
youtube_dl/extractor/bilibili.py
30 issues
Line: 7
Column: 1
import hashlib
import re
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urlparse,
)
from ..utils import (
Reported by Pylint.
Line: 8
Column: 1
import re
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urlparse,
)
from ..utils import (
ExtractorError,
Reported by Pylint.
Line: 12
Column: 1
compat_parse_qs,
compat_urlparse,
)
from ..utils import (
ExtractorError,
int_or_none,
float_or_none,
parse_iso8601,
smuggle_url,
Reported by Pylint.
Line: 169
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b303-md5
RENDITIONS = ('qn=80&quality=80&type=', 'quality=2&type=mp4')
for num, rendition in enumerate(RENDITIONS, start=1):
payload = 'appkey=%s&cid=%s&otype=json&%s' % (self._APP_KEY, cid, rendition)
sign = hashlib.md5((payload + self._BILIBILI_KEY).encode('utf-8')).hexdigest()
video_info = self._download_json(
'http://interface.bilibili.com/v2/playurl?%s&sign=%s' % (payload, sign),
video_id, note='Downloading video info page',
headers=headers, fatal=num == len(RENDITIONS))
Reported by Bandit.
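Note on the B303 finding above: the flagged line implements a simple request-signing scheme, where the playurl query string is concatenated with a secret key, hashed with MD5, and the hex digest is appended as the sign parameter. A standalone sketch of that scheme follows; app_key and secret are placeholders rather than the extractor's real constants, and the non-security use of MD5 is again marked inline for Bandit.
import hashlib


def build_signed_playurl_query(app_key, secret, cid, rendition):
    # Mirror of the signing logic in the snippet above: hash the payload
    # plus the secret key and append the digest as 'sign'. MD5 is only a
    # request signature here, so the Bandit finding is suppressed inline.
    payload = 'appkey=%s&cid=%s&otype=json&%s' % (app_key, cid, rendition)
    sign = hashlib.md5((payload + secret).encode('utf-8')).hexdigest()  # nosec
    return '%s&sign=%s' % (payload, sign)


print(build_signed_playurl_query('example_key', 'example_secret',
                                 '12345', 'qn=80&quality=80&type='))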
Line: 221
Column: 3
'uploadDate', webpage, 'timestamp', default=None))
thumbnail = self._html_search_meta(['og:image', 'thumbnailUrl'], webpage)
# TODO 'view_count' requires deobfuscating Javascript
info = {
'id': video_id,
'title': title,
'description': description,
'timestamp': timestamp,
Reported by Pylint.
Line: 1
Column: 1
# coding: utf-8
from __future__ import unicode_literals
import hashlib
import re
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
Reported by Pylint.
Line: 26
Column: 1
)
class BiliBiliIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://
(?:(?:www|bangumi)\.)?
bilibili\.(?:tv|com)/
(?:
Reported by Pylint.
Line: 26
Column: 1
)
class BiliBiliIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://
(?:(?:www|bangumi)\.)?
bilibili\.(?:tv|com)/
(?:
Reported by Pylint.
Line: 66
Column: 1
'id': '100643',
'ext': 'mp4',
'title': 'CHAOS;CHILD',
'description': '如果你是神明,并且能够让妄想成为现实。那你会进行怎么样的妄想?是淫靡的世界?独裁社会?毁灭性的制裁?还是……2015年,涩谷。从6年前发生的大灾害“涩谷地震”之后复兴了的这个街区里新设立的私立高中...',
},
'skip': 'Geo-restricted to China',
}, {
# Title with double quotes
'url': 'http://www.bilibili.com/video/av8903802/',
Reported by Pylint.
Line: 116
Column: 9
_BILIBILI_KEY = 'aHRmhWMLkdeMuILqORnYZocwMBpMEOdt'
def _report_error(self, result):
if 'message' in result:
raise ExtractorError('%s said: %s' % (self.IE_NAME, result['message']), expected=True)
elif 'code' in result:
raise ExtractorError('%s returns error %d' % (self.IE_NAME, result['code']), expected=True)
else:
raise ExtractorError('Can\'t extract Bangumi episode ID')
Reported by Pylint.
youtube_dl/extractor/tnaflix.py
30 issues
Line: 5
Column: 1
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
fix_xml_ampersands,
float_or_none,
int_or_none,
Reported by Pylint.
Line: 6
Column: 1
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
fix_xml_ampersands,
float_or_none,
int_or_none,
parse_duration,
Reported by Pylint.
Line: 7
Column: 1
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
fix_xml_ampersands,
float_or_none,
int_or_none,
parse_duration,
str_to_int,
Reported by Pylint.
Line: 1
Column: 1
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
fix_xml_ampersands,
float_or_none,
Reported by Pylint.
Line: 18
Column: 1
)
class TNAFlixNetworkBaseIE(InfoExtractor):
# May be overridden in descendants if necessary
_CONFIG_REGEX = [
r'flashvars\.config\s*=\s*escape\("(?P<url>[^"]+)"',
r'<input[^>]+name="config\d?" value="(?P<url>[^"]+)"',
r'config\s*=\s*(["\'])(?P<url>(?:https?:)?//(?:(?!\1).)+)\1',
Reported by Pylint.
Line: 18
Column: 1
)
class TNAFlixNetworkBaseIE(InfoExtractor):
# May be overridden in descendants if necessary
_CONFIG_REGEX = [
r'flashvars\.config\s*=\s*escape\("(?P<url>[^"]+)"',
r'<input[^>]+name="config\d?" value="(?P<url>[^"]+)"',
r'config\s*=\s*(["\'])(?P<url>(?:https?:)?//(?:(?!\1).)+)\1',
Reported by Pylint.
Line: 33
Column: 1
_VIEW_COUNT_REGEX = None
_COMMENT_COUNT_REGEX = None
_AVERAGE_RATING_REGEX = None
_CATEGORIES_REGEX = r'<li[^>]*>\s*<span[^>]+class="infoTitle"[^>]*>Categories:</span>\s*<span[^>]+class="listView"[^>]*>(.+?)</span>\s*</li>'
def _extract_thumbnails(self, flix_xml):
def get_child(elem, names):
for name in names:
Reported by Pylint.
Line: 35
Column: 5
_AVERAGE_RATING_REGEX = None
_CATEGORIES_REGEX = r'<li[^>]*>\s*<span[^>]+class="infoTitle"[^>]*>Categories:</span>\s*<span[^>]+class="listView"[^>]*>(.+?)</span>\s*</li>'
def _extract_thumbnails(self, flix_xml):
def get_child(elem, names):
for name in names:
child = elem.find(name)
if child is not None:
Reported by Pylint.
Line: 37
Column: 9
def _extract_thumbnails(self, flix_xml):
def get_child(elem, names):
for name in names:
child = elem.find(name)
if child is not None:
return child
Reported by Pylint.
Line: 75
Column: 5
'height': height,
} for i in range(first, last + 1)]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
for display_id_key in ('display_id', 'display_id_2'):
if display_id_key in mobj.groupdict():
display_id = mobj.group(display_id_key)
Reported by Pylint.
youtube_dl/extractor/gedidigital.py
30 issues
Line: 6
Column: 1
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
int_or_none,
)
Reported by Pylint.
Line: 7
Column: 1
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
int_or_none,
)
Reported by Pylint.
Line: 1
Column: 1
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
int_or_none,
Reported by Pylint.
Line: 13
Column: 1
)
class GediDigitalIE(InfoExtractor):
_VALID_URL = r'''(?x)https?://video\.
(?:
(?:
(?:espresso\.)?repubblica
|lastampa
Reported by Pylint.
Line: 13
Column: 1
)
class GediDigitalIE(InfoExtractor):
_VALID_URL = r'''(?x)https?://video\.
(?:
(?:
(?:espresso\.)?repubblica
|lastampa
Reported by Pylint.
Line: 37
Column: 1
)\.gelocal
)\.it(?:/[^/]+){2,3}?/(?P<id>\d+)(?:[/?&#]|$)'''
_TESTS = [{
'url': 'https://video.lastampa.it/politica/il-paradosso-delle-regionali-la-lega-vince-ma-sembra-aver-perso/121559/121683',
'md5': '84658d7fb9e55a6e57ecc77b73137494',
'info_dict': {
'id': '121559',
'ext': 'mp4',
'title': 'Il paradosso delle Regionali: ecco perché la Lega vince ma sembra aver perso',
Reported by Pylint.
Line: 48
Column: 1
'duration': 125,
},
}, {
'url': 'https://video.espresso.repubblica.it/embed/tutti-i-video/01-ted-villa/14772/14870&width=640&height=360',
'only_matching': True,
}, {
'url': 'https://video.repubblica.it/motori/record-della-pista-a-spa-francorchamps-la-pagani-huayra-roadster-bc-stupisce/367415/367963',
'only_matching': True,
}, {
Reported by Pylint.
Line: 51
Column: 1
'url': 'https://video.espresso.repubblica.it/embed/tutti-i-video/01-ted-villa/14772/14870&width=640&height=360',
'only_matching': True,
}, {
'url': 'https://video.repubblica.it/motori/record-della-pista-a-spa-francorchamps-la-pagani-huayra-roadster-bc-stupisce/367415/367963',
'only_matching': True,
}, {
'url': 'https://video.ilsecoloxix.it/sport/cassani-e-i-brividi-azzurri-ai-mondiali-di-imola-qui-mi-sono-innamorato-del-ciclismo-da-ragazzino-incredibile-tornarci-da-ct/66184/66267',
'only_matching': True,
}, {
Reported by Pylint.
Line: 54
Column: 1
'url': 'https://video.repubblica.it/motori/record-della-pista-a-spa-francorchamps-la-pagani-huayra-roadster-bc-stupisce/367415/367963',
'only_matching': True,
}, {
'url': 'https://video.ilsecoloxix.it/sport/cassani-e-i-brividi-azzurri-ai-mondiali-di-imola-qui-mi-sono-innamorato-del-ciclismo-da-ragazzino-incredibile-tornarci-da-ct/66184/66267',
'only_matching': True,
}, {
'url': 'https://video.iltirreno.gelocal.it/sport/dentro-la-notizia-ferrari-cosa-succede-a-maranello/141059/142723',
'only_matching': True,
}, {
Reported by Pylint.
Line: 57
Column: 1
'url': 'https://video.ilsecoloxix.it/sport/cassani-e-i-brividi-azzurri-ai-mondiali-di-imola-qui-mi-sono-innamorato-del-ciclismo-da-ragazzino-incredibile-tornarci-da-ct/66184/66267',
'only_matching': True,
}, {
'url': 'https://video.iltirreno.gelocal.it/sport/dentro-la-notizia-ferrari-cosa-succede-a-maranello/141059/142723',
'only_matching': True,
}, {
'url': 'https://video.messaggeroveneto.gelocal.it/locale/maria-giovanna-elmi-covid-vaccino/138155/139268',
'only_matching': True,
}, {
Reported by Pylint.
youtube_dl/extractor/vice.py
30 issues
Line: 11
Column: 1
import re
import time
from .adobepass import AdobePassIE
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..compat import (
compat_HTTPError,
compat_str,
Reported by Pylint.
Line: 12
Column: 1
import time
from .adobepass import AdobePassIE
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..compat import (
compat_HTTPError,
compat_str,
)
Reported by Pylint.
Line: 13
Column: 1
from .adobepass import AdobePassIE
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..compat import (
compat_HTTPError,
compat_str,
)
from ..utils import (
Reported by Pylint.
Line: 14
Column: 1
from .adobepass import AdobePassIE
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..compat import (
compat_HTTPError,
compat_str,
)
from ..utils import (
clean_html,
Reported by Pylint.
Line: 18
Column: 1
compat_HTTPError,
compat_str,
)
from ..utils import (
clean_html,
ExtractorError,
int_or_none,
OnDemandPagedList,
parse_age_limit,
Reported by Pylint.
Line: 160
Column: 17
if isinstance(e.cause, compat_HTTPError) and e.cause.code in (400, 401):
error = json.loads(e.cause.read().decode())
error_message = error.get('error_description') or error['details']
raise ExtractorError('%s said: %s' % (
self.IE_NAME, error_message), expected=True)
raise
video_data = preplay['video']
formats = self._extract_m3u8_formats(
Reported by Pylint.
Line: 319
Column: 20
'ie_key': ie_key,
}
vice_url = ViceIE._extract_url(body)
if vice_url:
return _url_res(vice_url, ViceIE.ie_key())
embed_code = self._search_regex(
r'embedCode=([^&\'"]+)', body,
Reported by Pylint.
Line: 329
Column: 23
if embed_code:
return _url_res('ooyala:%s' % embed_code, 'Ooyala')
youtube_url = YoutubeIE._extract_url(body)
if youtube_url:
return _url_res(youtube_url, YoutubeIE.ie_key())
video_url = self._html_search_regex(
r'data-video-url="([^"]+)"',
Reported by Pylint.
Line: 1
Column: 1
# coding: utf-8
from __future__ import unicode_literals
import functools
import hashlib
import json
import random
import re
import time
Reported by Pylint.
Line: 29
Column: 1
)
class ViceBaseIE(InfoExtractor):
def _call_api(self, resource, resource_key, resource_id, locale, fields, args=''):
return self._download_json(
'https://video.vice.com/api/v1/graphql', resource_id, query={
'query': '''{
%s(locale: "%s", %s: "%s"%s) {
Reported by Pylint.
youtube_dl/extractor/daum.py
30 issues
Line: 7
Column: 1
import itertools
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urllib_parse_unquote,
compat_urlparse,
)
Reported by Pylint.
Line: 8
Column: 1
import itertools
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urllib_parse_unquote,
compat_urlparse,
)
Reported by Pylint.
Line: 163
Column: 40
clip_id = query_dict['clipid'][0]
if self._downloader.params.get('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % clip_id)
return self.url_result(DaumClipIE._URL_TEMPLATE % clip_id, 'DaumClip')
else:
self.to_screen('Downloading playlist %s - add --no-playlist to just download video' % list_id)
class DaumPlaylistIE(DaumListIE):
Reported by Pylint.
Line: 262
Column: 36
query_dict = compat_parse_qs(compat_urlparse.urlparse(url).query)
if 'playlistid' in query_dict:
playlist_id = query_dict['playlistid'][0]
return self.url_result(DaumPlaylistIE._URL_TEMPLATE % playlist_id, 'DaumPlaylist')
name, entries = self._get_entries(list_id, 'ownerid')
return self.playlist_result(entries, list_id, name)
Reported by Pylint.
Line: 1
Column: 1
# coding: utf-8
from __future__ import unicode_literals
import itertools
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
Reported by Pylint.
Line: 15
Column: 1
)
class DaumBaseIE(InfoExtractor):
_KAKAO_EMBED_BASE = 'http://tv.kakao.com/embed/player/cliplink/'
class DaumIE(DaumBaseIE):
_VALID_URL = r'https?://(?:(?:m\.)?tvpot\.daum\.net/v/|videofarm\.daum\.net/controller/player/VodPlayer\.swf\?vid=)(?P<id>[^?#&]+)'
Reported by Pylint.
Line: 15
Column: 1
)
class DaumBaseIE(InfoExtractor):
_KAKAO_EMBED_BASE = 'http://tv.kakao.com/embed/player/cliplink/'
class DaumIE(DaumBaseIE):
_VALID_URL = r'https?://(?:(?:m\.)?tvpot\.daum\.net/v/|videofarm\.daum\.net/controller/player/VodPlayer\.swf\?vid=)(?P<id>[^?#&]+)'
Reported by Pylint.
Line: 19
Column: 1
_KAKAO_EMBED_BASE = 'http://tv.kakao.com/embed/player/cliplink/'
class DaumIE(DaumBaseIE):
_VALID_URL = r'https?://(?:(?:m\.)?tvpot\.daum\.net/v/|videofarm\.daum\.net/controller/player/VodPlayer\.swf\?vid=)(?P<id>[^?#&]+)'
IE_NAME = 'daum.net'
_TESTS = [{
'url': 'http://tvpot.daum.net/v/vab4dyeDBysyBssyukBUjBz',
Reported by Pylint.
Line: 19
Column: 1
_KAKAO_EMBED_BASE = 'http://tv.kakao.com/embed/player/cliplink/'
class DaumIE(DaumBaseIE):
_VALID_URL = r'https?://(?:(?:m\.)?tvpot\.daum\.net/v/|videofarm\.daum\.net/controller/player/VodPlayer\.swf\?vid=)(?P<id>[^?#&]+)'
IE_NAME = 'daum.net'
_TESTS = [{
'url': 'http://tvpot.daum.net/v/vab4dyeDBysyBssyukBUjBz',
Reported by Pylint.
Line: 20
Column: 1
class DaumIE(DaumBaseIE):
_VALID_URL = r'https?://(?:(?:m\.)?tvpot\.daum\.net/v/|videofarm\.daum\.net/controller/player/VodPlayer\.swf\?vid=)(?P<id>[^?#&]+)'
IE_NAME = 'daum.net'
_TESTS = [{
'url': 'http://tvpot.daum.net/v/vab4dyeDBysyBssyukBUjBz',
'info_dict': {
Reported by Pylint.
youtube_dl/extractor/skyit.py
29 issues
Line: 4
Column: 1
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_parse_qs,
compat_urllib_parse_urlparse,
)
Reported by Pylint.
Line: 5
Column: 1
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_parse_qs,
compat_urllib_parse_urlparse,
)
from ..utils import (
Reported by Pylint.
Line: 10
Column: 1
compat_parse_qs,
compat_urllib_parse_urlparse,
)
from ..utils import (
dict_get,
int_or_none,
parse_duration,
unified_timestamp,
)
Reported by Pylint.
Line: 1
Column: 1
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_parse_qs,
compat_urllib_parse_urlparse,
)
Reported by Pylint.
Line: 18
Column: 1
)
class SkyItPlayerIE(InfoExtractor):
IE_NAME = 'player.sky.it'
_VALID_URL = r'https?://player\.sky\.it/player/(?:external|social)\.html\?.*?\bid=(?P<id>\d+)'
_GEO_BYPASS = False
_DOMAIN = 'sky'
_PLAYER_TMPL = 'https://player.sky.it/player/external.html?id=%s&domain=%s'
Reported by Pylint.
Line: 18
Column: 1
)
class SkyItPlayerIE(InfoExtractor):
IE_NAME = 'player.sky.it'
_VALID_URL = r'https?://player\.sky\.it/player/(?:external|social)\.html\?.*?\bid=(?P<id>\d+)'
_GEO_BYPASS = False
_DOMAIN = 'sky'
_PLAYER_TMPL = 'https://player.sky.it/player/external.html?id=%s&domain=%s'
Reported by Pylint.
Line: 63
Column: 1
'thumbnail': dict_get(video, ('video_still', 'video_still_medium', 'thumb')),
'description': video.get('short_desc') or None,
'timestamp': unified_timestamp(video.get('create_date')),
'duration': int_or_none(video.get('duration_sec')) or parse_duration(video.get('duration')),
'is_live': is_live,
}
def _real_extract(self, url):
video_id = self._match_id(url)
Reported by Pylint.
Line: 82
Column: 1
return self._parse_video(video, video_id)
class SkyItVideoIE(SkyItPlayerIE):
IE_NAME = 'video.sky.it'
_VALID_URL = r'https?://(?:masterchef|video|xfactor)\.sky\.it(?:/[^/]+)*/video/[0-9a-z-]+-(?P<id>\d+)'
_TESTS = [{
'url': 'https://video.sky.it/news/mondo/video/uomo-ucciso-da-uno-squalo-in-australia-631227',
'md5': 'fe5c91e59a84a3437eaa0bca6e134ccd',
Reported by Pylint.
Line: 82
Column: 1
return self._parse_video(video, video_id)
class SkyItVideoIE(SkyItPlayerIE):
IE_NAME = 'video.sky.it'
_VALID_URL = r'https?://(?:masterchef|video|xfactor)\.sky\.it(?:/[^/]+)*/video/[0-9a-z-]+-(?P<id>\d+)'
_TESTS = [{
'url': 'https://video.sky.it/news/mondo/video/uomo-ucciso-da-uno-squalo-in-australia-631227',
'md5': 'fe5c91e59a84a3437eaa0bca6e134ccd',
Reported by Pylint.
Line: 84
Column: 1
class SkyItVideoIE(SkyItPlayerIE):
IE_NAME = 'video.sky.it'
_VALID_URL = r'https?://(?:masterchef|video|xfactor)\.sky\.it(?:/[^/]+)*/video/[0-9a-z-]+-(?P<id>\d+)'
_TESTS = [{
'url': 'https://video.sky.it/news/mondo/video/uomo-ucciso-da-uno-squalo-in-australia-631227',
'md5': 'fe5c91e59a84a3437eaa0bca6e134ccd',
'info_dict': {
'id': '631227',
Reported by Pylint.
youtube_dl/extractor/zdf.py
29 issues
Line: 6
Column: 1
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
determine_ext,
float_or_none,
int_or_none,
Reported by Pylint.
Line: 7
Column: 1
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
determine_ext,
float_or_none,
int_or_none,
merge_dicts,
Reported by Pylint.
Line: 8
Column: 1
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
determine_ext,
float_or_none,
int_or_none,
merge_dicts,
NO_DEFAULT,
Reported by Pylint.
Line: 335
Column: 9
return self.playlist_result(
entries, channel_id, self._og_search_title(webpage, fatal=False))
r"""
player = self._extract_player(webpage, channel_id)
channel_id = self._search_regex(
r'docId\s*:\s*(["\'])(?P<id>(?!\1).+?)\1', webpage,
'channel id', group='id')
Reported by Pylint.
Line: 335
Column: 9
return self.playlist_result(
entries, channel_id, self._og_search_title(webpage, fatal=False))
r"""
player = self._extract_player(webpage, channel_id)
channel_id = self._search_regex(
r'docId\s*:\s*(["\'])(?P<id>(?!\1).+?)\1', webpage,
'channel id', group='id')
Reported by Pylint.
Line: 1
Column: 1
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
determine_ext,
Reported by Pylint.
Line: 25
Column: 1
)
class ZDFBaseIE(InfoExtractor):
_GEO_COUNTRIES = ['DE']
_QUALITIES = ('auto', 'low', 'med', 'high', 'veryhigh', 'hd')
def _call_api(self, url, video_id, item, api_token=None, referrer=None):
headers = {}
Reported by Pylint.
Line: 25
Column: 1
)
class ZDFBaseIE(InfoExtractor):
_GEO_COUNTRIES = ['DE']
_QUALITIES = ('auto', 'low', 'med', 'high', 'veryhigh', 'hd')
def _call_api(self, url, video_id, item, api_token=None, referrer=None):
headers = {}
Reported by Pylint.
Line: 29
Column: 5
_GEO_COUNTRIES = ['DE']
_QUALITIES = ('auto', 'low', 'med', 'high', 'veryhigh', 'hd')
def _call_api(self, url, video_id, item, api_token=None, referrer=None):
headers = {}
if api_token:
headers['Api-Auth'] = 'Bearer %s' % api_token
if referrer:
headers['Referer'] = referrer
Reported by Pylint.
Line: 65
Column: 1
entry_protocol='m3u8_native', fatal=False))
elif mime_type == 'application/f4m+xml' or ext == 'f4m':
formats.extend(self._extract_f4m_formats(
update_url_query(format_url, {'hdcore': '3.7.0'}), video_id, f4m_id='hds', fatal=False))
else:
f = parse_codecs(meta.get('mimeCodec'))
format_id = ['http']
for p in (meta.get('type'), meta.get('quality')):
if p and isinstance(p, compat_str):
Reported by Pylint.