The following issues were found:
youtube_dl/extractor/europa.py
11 issues
Line: 4
Column: 1
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
int_or_none,
orderedSet,
parse_duration,
Reported by Pylint.
Line: 5
Column: 1
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
int_or_none,
orderedSet,
parse_duration,
qualities,
Reported by Pylint.
Line: 6
Column: 1
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
int_or_none,
orderedSet,
parse_duration,
qualities,
unified_strdate,
Reported by Pylint.
Line: 1
Column: 1
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
int_or_none,
orderedSet,
parse_duration,
Reported by Pylint.
Line: 16
Column: 1
)
class EuropaIE(InfoExtractor):
_VALID_URL = r'https?://ec\.europa\.eu/avservices/(?:video/player|audio/audioDetails)\.cfm\?.*?\bref=(?P<id>[A-Za-z0-9-]+)'
_TESTS = [{
'url': 'http://ec.europa.eu/avservices/video/player.cfm?ref=I107758',
'md5': '574f080699ddd1e19a675b0ddf010371',
'info_dict': {
Reported by Pylint.
Line: 16
Column: 1
)
class EuropaIE(InfoExtractor):
_VALID_URL = r'https?://ec\.europa\.eu/avservices/(?:video/player|audio/audioDetails)\.cfm\?.*?\bref=(?P<id>[A-Za-z0-9-]+)'
_TESTS = [{
'url': 'http://ec.europa.eu/avservices/video/player.cfm?ref=I107758',
'md5': '574f080699ddd1e19a675b0ddf010371',
'info_dict': {
Reported by Pylint.
Line: 17
Column: 1
class EuropaIE(InfoExtractor):
_VALID_URL = r'https?://ec\.europa\.eu/avservices/(?:video/player|audio/audioDetails)\.cfm\?.*?\bref=(?P<id>[A-Za-z0-9-]+)'
_TESTS = [{
'url': 'http://ec.europa.eu/avservices/video/player.cfm?ref=I107758',
'md5': '574f080699ddd1e19a675b0ddf010371',
'info_dict': {
'id': 'I107758',
Reported by Pylint.
Line: 40
Column: 5
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
playlist = self._download_xml(
'http://ec.europa.eu/avservices/video/player/playlist.cfm?ID=%s' % video_id, video_id)
Reported by Pylint.
Line: 46
Column: 9
playlist = self._download_xml(
'http://ec.europa.eu/avservices/video/player/playlist.cfm?ID=%s' % video_id, video_id)
def get_item(type_, preference):
items = {}
for item in playlist.findall('./info/%s/item' % type_):
lang, label = xpath_text(item, 'lg', default=None), xpath_text(item, 'label', default=None)
if lang and label:
items[lang] = label.strip()
Reported by Pylint.
Line: 49
Column: 1
def get_item(type_, preference):
items = {}
for item in playlist.findall('./info/%s/item' % type_):
lang, label = xpath_text(item, 'lg', default=None), xpath_text(item, 'label', default=None)
if lang and label:
items[lang] = label.strip()
for p in preference:
if items.get(p):
return items[p]
Reported by Pylint.
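Note: the report omits Pylint's message text, so the exact checks behind these europa.py entries are not certain; findings anchored at column 1 of the module header and at the class and _real_extract definitions are commonly docstring-related. A minimal sketch of that kind of fix follows (EuropaIE is the real class name, but the describe method and its body are illustrative stubs, not the actual extractor):

# coding: utf-8
"""Extract videos from the ec.europa.eu AV services player (sketch only)."""


class EuropaIE(object):
    """Minimal stand-in showing class- and method-level docstrings."""

    def describe(self, video_id):
        """Say what would be extracted for the given clip reference."""
        return 'Europa clip %s' % video_id


print(EuropaIE().describe('I107758'))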
test/test_cache.py
10 issues
Line: 1
Column: 1
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
import shutil
# Allow direct execution
import os
Reported by Pylint.
Line: 15
Column: 1
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import FakeYDL
from youtube_dl.cache import Cache
def _is_empty(d):
return not bool(os.listdir(d))
Reported by Pylint.
Line: 16
Column: 1
from test.helper import FakeYDL
from youtube_dl.cache import Cache
def _is_empty(d):
return not bool(os.listdir(d))
Reported by Pylint.
Line: 19
Column: 1
from youtube_dl.cache import Cache
def _is_empty(d):
return not bool(os.listdir(d))
def _mkdir(d):
if not os.path.exists(d):
Reported by Pylint.
Line: 23
Column: 1
return not bool(os.listdir(d))
def _mkdir(d):
if not os.path.exists(d):
os.mkdir(d)
class TestCache(unittest.TestCase):
Reported by Pylint.
Line: 28
Column: 1
os.mkdir(d)
class TestCache(unittest.TestCase):
def setUp(self):
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
TESTDATA_DIR = os.path.join(TEST_DIR, 'testdata')
_mkdir(TESTDATA_DIR)
self.test_dir = os.path.join(TESTDATA_DIR, 'cache_test')
Reported by Pylint.
Line: 30
Column: 9
class TestCache(unittest.TestCase):
def setUp(self):
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
TESTDATA_DIR = os.path.join(TEST_DIR, 'testdata')
_mkdir(TESTDATA_DIR)
self.test_dir = os.path.join(TESTDATA_DIR, 'cache_test')
self.tearDown()
Reported by Pylint.
Line: 31
Column: 9
class TestCache(unittest.TestCase):
def setUp(self):
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
TESTDATA_DIR = os.path.join(TEST_DIR, 'testdata')
_mkdir(TESTDATA_DIR)
self.test_dir = os.path.join(TESTDATA_DIR, 'cache_test')
self.tearDown()
def tearDown(self):
Reported by Pylint.
Line: 40
Column: 5
if os.path.exists(self.test_dir):
shutil.rmtree(self.test_dir)
def test_cache(self):
ydl = FakeYDL({
'cachedir': self.test_dir,
})
c = Cache(ydl)
obj = {'x': 1, 'y': ['ä', '\\a', True]}
Reported by Pylint.
Line: 44
Column: 9
ydl = FakeYDL({
'cachedir': self.test_dir,
})
c = Cache(ydl)
obj = {'x': 1, 'y': ['ä', '\\a', True]}
self.assertEqual(c.load('test_cache', 'k.'), None)
c.store('test_cache', 'k.', obj)
self.assertEqual(c.load('test_cache', 'k2'), None)
self.assertFalse(_is_empty(self.test_dir))
Reported by Pylint.
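The two test_cache.py entries at Line 30 and Line 31, Column 9 point at the upper-case locals TEST_DIR and TESTDATA_DIR inside setUp; that position usually indicates a naming-convention finding, though the message text is not shown. A sketch of one way to restructure setUp, assuming Python 3 (tempfile and os.makedirs stand in for the module's _mkdir helper purely for illustration):

import os
import shutil
import tempfile
import unittest


class TestCacheDir(unittest.TestCase):
    """Sketch: snake_case locals in setUp and makedirs(exist_ok=True)."""

    def setUp(self):
        base_dir = tempfile.mkdtemp()  # was: TEST_DIR / TESTDATA_DIR locals
        self.test_dir = os.path.join(base_dir, 'cache_test')
        os.makedirs(self.test_dir, exist_ok=True)  # replaces _mkdir()

    def tearDown(self):
        shutil.rmtree(os.path.dirname(self.test_dir), ignore_errors=True)

    def test_dir_exists(self):
        self.assertTrue(os.path.isdir(self.test_dir))


if __name__ == '__main__':
    unittest.main()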
youtube_dl/extractor/npr.py
10 issues
Line: 3
Column: 1
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
qualities,
url_or_none,
)
Reported by Pylint.
Line: 4
Column: 1
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
qualities,
url_or_none,
)
Reported by Pylint.
Line: 1
Column: 1
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
qualities,
url_or_none,
)
Reported by Pylint.
Line: 11
Column: 1
)
class NprIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?npr\.org/(?:sections/[^/]+/)?\d{4}/\d{2}/\d{2}/(?P<id>\d+)'
_TESTS = [{
'url': 'https://www.npr.org/sections/allsongs/2015/10/21/449974205/new-music-from-beach-house-chairlift-cmj-discoveries-and-more',
'info_dict': {
'id': '449974205',
Reported by Pylint.
Line: 11
Column: 1
)
class NprIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?npr\.org/(?:sections/[^/]+/)?\d{4}/\d{2}/\d{2}/(?P<id>\d+)'
_TESTS = [{
'url': 'https://www.npr.org/sections/allsongs/2015/10/21/449974205/new-music-from-beach-house-chairlift-cmj-discoveries-and-more',
'info_dict': {
'id': '449974205',
Reported by Pylint.
Line: 14
Column: 1
class NprIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?npr\.org/(?:sections/[^/]+/)?\d{4}/\d{2}/\d{2}/(?P<id>\d+)'
_TESTS = [{
'url': 'https://www.npr.org/sections/allsongs/2015/10/21/449974205/new-music-from-beach-house-chairlift-cmj-discoveries-and-more',
'info_dict': {
'id': '449974205',
'title': 'New Music From Beach House, Chairlift, CMJ Discoveries And More'
},
'playlist_count': 7,
Reported by Pylint.
Line: 21
Column: 1
},
'playlist_count': 7,
}, {
'url': 'https://www.npr.org/sections/deceptivecadence/2015/10/09/446928052/music-from-the-shadows-ancient-armenian-hymns-and-piano-jazz',
'info_dict': {
'id': '446928052',
'title': "Songs We Love: Tigran Hamasyan, 'Your Mercy is Boundless'"
},
'playlist': [{
Reported by Pylint.
Line: 58
Column: 5
'only_matching': True,
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
story = self._download_json(
'http://api.npr.org/query', playlist_id, query={
'id': playlist_id,
Reported by Pylint.
Line: 58
Column: 5
'only_matching': True,
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
story = self._download_json(
'http://api.npr.org/query', playlist_id, query={
'id': playlist_id,
Reported by Pylint.
Line: 70
Column: 9
})['list']['story'][0]
playlist_title = story.get('title', {}).get('$text')
KNOWN_FORMATS = ('threegp', 'm3u8', 'smil', 'mp4', 'mp3')
quality = qualities(KNOWN_FORMATS)
entries = []
for media in story.get('audio', []) + story.get('multimedia', []):
media_id = media['id']
Reported by Pylint.
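Several npr.py entries (Line 14 and Line 21, Column 1) fall on test URLs that run well past the usual 79-character limit, which suggests, but does not confirm, line-length findings. A short sketch of wrapping such a URL with implicit string concatenation so the value is unchanged:

TEST_URL = (
    'https://www.npr.org/sections/allsongs/2015/10/21/449974205/'
    'new-music-from-beach-house-chairlift-cmj-discoveries-and-more'
)

assert '449974205' in TEST_URL
print('%d characters, no source line over 80' % len(TEST_URL))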
youtube_dl/extractor/videopress.py
10 issues
Line: 6
Column: 1
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
float_or_none,
int_or_none,
parse_age_limit,
Reported by Pylint.
Line: 7
Column: 1
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
float_or_none,
int_or_none,
parse_age_limit,
qualities,
Reported by Pylint.
Line: 1
Column: 1
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
float_or_none,
Reported by Pylint.
Line: 19
Column: 1
)
class VideoPressIE(InfoExtractor):
_ID_REGEX = r'[\da-zA-Z]{8}'
_PATH_REGEX = r'video(?:\.word)?press\.com/embed/'
_VALID_URL = r'https?://%s(?P<id>%s)' % (_PATH_REGEX, _ID_REGEX)
_TESTS = [{
'url': 'https://videopress.com/embed/kUJmAcSf',
Reported by Pylint.
Line: 19
Column: 1
)
class VideoPressIE(InfoExtractor):
_ID_REGEX = r'[\da-zA-Z]{8}'
_PATH_REGEX = r'video(?:\.word)?press\.com/embed/'
_VALID_URL = r'https?://%s(?P<id>%s)' % (_PATH_REGEX, _ID_REGEX)
_TESTS = [{
'url': 'https://videopress.com/embed/kUJmAcSf',
Reported by Pylint.
Line: 48
Column: 1
@staticmethod
def _extract_urls(webpage):
return re.findall(
r'<iframe[^>]+src=["\']((?:https?://)?%s%s)' % (VideoPressIE._PATH_REGEX, VideoPressIE._ID_REGEX),
webpage)
def _real_extract(self, url):
video_id = self._match_id(url)
Reported by Pylint.
Line: 51
Column: 5
r'<iframe[^>]+src=["\']((?:https?://)?%s%s)' % (VideoPressIE._PATH_REGEX, VideoPressIE._ID_REGEX),
webpage)
def _real_extract(self, url):
video_id = self._match_id(url)
query = random_birthday('birth_year', 'birth_month', 'birth_day')
query['fields'] = 'description,duration,file_url_base,files,height,original,poster,rating,title,upload_date,width'
video = self._download_json(
Reported by Pylint.
Line: 55
Column: 1
video_id = self._match_id(url)
query = random_birthday('birth_year', 'birth_month', 'birth_day')
query['fields'] = 'description,duration,file_url_base,files,height,original,poster,rating,title,upload_date,width'
video = self._download_json(
'https://public-api.wordpress.com/rest/v1.1/videos/%s' % video_id,
video_id, query=query)
title = video['title']
Reported by Pylint.
Line: 65
Column: 9
file_url_base = video.get('file_url_base') or {}
base_url = file_url_base.get('https') or file_url_base.get('http')
QUALITIES = ('std', 'dvd', 'hd')
quality = qualities(QUALITIES)
formats = []
for format_id, f in (video.get('files') or {}).items():
if not isinstance(f, dict):
Reported by Pylint.
Line: 69
Column: 24
quality = qualities(QUALITIES)
formats = []
for format_id, f in (video.get('files') or {}).items():
if not isinstance(f, dict):
continue
for ext, path in f.items():
if ext in ('mp4', 'ogg'):
formats.append({
Reported by Pylint.
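The videopress.py entry at Line 55, Column 1 sits on the single long 'fields' query string; if that is a line-length finding (the message is not included), one conventional remedy is to assemble the value from a tuple, which also keeps the field list easy to scan:

FIELDS = (
    'description', 'duration', 'file_url_base', 'files', 'height',
    'original', 'poster', 'rating', 'title', 'upload_date', 'width',
)

query = {'fields': ','.join(FIELDS)}
print(query['fields'])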
youtube_dl/extractor/beeg.py
10 issues
Line: 3
Column: 1
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urlparse,
)
from ..utils import (
int_or_none,
Reported by Pylint.
Line: 4
Column: 1
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urlparse,
)
from ..utils import (
int_or_none,
Reported by Pylint.
Line: 8
Column: 1
compat_str,
compat_urlparse,
)
from ..utils import (
int_or_none,
unified_timestamp,
)
Reported by Pylint.
Line: 1
Column: 1
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urlparse,
)
from ..utils import (
int_or_none,
Reported by Pylint.
Line: 14
Column: 1
)
class BeegIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?beeg\.(?:com|porn(?:/video)?)/(?P<id>\d+)'
_TESTS = [{
# api/v6 v1
'url': 'http://beeg.com/5416503',
'md5': 'a1a1b1a8bc70a89e49ccfd113aed0820',
Reported by Pylint.
Line: 14
Column: 1
)
class BeegIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?beeg\.(?:com|porn(?:/video)?)/(?P<id>\d+)'
_TESTS = [{
# api/v6 v1
'url': 'http://beeg.com/5416503',
'md5': 'a1a1b1a8bc70a89e49ccfd113aed0820',
Reported by Pylint.
Line: 47
Column: 5
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
beeg_version = self._search_regex(
Reported by Pylint.
Line: 60
Column: 13
query = {
'v': 2,
}
qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
t = qs.get('t', [''])[0].split('-')
if len(t) > 1:
query.update({
's': t[0],
'e': t[1],
Reported by Pylint.
Line: 61
Column: 13
'v': 2,
}
qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
t = qs.get('t', [''])[0].split('-')
if len(t) > 1:
query.update({
's': t[0],
'e': t[1],
})
Reported by Pylint.
Line: 88
Column: 1
continue
formats.append({
'url': self._proto_relative_url(
video_url.replace('{DATA_MARKERS}', 'data=pc_XX__%s_0' % beeg_version), 'https:'),
'format_id': format_id,
'height': int(height),
})
self._sort_formats(formats)
Reported by Pylint.
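The beeg.py entries at Line 60 and Line 61, Column 13 land on the one- and two-letter locals qs and t; that location is typical of naming findings, though the report does not say so explicitly. A self-contained sketch with descriptive names (clip_range is a hypothetical helper, and the standard library replaces the compat_urlparse shim here):

# Python 3 stand-in for youtube_dl's compat_urlparse
from urllib.parse import parse_qs, urlparse


def clip_range(url):
    """Return the (start, end) pair from a '?t=START-END' parameter, or None."""
    query_params = parse_qs(urlparse(url).query)             # was: qs
    time_range = query_params.get('t', [''])[0].split('-')   # was: t
    if len(time_range) > 1:
        return time_range[0], time_range[1]
    return None


print(clip_range('http://beeg.com/5416503?t=35-45'))  # ('35', '45')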
youtube_dl/extractor/viidea.py
10 issues
Line: 5
Column: 1
import re
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_str,
compat_urlparse,
)
Reported by Pylint.
Line: 6
Column: 1
import re
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_str,
compat_urlparse,
)
from ..utils import (
Reported by Pylint.
Line: 11
Column: 1
compat_str,
compat_urlparse,
)
from ..utils import (
ExtractorError,
js_to_json,
parse_duration,
parse_iso8601,
)
Reported by Pylint.
Line: 141
Column: 17
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
msg = self._parse_json(
e.cause.read().decode('utf-8'), lecture_id)
raise ExtractorError(msg['detail'], expected=True)
raise
lecture_info = {
'id': lecture_id,
'display_id': lecture_slug,
Reported by Pylint.
Line: 1
Column: 1
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_str,
compat_urlparse,
Reported by Pylint.
Line: 19
Column: 1
)
class ViideaIE(InfoExtractor):
_VALID_URL = r'''(?x)https?://(?:www\.)?(?:
videolectures\.net|
flexilearn\.viidea\.net|
presentations\.ocwconsortium\.org|
video\.travel-zoom\.si|
Reported by Pylint.
Line: 19
Column: 1
)
class ViideaIE(InfoExtractor):
_VALID_URL = r'''(?x)https?://(?:www\.)?(?:
videolectures\.net|
flexilearn\.viidea\.net|
presentations\.ocwconsortium\.org|
video\.travel-zoom\.si|
Reported by Pylint.
Line: 119
Column: 5
'playlist_count': 2,
}]
def _real_extract(self, url):
lecture_slug, explicit_part_id = re.match(self._VALID_URL, url).groups()
webpage = self._download_webpage(url, lecture_slug)
cfg = self._parse_json(self._search_regex(
Reported by Pylint.
Line: 137
Column: 9
lecture_data = self._download_json(
'%s/site/api/lecture/%s?format=json' % (base_url, lecture_id),
lecture_id)['lecture'][0]
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
msg = self._parse_json(
e.cause.read().decode('utf-8'), lecture_id)
raise ExtractorError(msg['detail'], expected=True)
raise
Reported by Pylint.
Line: 165
Column: 1
info = self._parse_smil(smil, smil_url, lecture_id)
self._sort_formats(info['formats'])
info['id'] = lecture_id if not multipart else '%s_part%s' % (lecture_id, part_id)
info['display_id'] = lecture_slug if not multipart else '%s_part%s' % (lecture_slug, part_id)
if multipart:
info['title'] += ' (Part %s)' % part_id
switch = smil.find('.//switch')
if switch is not None:
info['duration'] = parse_duration(switch.attrib.get('dur'))
Reported by Pylint.
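The viidea.py entry at Line 141, Column 17 points at the ExtractorError re-raised inside the except block; re-raising without 'from' is what Pylint's raise-missing-from check targets, so that is a plausible but unconfirmed reading. A sketch of explicit exception chaining, which requires Python 3 (fetch_lecture and the error text are invented for the example):

class ExtractorError(Exception):
    """Stand-in for youtube_dl.utils.ExtractorError."""


def fetch_lecture(lecture_id):
    raise IOError('HTTP Error 403: Forbidden (lecture %s)' % lecture_id)


def extract(lecture_id):
    try:
        return fetch_lecture(lecture_id)
    except IOError as err:
        # 'from err' keeps the original HTTP failure chained to the new error
        raise ExtractorError('This lecture is restricted') from err


try:
    extract(20171)
except ExtractorError as err:
    print(err, '| caused by:', repr(err.__cause__))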
youtube_dl/extractor/viddler.py
10 issues
Line: 5
Column: 1
import re
from .common import InfoExtractor
from ..utils import (
float_or_none,
int_or_none,
)
Reported by Pylint.
Line: 6
Column: 1
import re
from .common import InfoExtractor
from ..utils import (
float_or_none,
int_or_none,
)
Reported by Pylint.
Line: 1
Column: 1
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
float_or_none,
int_or_none,
)
Reported by Pylint.
Line: 12
Column: 1
)
class ViddlerIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?viddler\.com/(?:v|embed|player)/(?P<id>[a-z0-9]+)(?:.+?\bsecret=(\d+))?'
_TESTS = [{
'url': 'http://www.viddler.com/v/43903784',
'md5': '9eee21161d2c7f5b39690c3e325fab2f',
'info_dict': {
Reported by Pylint.
Line: 12
Column: 1
)
class ViddlerIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?viddler\.com/(?:v|embed|player)/(?P<id>[a-z0-9]+)(?:.+?\bsecret=(\d+))?'
_TESTS = [{
'url': 'http://www.viddler.com/v/43903784',
'md5': '9eee21161d2c7f5b39690c3e325fab2f',
'info_dict': {
Reported by Pylint.
Line: 13
Column: 1
class ViddlerIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?viddler\.com/(?:v|embed|player)/(?P<id>[a-z0-9]+)(?:.+?\bsecret=(\d+))?'
_TESTS = [{
'url': 'http://www.viddler.com/v/43903784',
'md5': '9eee21161d2c7f5b39690c3e325fab2f',
'info_dict': {
'id': '43903784',
Reported by Pylint.
Line: 29
Column: 1
'thumbnail': r're:^https?://.*\.jpg$',
'view_count': int,
'comment_count': int,
'categories': ['video content', 'high quality video', 'video made easy', 'how to produce video with limited resources', 'viddler'],
}
}, {
'url': 'http://www.viddler.com/v/4d03aad9/',
'md5': 'f12c5a7fa839c47a79363bfdf69404fb',
'info_dict': {
Reported by Pylint.
Line: 96
Column: 13
if filed.get('status', 'ready') != 'ready':
continue
format_id = filed.get('profile_id') or filed['profile_name']
f = {
'format_id': format_id,
'format_note': filed['profile_name'],
'url': self._proto_relative_url(filed['url']),
'width': int_or_none(filed.get('width')),
'height': int_or_none(filed.get('height')),
Reported by Pylint.
Line: 109
Column: 17
formats.append(f)
if filed.get('cdn_url'):
f = f.copy()
f['url'] = self._proto_relative_url(filed['cdn_url'], 'http:')
f['format_id'] = format_id + '-cdn'
f['source_preference'] = 1
formats.append(f)
Reported by Pylint.
Line: 116
Column: 17
formats.append(f)
if filed.get('html5_video_source'):
f = f.copy()
f['url'] = self._proto_relative_url(filed['html5_video_source'])
f['format_id'] = format_id + '-html5'
f['source_preference'] = 0
formats.append(f)
self._sort_formats(formats)
Reported by Pylint.
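Three viddler.py entries (Lines 96, 109 and 116) sit on the format dict bound to the one-letter name f and then copied for the cdn and html5 variants; the message text is missing, but short-name findings are the usual suspects at those columns. A sketch that keeps the same variant-building logic under descriptive names (build_formats and the example hosts are invented for illustration):

def build_formats(filed):
    """Return the base format plus its -cdn and -html5 variants."""
    base_format = {                                            # was: f
        'format_id': filed.get('profile_id') or filed['profile_name'],
        'url': filed['url'],
    }
    formats = [base_format]
    if filed.get('cdn_url'):
        formats.append(dict(base_format, url=filed['cdn_url'],
                            format_id=base_format['format_id'] + '-cdn'))
    if filed.get('html5_video_source'):
        formats.append(dict(base_format, url=filed['html5_video_source'],
                            format_id=base_format['format_id'] + '-html5'))
    return formats


print([fmt['format_id'] for fmt in build_formats({
    'profile_name': '360p',
    'url': '//example.invalid/clip.mp4',
    'cdn_url': '//cdn.example.invalid/clip.mp4',
})])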
youtube_dl/extractor/rmcdecouverte.py
10 issues
Line: 6
Column: 1
import re
from .common import InfoExtractor
from .brightcove import BrightcoveLegacyIE
from ..compat import (
compat_parse_qs,
compat_urlparse,
)
Reported by Pylint.
Line: 7
Column: 1
import re
from .common import InfoExtractor
from .brightcove import BrightcoveLegacyIE
from ..compat import (
compat_parse_qs,
compat_urlparse,
)
from ..utils import smuggle_url
Reported by Pylint.
Line: 8
Column: 1
from .common import InfoExtractor
from .brightcove import BrightcoveLegacyIE
from ..compat import (
compat_parse_qs,
compat_urlparse,
)
from ..utils import smuggle_url
Reported by Pylint.
Line: 12
Column: 1
compat_parse_qs,
compat_urlparse,
)
from ..utils import smuggle_url
class RMCDecouverteIE(InfoExtractor):
_VALID_URL = r'https?://rmcdecouverte\.bfmtv\.com/(?:(?:[^/]+/)*program_(?P<id>\d+)|(?P<live_id>mediaplayer-direct))'
Reported by Pylint.
Line: 44
Column: 33
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('id') or mobj.group('live_id')
webpage = self._download_webpage(url, display_id)
brightcove_legacy_url = BrightcoveLegacyIE._extract_brightcove_url(webpage)
if brightcove_legacy_url:
brightcove_id = compat_parse_qs(compat_urlparse.urlparse(
brightcove_legacy_url).query)['@videoPlayer'][0]
else:
brightcove_id = self._search_regex(
Reported by Pylint.
Line: 1
Column: 1
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .brightcove import BrightcoveLegacyIE
from ..compat import (
compat_parse_qs,
Reported by Pylint.
Line: 15
Column: 1
from ..utils import smuggle_url
class RMCDecouverteIE(InfoExtractor):
_VALID_URL = r'https?://rmcdecouverte\.bfmtv\.com/(?:(?:[^/]+/)*program_(?P<id>\d+)|(?P<live_id>mediaplayer-direct))'
_TESTS = [{
'url': 'https://rmcdecouverte.bfmtv.com/wheeler-dealers-occasions-a-saisir/program_2566/',
'info_dict': {
Reported by Pylint.
Line: 15
Column: 1
from ..utils import smuggle_url
class RMCDecouverteIE(InfoExtractor):
_VALID_URL = r'https?://rmcdecouverte\.bfmtv\.com/(?:(?:[^/]+/)*program_(?P<id>\d+)|(?P<live_id>mediaplayer-direct))'
_TESTS = [{
'url': 'https://rmcdecouverte.bfmtv.com/wheeler-dealers-occasions-a-saisir/program_2566/',
'info_dict': {
Reported by Pylint.
Line: 16
Column: 1
class RMCDecouverteIE(InfoExtractor):
_VALID_URL = r'https?://rmcdecouverte\.bfmtv\.com/(?:(?:[^/]+/)*program_(?P<id>\d+)|(?P<live_id>mediaplayer-direct))'
_TESTS = [{
'url': 'https://rmcdecouverte.bfmtv.com/wheeler-dealers-occasions-a-saisir/program_2566/',
'info_dict': {
'id': '5983675500001',
Reported by Pylint.
Line: 38
Column: 1
'url': 'https://rmcdecouverte.bfmtv.com/mediaplayer-direct/',
'only_matching': True,
}]
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1969646226001/default_default/index.html?videoId=%s'
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('id') or mobj.group('live_id')
webpage = self._download_webpage(url, display_id)
Reported by Pylint.
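The rmcdecouverte.py entry at Line 44, Column 33 points at the call into BrightcoveLegacyIE._extract_brightcove_url; that column is where a protected-access warning would be anchored, though the report does not include the message. A sketch of scoping an inline disable to the one intentional cross-extractor call (the stub class below only mimics the real extractor's shape):

class BrightcoveLegacyIE(object):
    """Stub that mimics the real extractor's module-private helper."""

    @staticmethod
    def _extract_brightcove_url(webpage):
        return 'brightcove://123' if 'brightcove' in webpage else None


def find_player_url(webpage):
    # The cross-extractor call is intentional; the disable below applies
    # from this point to the end of the function.
    # pylint: disable=protected-access
    return BrightcoveLegacyIE._extract_brightcove_url(webpage)


print(find_player_url('<div class="brightcove-player"></div>'))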
youtube_dl/extractor/vesti.py
10 issues
Line: 6
Column: 1
import re
from .common import InfoExtractor
from ..utils import ExtractorError
from .rutv import RUTVIE
class VestiIE(InfoExtractor):
Reported by Pylint.
Line: 7
Column: 1
import re
from .common import InfoExtractor
from ..utils import ExtractorError
from .rutv import RUTVIE
class VestiIE(InfoExtractor):
IE_DESC = 'Вести.Ru'
Reported by Pylint.
Line: 8
Column: 1
from .common import InfoExtractor
from ..utils import ExtractorError
from .rutv import RUTVIE
class VestiIE(InfoExtractor):
IE_DESC = 'Вести.Ru'
_VALID_URL = r'https?://(?:.+?\.)?vesti\.ru/(?P<id>.+)'
Reported by Pylint.
Line: 117
Column: 20
page = self._download_webpage('http://www.vesti.ru/only_video.html?vid=%s' % video_id, video_id,
'Downloading video page')
rutv_url = RUTVIE._extract_url(page)
if rutv_url:
return self.url_result(rutv_url, 'RUTV')
raise ExtractorError('No video found', expected=True)
Reported by Pylint.
Line: 1
Column: 1
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import ExtractorError
from .rutv import RUTVIE
Reported by Pylint.
Line: 11
Column: 1
from .rutv import RUTVIE
class VestiIE(InfoExtractor):
IE_DESC = 'Вести.Ru'
_VALID_URL = r'https?://(?:.+?\.)?vesti\.ru/(?P<id>.+)'
_TESTS = [
{
Reported by Pylint.
Line: 11
Column: 1
from .rutv import RUTVIE
class VestiIE(InfoExtractor):
IE_DESC = 'Вести.Ru'
_VALID_URL = r'https?://(?:.+?\.)?vesti\.ru/(?P<id>.+)'
_TESTS = [
{
Reported by Pylint.
Line: 77
Column: 1
'info_dict': {
'id': '766403',
'ext': 'mp4',
'title': 'XXII зимние Олимпийские игры. Российские хоккеисты стартовали на Олимпиаде с победы',
'description': 'md5:55805dfd35763a890ff50fa9e35e31b3',
'duration': 271,
},
'params': {
# m3u8 download
Reported by Pylint.
Line: 110
Column: 1
page = self._download_webpage(url, video_id, 'Downloading page')
mobj = re.search(
r'<meta[^>]+?property="og:video"[^>]+?content="http://www\.vesti\.ru/i/flvplayer_videoHost\.swf\?vid=(?P<id>\d+)',
page)
if mobj:
video_id = mobj.group('id')
page = self._download_webpage('http://www.vesti.ru/only_video.html?vid=%s' % video_id, video_id,
'Downloading video page')
Reported by Pylint.
Line: 114
Column: 1
page)
if mobj:
video_id = mobj.group('id')
page = self._download_webpage('http://www.vesti.ru/only_video.html?vid=%s' % video_id, video_id,
'Downloading video page')
rutv_url = RUTVIE._extract_url(page)
if rutv_url:
return self.url_result(rutv_url, 'RUTV')
Reported by Pylint.
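The vesti.py entry at Line 117, Column 20 is anchored on RUTVIE._extract_url(page), the same protected-access shape as the rmcdecouverte.py case above; the other conventional remedy is to expose a public entry point on the owning class. A sketch of that wrapper (the embed pattern below is simplified, not the real RUTV regex):

import re


class RUTVIE(object):
    """Stub showing a public wrapper around the module-private helper."""

    @classmethod
    def extract_url(cls, page):
        """Public wrapper so callers need not touch _extract_url."""
        return cls._extract_url(page)

    @staticmethod
    def _extract_url(page):
        mobj = re.search(r'//player\.rutv\.ru/iframe/video/id/(\d+)', page)
        return 'rutv:%s' % mobj.group(1) if mobj else None


page = '<iframe src="//player.rutv.ru/iframe/video/id/766403/">'
print(RUTVIE.extract_url(page))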
youtube_dl/extractor/redtube.py
10 issues
Line: 5
Column: 1
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
ExtractorError,
int_or_none,
merge_dicts,
Reported by Pylint.
Line: 6
Column: 1
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
ExtractorError,
int_or_none,
merge_dicts,
str_to_int,
Reported by Pylint.
Line: 1
Column: 1
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
ExtractorError,
int_or_none,
Reported by Pylint.
Line: 17
Column: 1
)
class RedTubeIE(InfoExtractor):
_VALID_URL = r'https?://(?:(?:\w+\.)?redtube\.com/|embed\.redtube\.com/\?.*?\bid=)(?P<id>[0-9]+)'
_TESTS = [{
'url': 'http://www.redtube.com/66418',
'md5': 'fc08071233725f26b8f014dba9590005',
'info_dict': {
Reported by Pylint.
Line: 17
Column: 1
)
class RedTubeIE(InfoExtractor):
_VALID_URL = r'https?://(?:(?:\w+\.)?redtube\.com/|embed\.redtube\.com/\?.*?\bid=)(?P<id>[0-9]+)'
_TESTS = [{
'url': 'http://www.redtube.com/66418',
'md5': 'fc08071233725f26b8f014dba9590005',
'info_dict': {
Reported by Pylint.
Line: 18
Column: 1
class RedTubeIE(InfoExtractor):
_VALID_URL = r'https?://(?:(?:\w+\.)?redtube\.com/|embed\.redtube\.com/\?.*?\bid=)(?P<id>[0-9]+)'
_TESTS = [{
'url': 'http://www.redtube.com/66418',
'md5': 'fc08071233725f26b8f014dba9590005',
'info_dict': {
'id': '66418',
Reported by Pylint.
Line: 45
Column: 5
r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//embed\.redtube\.com/\?.*?\bid=\d+)',
webpage)
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
'http://www.redtube.com/%s' % video_id, video_id)
ERRORS = (
Reported by Pylint.
Line: 50
Column: 9
webpage = self._download_webpage(
'http://www.redtube.com/%s' % video_id, video_id)
ERRORS = (
(('video-deleted-info', '>This video has been removed'), 'has been removed'),
(('private_video_text', '>This video is private', '>Send a friend request to its owner to be able to view it'), 'is private'),
)
for patterns, message in ERRORS:
Reported by Pylint.
Line: 52
Column: 1
ERRORS = (
(('video-deleted-info', '>This video has been removed'), 'has been removed'),
(('private_video_text', '>This video is private', '>Send a friend request to its owner to be able to view it'), 'is private'),
)
for patterns, message in ERRORS:
if any(p in webpage for p in patterns):
raise ExtractorError(
Reported by Pylint.
Line: 64
Column: 1
if not info.get('title'):
info['title'] = self._html_search_regex(
(r'<h(\d)[^>]+class="(?:video_title_text|videoTitle|video_title)[^"]*">(?P<title>(?:(?!\1).)+)</h\1>',
r'(?:videoTitle|title)\s*:\s*(["\'])(?P<title>(?:(?!\1).)+)\1',),
webpage, 'title', group='title',
default=None) or self._og_search_title(webpage)
formats = []
Reported by Pylint.
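The redtube.py entry at Line 50, Column 9 lands on the ERRORS tuple defined inside _real_extract; an upper-case name for a function local is a classic naming-convention hit, although, as everywhere in this report, the message itself is not shown. A sketch that hoists the table to module level and keeps the same lookup loop (check_availability is an invented wrapper):

REMOVAL_ERRORS = (
    (('video-deleted-info', '>This video has been removed'),
     'has been removed'),
    (('private_video_text', '>This video is private'), 'is private'),
)


class ExtractorError(Exception):
    pass


def check_availability(webpage, video_id):
    for patterns, message in REMOVAL_ERRORS:
        if any(pattern in webpage for pattern in patterns):
            raise ExtractorError('Video %s %s' % (video_id, message))


check_availability('<html>ok</html>', '66418')
print('66418 looks available')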