zip
stringlengths
19
109
filename
stringlengths
4
185
contents
stringlengths
0
30.1M
type_annotations
listlengths
0
1.97k
type_annotation_starts
listlengths
0
1.97k
type_annotation_ends
listlengths
0
1.97k
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/kanalplay.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( ExtractorError, float_or_none, srt_subtitles_timecode, ) class KanalPlayIE(InfoExtractor): IE_DESC = 'Kanal 5/9/11 Play' _VALID_URL = r'https?://(?:www\.)?kanal(?P<channel_id>5|9|11)play\.se/(?:#!/)?(?:play/)?program/\d+/video/(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.kanal5play.se/#!/play/program/3060212363/video/3270012277', 'info_dict': { 'id': '3270012277', 'ext': 'flv', 'title': 'Saknar både dusch och avlopp', 'description': 'md5:6023a95832a06059832ae93bc3c7efb7', 'duration': 2636.36, }, 'params': { # rtmp download 'skip_download': True, } }, { 'url': 'http://www.kanal9play.se/#!/play/program/335032/video/246042', 'only_matching': True, }, { 'url': 'http://www.kanal11play.se/#!/play/program/232835958/video/367135199', 'only_matching': True, }] def _fix_subtitles(self, subs): return '\r\n\r\n'.join( '%s\r\n%s --> %s\r\n%s' % ( num, srt_subtitles_timecode(item['startMillis'] / 1000.0), srt_subtitles_timecode(item['endMillis'] / 1000.0), item['text'], ) for num, item in enumerate(subs, 1)) def _get_subtitles(self, channel_id, video_id): subs = self._download_json( 'http://www.kanal%splay.se/api/subtitles/%s' % (channel_id, video_id), video_id, 'Downloading subtitles JSON', fatal=False) return {'sv': [{'ext': 'srt', 'data': self._fix_subtitles(subs)}]} if subs else {} def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') channel_id = mobj.group('channel_id') video = self._download_json( 'http://www.kanal%splay.se/api/getVideo?format=FLASH&videoId=%s' % (channel_id, video_id), video_id) reasons_for_no_streams = video.get('reasonsForNoStreams') if reasons_for_no_streams: raise ExtractorError( '%s returned error: %s' % (self.IE_NAME, '\n'.join(reasons_for_no_streams)), expected=True) title = video['title'] description = video.get('description') duration = float_or_none(video.get('length'), 1000) 
thumbnail = video.get('posterUrl') stream_base_url = video['streamBaseUrl'] formats = [{ 'url': stream_base_url, 'play_path': stream['source'], 'ext': 'flv', 'tbr': float_or_none(stream.get('bitrate'), 1000), 'rtmp_real_time': True, } for stream in video['streams']] self._sort_formats(formats) subtitles = {} if video.get('hasSubtitle'): subtitles = self.extract_subtitles(channel_id, video_id) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'formats': formats, 'subtitles': subtitles, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/kankan.py
from __future__ import unicode_literals import re import hashlib from .common import InfoExtractor _md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest() class KankanIE(InfoExtractor): _VALID_URL = r'https?://(?:.*?\.)?kankan\.com/.+?/(?P<id>\d+)\.shtml' _TEST = { 'url': 'http://yinyue.kankan.com/vod/48/48863.shtml', 'md5': '29aca1e47ae68fc28804aca89f29507e', 'info_dict': { 'id': '48863', 'ext': 'flv', 'title': 'Ready To Go', }, 'skip': 'Only available from China', } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._search_regex(r'(?:G_TITLE=|G_MOVIE_TITLE = )[\'"](.+?)[\'"]', webpage, 'video title') surls = re.search(r'surls:\[\'.+?\'\]|lurl:\'.+?\.flv\'', webpage).group(0) gcids = re.findall(r'http://.+?/.+?/(.+?)/', surls) gcid = gcids[-1] info_url = 'http://p2s.cl.kankan.com/getCdnresource_flv?gcid=%s' % gcid video_info_page = self._download_webpage( info_url, video_id, 'Downloading video url info') ip = self._search_regex(r'ip:"(.+?)"', video_info_page, 'video url ip') path = self._search_regex(r'path:"(.+?)"', video_info_page, 'video url path') param1 = self._search_regex(r'param1:(\d+)', video_info_page, 'param1') param2 = self._search_regex(r'param2:(\d+)', video_info_page, 'param2') key = _md5('xl_mp43651' + param1 + param2) video_url = 'http://%s%s?key=%s&key1=%s' % (ip, path, key, param2) return { 'id': video_id, 'title': title, 'url': video_url, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/karaoketv.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor


class KaraoketvIE(InfoExtractor):
    """Extractor for karaoketv.co.il; follows two nested iframes to a CDN player."""

    _VALID_URL = r'https?://(?:www\.)?karaoketv\.co\.il/[^/]+/(?P<id>\d+)'
    _TEST = {
        'url': 'http://www.karaoketv.co.il/%D7%A9%D7%99%D7%A8%D7%99_%D7%A7%D7%A8%D7%99%D7%95%D7%A7%D7%99/58356/%D7%90%D7%99%D7%96%D7%95%D7%9F',
        'info_dict': {
            'id': '58356',
            'ext': 'flv',
            'title': 'קריוקי של איזון',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # Page -> API player iframe -> video-cdn.com embed iframe.
        api_page_url = self._search_regex(
            r'<iframe[^>]+src=(["\'])(?P<url>https?://www\.karaoke\.co\.il/api_play\.php\?.+?)\1',
            webpage, 'API play URL', group='url')

        api_page = self._download_webpage(api_page_url, video_id)
        video_cdn_url = self._search_regex(
            r'<iframe[^>]+src=(["\'])(?P<url>https?://www\.video-cdn\.com/embed/iframe/.+?)\1',
            api_page, 'video cdn URL', group='url')

        video_cdn = self._download_webpage(video_cdn_url, video_id)
        play_path = self._parse_json(
            self._search_regex(
                r'var\s+options\s*=\s*({.+?});', video_cdn, 'options'),
            video_id)['clip']['url']

        settings = self._parse_json(
            self._search_regex(
                r'var\s+settings\s*=\s*({.+?});',
                video_cdn, 'servers', default='{}'),
            video_id, fatal=False) or {}

        servers = settings.get('servers')
        if not servers or not isinstance(servers, list):
            # Fall back to the known default RTMP server.
            servers = ('wowzail.video-cdn.com:80/vodcdn', )

        formats = [{
            'url': 'rtmp://%s' % server if not server.startswith('rtmp') else server,
            'play_path': play_path,
            'app': 'vodcdn',
            'page_url': video_cdn_url,
            'player_url': 'http://www.video-cdn.com/assets/flowplayer/flowplayer.commercial-3.2.18.swf',
            'rtmp_real_time': True,
            'ext': 'flv',
        } for server in servers]

        return {
            'id': video_id,
            'title': self._og_search_title(webpage),
            'formats': formats,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/karrierevideos.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
    fix_xml_ampersands,
    float_or_none,
    xpath_with_ns,
    xpath_text,
)


class KarriereVideosIE(InfoExtractor):
    """Extractor for karrierevideos.at career videos (JWPlayer XML playlists)."""

    _VALID_URL = r'https?://(?:www\.)?karrierevideos\.at(?:/[^/]+)+/(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'http://www.karrierevideos.at/berufsvideos/mittlere-hoehere-schulen/altenpflegerin',
        'info_dict': {
            'id': '32c91',
            'ext': 'flv',
            'title': 'AltenpflegerIn',
            'description': 'md5:dbadd1259fde2159a9b28667cb664ae2',
            'thumbnail': r're:^http://.*\.png',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }, {
        # broken ampersands
        'url': 'http://www.karrierevideos.at/orientierung/vaeterkarenz-und-neue-chancen-fuer-muetter-baby-was-nun',
        'info_dict': {
            'id': '5sniu',
            'ext': 'flv',
            'title': 'Väterkarenz und neue Chancen für Mütter - "Baby - was nun?"',
            'description': 'md5:97092c6ad1fd7d38e9d6a5fdeb2bcc33',
            'thumbnail': r're:^http://.*\.png',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        title = (self._html_search_meta('title', webpage, default=None)
                 or self._search_regex(r'<h1 class="title">([^<]+)</h1>', webpage, 'video title'))

        # The URL slug is replaced by the internal playlist id found in the page.
        video_id = self._search_regex(
            r'/config/video/(.+?)\.xml', webpage, 'video id')
        # Server returns malformed headers
        # Force Accept-Encoding: * to prevent gzipped results
        playlist = self._download_xml(
            'http://www.karrierevideos.at/player-playlist.xml.php?p=%s' % video_id,
            video_id, transform_source=fix_xml_ampersands,
            headers={'Accept-Encoding': '*'})

        NS_MAP = {
            'jwplayer': 'http://developer.longtailvideo.com/trac/wiki/FlashFormats'
        }

        def ns(path):
            return xpath_with_ns(path, NS_MAP)

        item = playlist.find('./tracklist/item')
        video_file = xpath_text(
            item, ns('./jwplayer:file'), 'video url', fatal=True)
        streamer = xpath_text(
            item, ns('./jwplayer:streamer'), 'streamer', fatal=True)

        uploader = xpath_text(
            item, ns('./jwplayer:author'), 'uploader')
        duration = float_or_none(
            xpath_text(item, ns('./jwplayer:duration'), 'duration'))

        description = self._html_search_regex(
            r'(?s)<div class="leadtext">(.+?)</div>',
            webpage, 'description')

        thumbnail = self._html_search_meta(
            'thumbnail', webpage, 'thumbnail')
        if thumbnail:
            thumbnail = compat_urlparse.urljoin(url, thumbnail)

        return {
            'id': video_id,
            # RTMPT streamer URLs are rewritten to plain RTMP.
            'url': streamer.replace('rtmpt', 'rtmp'),
            'play_path': 'mp4:%s' % video_file,
            'ext': 'flv',
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'duration': duration,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/keezmovies.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..aes import aes_decrypt_text from ..compat import compat_urllib_parse_unquote from ..utils import ( determine_ext, ExtractorError, int_or_none, str_to_int, strip_or_none, url_or_none, ) class KeezMoviesIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?keezmovies\.com/video/(?:(?P<display_id>[^/]+)-)?(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.keezmovies.com/video/arab-wife-want-it-so-bad-i-see-she-thirsty-and-has-tiny-money-18070681', 'md5': '2ac69cdb882055f71d82db4311732a1a', 'info_dict': { 'id': '18070681', 'display_id': 'arab-wife-want-it-so-bad-i-see-she-thirsty-and-has-tiny-money', 'ext': 'mp4', 'title': 'Arab wife want it so bad I see she thirsty and has tiny money.', 'thumbnail': None, 'view_count': int, 'age_limit': 18, } }, { 'url': 'http://www.keezmovies.com/video/18070681', 'only_matching': True, }] def _extract_info(self, url, fatal=True): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') display_id = (mobj.group('display_id') if 'display_id' in mobj.groupdict() else None) or mobj.group('id') webpage = self._download_webpage( url, display_id, headers={'Cookie': 'age_verified=1'}) formats = [] format_urls = set() title = None thumbnail = None duration = None encrypted = False def extract_format(format_url, height=None): format_url = url_or_none(format_url) if not format_url or not format_url.startswith(('http', '//')): return if format_url in format_urls: return format_urls.add(format_url) tbr = int_or_none(self._search_regex( r'[/_](\d+)[kK][/_]', format_url, 'tbr', default=None)) if not height: height = int_or_none(self._search_regex( r'[/_](\d+)[pP][/_]', format_url, 'height', default=None)) if encrypted: format_url = aes_decrypt_text( video_url, title, 32).decode('utf-8') formats.append({ 'url': format_url, 'format_id': '%dp' % height if height else None, 'height': height, 'tbr': tbr, }) flashvars = self._parse_json( self._search_regex( 
r'flashvars\s*=\s*({.+?});', webpage, 'flashvars', default='{}'), display_id, fatal=False) if flashvars: title = flashvars.get('video_title') thumbnail = flashvars.get('image_url') duration = int_or_none(flashvars.get('video_duration')) encrypted = flashvars.get('encrypted') is True for key, value in flashvars.items(): mobj = re.search(r'quality_(\d+)[pP]', key) if mobj: extract_format(value, int(mobj.group(1))) video_url = flashvars.get('video_url') if video_url and determine_ext(video_url, None): extract_format(video_url) video_url = self._html_search_regex( r'flashvars\.video_url\s*=\s*(["\'])(?P<url>http.+?)\1', webpage, 'video url', default=None, group='url') if video_url: extract_format(compat_urllib_parse_unquote(video_url)) if not formats: if 'title="This video is no longer available"' in webpage: raise ExtractorError( 'Video %s is no longer available' % video_id, expected=True) try: self._sort_formats(formats) except ExtractorError: if fatal: raise if not title: title = self._html_search_regex( r'<h1[^>]*>([^<]+)', webpage, 'title') return webpage, { 'id': video_id, 'display_id': display_id, 'title': strip_or_none(title), 'thumbnail': thumbnail, 'duration': duration, 'age_limit': 18, 'formats': formats, } def _real_extract(self, url): webpage, info = self._extract_info(url, fatal=False) if not info['formats']: return self.url_result(url, 'Generic') info['view_count'] = str_to_int(self._search_regex( r'<b>([\d,.]+)</b> Views?', webpage, 'view count', fatal=False)) return info
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/ketnet.py
from __future__ import unicode_literals

from .canvas import CanvasIE
from .common import InfoExtractor


class KetnetIE(InfoExtractor):
    """Extractor for ketnet.be; delegates mediazone assets to CanvasIE."""

    _VALID_URL = r'https?://(?:www\.)?ketnet\.be/(?:[^/]+/)*(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://www.ketnet.be/kijken/zomerse-filmpjes',
        'md5': '6bdeb65998930251bbd1c510750edba9',
        'info_dict': {
            'id': 'zomerse-filmpjes',
            'ext': 'mp4',
            'title': 'Gluur mee op de filmset en op Pennenzakkenrock',
            'description': 'Gluur mee met Ghost Rockers op de filmset',
            'thumbnail': r're:^https?://.*\.jpg$',
        }
    }, {
        # mzid in playerConfig instead of sources
        'url': 'https://www.ketnet.be/kijken/nachtwacht/de-greystook',
        'md5': '90139b746a0a9bd7bb631283f6e2a64e',
        'info_dict': {
            'id': 'md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
            'display_id': 'md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
            'ext': 'flv',
            'title': 'Nachtwacht: De Greystook',
            'description': 'md5:1db3f5dc4c7109c821261e7512975be7',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 1468.03,
        },
        'expected_warnings': ['is not a supported codec', 'Unknown MIME type'],
    }, {
        'url': 'https://www.ketnet.be/kijken/karrewiet/uitzending-8-september-2016',
        'only_matching': True,
    }, {
        'url': 'https://www.ketnet.be/achter-de-schermen/sien-repeteert-voor-stars-for-life',
        'only_matching': True,
    }, {
        # mzsource, geo restricted to Belgium
        'url': 'https://www.ketnet.be/kijken/nachtwacht/de-bermadoe',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        config = self._parse_json(
            self._search_regex(
                r'(?s)playerConfig\s*=\s*({.+?})\s*;', webpage,
                'player config'),
            video_id)

        # Some pages only carry a mediazone id; those are handled by CanvasIE.
        mzid = config.get('mzid')
        if mzid:
            return self.url_result(
                'https://mediazone.vrt.be/api/v1/ketnet/assets/%s' % mzid,
                CanvasIE.ie_key(), video_id=mzid)

        title = config['title']

        formats = []
        # Both the plain 'source' and the 'mzsource' dicts may hold streams.
        for source_key in ('', 'mz'):
            source = config.get('%ssource' % source_key)
            if not isinstance(source, dict):
                continue
            for format_id, format_url in source.items():
                if format_id == 'hls':
                    formats.extend(self._extract_m3u8_formats(
                        format_url, video_id, 'mp4',
                        entry_protocol='m3u8_native', m3u8_id=format_id,
                        fatal=False))
                elif format_id == 'hds':
                    formats.extend(self._extract_f4m_formats(
                        format_url, video_id, f4m_id=format_id, fatal=False))
                else:
                    formats.append({
                        'url': format_url,
                        'format_id': format_id,
                    })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': config.get('description'),
            'thumbnail': config.get('image'),
            'series': config.get('program'),
            'episode': config.get('episode'),
            'formats': formats,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/khanacademy.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( unified_strdate, ) class KhanAcademyIE(InfoExtractor): _VALID_URL = r'^https?://(?:(?:www|api)\.)?khanacademy\.org/(?P<key>[^/]+)/(?:[^/]+/){,2}(?P<id>[^?#/]+)(?:$|[?#])' IE_NAME = 'KhanAcademy' _TESTS = [{ 'url': 'http://www.khanacademy.org/video/one-time-pad', 'md5': '7b391cce85e758fb94f763ddc1bbb979', 'info_dict': { 'id': 'one-time-pad', 'ext': 'webm', 'title': 'The one-time pad', 'description': 'The perfect cipher', 'duration': 176, 'uploader': 'Brit Cruise', 'uploader_id': 'khanacademy', 'upload_date': '20120411', }, 'add_ie': ['Youtube'], }, { 'url': 'https://www.khanacademy.org/math/applied-math/cryptography', 'info_dict': { 'id': 'cryptography', 'title': 'Journey into cryptography', 'description': 'How have humans protected their secret messages through history? What has changed today?', }, 'playlist_mincount': 3, }] def _real_extract(self, url): m = re.match(self._VALID_URL, url) video_id = m.group('id') if m.group('key') == 'video': data = self._download_json( 'http://api.khanacademy.org/api/v1/videos/' + video_id, video_id, 'Downloading video info') upload_date = unified_strdate(data['date_added']) uploader = ', '.join(data['author_names']) return { '_type': 'url_transparent', 'url': data['url'], 'id': video_id, 'title': data['title'], 'thumbnail': data['image_url'], 'duration': data['duration'], 'description': data['description'], 'uploader': uploader, 'upload_date': upload_date, } else: # topic data = self._download_json( 'http://api.khanacademy.org/api/v1/topic/' + video_id, video_id, 'Downloading topic info') entries = [ { '_type': 'url', 'url': c['url'], 'id': c['id'], 'title': c['title'], } for c in data['children'] if c['kind'] in ('Video', 'Topic')] return { '_type': 'playlist', 'id': video_id, 'title': data['title'], 'description': data['description'], 'entries': entries, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/kickstarter.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import smuggle_url


class KickStarterIE(InfoExtractor):
    """Extractor for Kickstarter project videos (native or embedded)."""

    _VALID_URL = r'https?://(?:www\.)?kickstarter\.com/projects/(?P<id>[^/]*)/.*'
    _TESTS = [{
        'url': 'https://www.kickstarter.com/projects/1404461844/intersection-the-story-of-josh-grant/description',
        'md5': 'c81addca81327ffa66c642b5d8b08cab',
        'info_dict': {
            'id': '1404461844',
            'ext': 'mp4',
            'title': 'Intersection: The Story of Josh Grant by Kyle Cowling',
            'description': (
                'A unique motocross documentary that examines the '
                'life and mind of one of sports most elite athletes: Josh Grant.'
            ),
        },
    }, {
        'note': 'Embedded video (not using the native kickstarter video service)',
        'url': 'https://www.kickstarter.com/projects/597507018/pebble-e-paper-watch-for-iphone-and-android/posts/659178',
        'info_dict': {
            'id': '78704821',
            'ext': 'mp4',
            'uploader_id': 'pebble',
            'uploader': 'Pebble Technology',
            'title': 'Pebble iOS Notifications',
        },
        'add_ie': ['Vimeo'],
    }, {
        'url': 'https://www.kickstarter.com/projects/1420158244/power-drive-2000/widget/video.html',
        'info_dict': {
            'id': '1420158244',
            'ext': 'mp4',
            'title': 'Power Drive 2000',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        title = self._html_search_regex(
            r'<title>\s*(.*?)(?:\s*&mdash;\s*Kickstarter)?\s*</title>',
            webpage, 'title')

        video_url = self._search_regex(
            r'data-video-url="(.*?)"',
            webpage, 'video URL', default=None)
        if video_url is None:  # No native kickstarter, look for embedded videos
            return {
                '_type': 'url_transparent',
                'ie_key': 'Generic',
                'url': smuggle_url(url, {'to_generic': True}),
                'title': title,
            }

        thumbnail = self._og_search_thumbnail(webpage, default=None)
        if thumbnail is None:
            thumbnail = self._html_search_regex(
                r'<img[^>]+class="[^"]+\s*poster\s*[^"]+"[^>]+src="([^"]+)"',
                webpage, 'thumbnail image', fatal=False)

        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'description': self._og_search_description(webpage, default=None),
            'thumbnail': thumbnail,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/kinopoisk.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    dict_get,
    int_or_none,
)


class KinoPoiskIE(InfoExtractor):
    """Extractor for kinopoisk.ru films via the OTT widget (geo: RU)."""

    _GEO_COUNTRIES = ['RU']
    _VALID_URL = r'https?://(?:www\.)?kinopoisk\.ru/film/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.kinopoisk.ru/film/81041/watch/',
        'md5': '4f71c80baea10dfa54a837a46111d326',
        'info_dict': {
            'id': '81041',
            'ext': 'mp4',
            'title': 'Алеша попович и тугарин змей',
            'description': 'md5:43787e673d68b805d0aa1df5a5aea701',
            'thumbnail': r're:^https?://.*',
            'duration': 4533,
            'age_limit': 12,
        },
        'params': {
            'format': 'bestvideo',
        },
    }, {
        'url': 'https://www.kinopoisk.ru/film/81041',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(
            'https://ott-widget.kinopoisk.ru/v1/kp/', video_id,
            query={'kpId': video_id})

        # Metadata is embedded as an application/json script block.
        data = self._parse_json(
            self._search_regex(
                r'(?s)<script[^>]+\btype=["\']application/json[^>]+>(.+?)<',
                webpage, 'data'),
            video_id)['models']

        film = data['filmStatus']
        title = film.get('title') or film['originalTitle']

        formats = self._extract_m3u8_formats(
            data['playlistEntity']['uri'], video_id, 'mp4',
            entry_protocol='m3u8_native', m3u8_id='hls')
        self._sort_formats(formats)

        # Several historical key spellings are probed in order.
        description = dict_get(
            film, ('descriptscription', 'description',
                   'shortDescriptscription', 'shortDescription'))
        thumbnail = film.get('coverUrl') or film.get('posterUrl')
        duration = int_or_none(film.get('duration'))
        age_limit = int_or_none(film.get('restrictionAge'))

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'age_limit': age_limit,
            'formats': formats,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/konserthusetplay.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    float_or_none,
    int_or_none,
    url_or_none,
)


class KonserthusetPlayIE(InfoExtractor):
    """Extractor for konserthusetplay.se / rspoplay.se (Picsearch CSP player)."""

    _VALID_URL = r'https?://(?:www\.)?(?:konserthusetplay|rspoplay)\.se/\?.*\bm=(?P<id>[^&]+)'
    _TESTS = [{
        'url': 'http://www.konserthusetplay.se/?m=CKDDnlCY-dhWAAqiMERd-A',
        'md5': 'e3fd47bf44e864bd23c08e487abe1967',
        'info_dict': {
            'id': 'CKDDnlCY-dhWAAqiMERd-A',
            'ext': 'mp4',
            'title': 'Orkesterns instrument: Valthornen',
            'description': 'md5:f10e1f0030202020396a4d712d2fa827',
            'thumbnail': 're:^https?://.*$',
            'duration': 398.76,
        },
    }, {
        'url': 'http://rspoplay.se/?m=elWuEH34SMKvaO4wO_cHBw',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        e = self._search_regex(
            r'https?://csp\.picsearch\.com/rest\?.*\be=(.+?)[&"\']', webpage, 'e')

        # Response is JSON wrapped in a JS callback; strip to the braces.
        rest = self._download_json(
            'http://csp.picsearch.com/rest?e=%s&containerId=mediaplayer&i=object' % e,
            video_id, transform_source=lambda s: s[s.index('{'):s.rindex('}') + 1])

        media = rest['media']
        player_config = media['playerconfig']
        playlist = player_config['playlist']

        source = next(f for f in playlist if f.get('bitrates') or f.get('provider'))

        FORMAT_ID_REGEX = r'_([^_]+)_h264m\.mp4'

        formats = []

        m3u8_url = source.get('url')
        if m3u8_url and determine_ext(m3u8_url) == 'm3u8':
            formats.extend(self._extract_m3u8_formats(
                m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native',
                m3u8_id='hls', fatal=False))

        fallback_url = source.get('fallbackUrl')
        fallback_format_id = None
        if fallback_url:
            fallback_format_id = self._search_regex(
                FORMAT_ID_REGEX, fallback_url, 'format id', default=None)

        connection_url = (player_config.get('rtmp', {}).get(
            'netConnectionUrl') or player_config.get(
            'plugins', {}).get('bwcheck', {}).get('netConnectionUrl'))
        if connection_url:
            for bitrate in source['bitrates']:
                video_url = bitrate.get('url')
                if not video_url:
                    continue
                format_id = self._search_regex(
                    FORMAT_ID_REGEX, video_url, 'format id', default=None)
                common = {
                    'vbr': int_or_none(bitrate.get('bitrate')),
                    'width': int_or_none(bitrate.get('width')),
                    'height': int_or_none(bitrate.get('height')),
                }
                rtmp_fmt = common.copy()
                rtmp_fmt.update({
                    'url': connection_url,
                    'play_path': video_url,
                    'format_id': 'rtmp-%s' % format_id if format_id else 'rtmp',
                    'ext': 'flv',
                })
                formats.append(rtmp_fmt)
                # Also expose the plain HTTP variant matching the fallback.
                if format_id and format_id == fallback_format_id:
                    http_fmt = common.copy()
                    http_fmt.update({
                        'url': fallback_url,
                        'format_id': 'http-%s' % format_id if format_id else 'http',
                    })
                    formats.append(http_fmt)

        if not formats and fallback_url:
            formats.append({
                'url': fallback_url,
            })

        self._sort_formats(formats)

        title = player_config.get('title') or media['title']
        description = player_config.get('mediaInfo', {}).get('description')
        thumbnail = media.get('image')
        duration = float_or_none(media.get('duration'), 1000)

        subtitles = {}
        captions = source.get('captionsAvailableLanguages')
        if isinstance(captions, dict):
            for lang, subtitle_url in captions.items():
                subtitle_url = url_or_none(subtitle_url)
                if lang != 'none' and subtitle_url:
                    subtitles.setdefault(lang, []).append({'url': subtitle_url})

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
            'subtitles': subtitles,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/kontrtube.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( int_or_none, parse_duration, ) class KontrTubeIE(InfoExtractor): IE_NAME = 'kontrtube' IE_DESC = 'KontrTube.ru - Труба зовёт' _VALID_URL = r'https?://(?:www\.)?kontrtube\.ru/videos/(?P<id>\d+)/(?P<display_id>[^/]+)/' _TEST = { 'url': 'http://www.kontrtube.ru/videos/2678/nad-olimpiyskoy-derevney-v-sochi-podnyat-rossiyskiy-flag/', 'md5': '975a991a4926c9a85f383a736a2e6b80', 'info_dict': { 'id': '2678', 'display_id': 'nad-olimpiyskoy-derevney-v-sochi-podnyat-rossiyskiy-flag', 'ext': 'mp4', 'title': 'Над олимпийской деревней в Сочи поднят российский флаг', 'description': 'md5:80edc4c613d5887ae8ccf1d59432be41', 'thumbnail': 'http://www.kontrtube.ru/contents/videos_screenshots/2000/2678/preview.mp4.jpg', 'duration': 270, } } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') display_id = mobj.group('display_id') webpage = self._download_webpage( url, display_id, 'Downloading page') video_url = self._search_regex( r"video_url\s*:\s*'(.+?)/?',", webpage, 'video URL') thumbnail = self._search_regex( r"preview_url\s*:\s*'(.+?)/?',", webpage, 'thumbnail', fatal=False) title = self._html_search_regex( r'(?s)<h2>(.+?)</h2>', webpage, 'title') description = self._html_search_meta( 'description', webpage, 'description') duration = self._search_regex( r'Длительность: <em>([^<]+)</em>', webpage, 'duration', fatal=False) if duration: duration = parse_duration(duration.replace('мин', 'min').replace('сек', 'sec')) view_count = self._search_regex( r'Просмотров: <em>([^<]+)</em>', webpage, 'view count', fatal=False) if view_count: view_count = int_or_none(view_count.replace(' ', '')) comment_count = int_or_none(self._search_regex( r'Комментарии \((\d+)\)<', webpage, ' comment count', fatal=False)) return { 'id': video_id, 'display_id': display_id, 'url': video_url, 'thumbnail': thumbnail, 'title': title, 'description': 
description, 'duration': duration, 'view_count': int_or_none(view_count), 'comment_count': int_or_none(comment_count), }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/krasview.py
# coding: utf-8 from __future__ import unicode_literals import json from .common import InfoExtractor from ..utils import ( int_or_none, js_to_json, ) class KrasViewIE(InfoExtractor): IE_DESC = 'Красвью' _VALID_URL = r'https?://krasview\.ru/(?:video|embed)/(?P<id>\d+)' _TEST = { 'url': 'http://krasview.ru/video/512228', 'md5': '3b91003cf85fc5db277870c8ebd98eae', 'info_dict': { 'id': '512228', 'ext': 'mp4', 'title': 'Снег, лёд, заносы', 'description': 'Снято в городе Нягань, в Ханты-Мансийском автономном округе.', 'duration': 27, 'thumbnail': r're:^https?://.*\.jpg', }, 'params': { 'skip_download': 'Not accessible from Travis CI server', }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) flashvars = json.loads(js_to_json(self._search_regex( r'video_Init\(({.+?})', webpage, 'flashvars'))) video_url = flashvars['url'] title = self._og_search_title(webpage) description = self._og_search_description(webpage, default=None) thumbnail = flashvars.get('image') or self._og_search_thumbnail(webpage) duration = int_or_none(flashvars.get('duration')) width = int_or_none(self._og_search_property( 'video:width', webpage, 'video width', default=None)) height = int_or_none(self._og_search_property( 'video:height', webpage, 'video height', default=None)) return { 'id': video_id, 'url': video_url, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'width': width, 'height': height, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/ku6.py
from __future__ import unicode_literals from .common import InfoExtractor class Ku6IE(InfoExtractor): _VALID_URL = r'https?://v\.ku6\.com/show/(?P<id>[a-zA-Z0-9\-\_]+)(?:\.)*html' _TEST = { 'url': 'http://v.ku6.com/show/JG-8yS14xzBr4bCn1pu0xw...html', 'md5': '01203549b9efbb45f4b87d55bdea1ed1', 'info_dict': { 'id': 'JG-8yS14xzBr4bCn1pu0xw', 'ext': 'f4v', 'title': 'techniques test', } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._html_search_regex( r'<h1 title=.*>(.*?)</h1>', webpage, 'title') dataUrl = 'http://v.ku6.com/fetchVideo4Player/%s.html' % video_id jsonData = self._download_json(dataUrl, video_id) downloadUrl = jsonData['data']['f'] return { 'id': video_id, 'title': title, 'url': downloadUrl }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/kusi.py
# coding: utf-8 from __future__ import unicode_literals import random import re from .common import InfoExtractor from ..compat import compat_urllib_parse_unquote_plus from ..utils import ( int_or_none, float_or_none, timeconvert, update_url_query, xpath_text, ) class KUSIIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?kusi\.com/(?P<path>story/.+|video\?clipId=(?P<clipId>\d+))' _TESTS = [{ 'url': 'http://www.kusi.com/story/32849881/turko-files-refused-to-help-it-aint-right', 'md5': '4e76ce8e53660ce9697d06c0ba6fc47d', 'info_dict': { 'id': '12689020', 'ext': 'mp4', 'title': "Turko Files: Refused to Help, It Ain't Right!", 'duration': 223.586, 'upload_date': '20160826', 'timestamp': 1472233118, 'thumbnail': r're:^https?://.*\.jpg$' }, }, { 'url': 'http://kusi.com/video?clipId=12203019', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) clip_id = mobj.group('clipId') video_id = clip_id or mobj.group('path') webpage = self._download_webpage(url, video_id) if clip_id is None: video_id = clip_id = self._html_search_regex( r'"clipId"\s*,\s*"(\d+)"', webpage, 'clip id') affiliate_id = self._search_regex( r'affiliateId\s*:\s*\'([^\']+)\'', webpage, 'affiliate id') # See __Packages/worldnow/model/GalleryModel.as of WNGallery.swf xml_url = update_url_query('http://www.kusi.com/build.asp', { 'buildtype': 'buildfeaturexmlrequest', 'featureType': 'Clip', 'featureid': clip_id, 'affiliateno': affiliate_id, 'clientgroupid': '1', 'rnd': int(round(random.random() * 1000000)), }) doc = self._download_xml(xml_url, video_id) video_title = xpath_text(doc, 'HEADLINE', fatal=True) duration = float_or_none(xpath_text(doc, 'DURATION'), scale=1000) description = xpath_text(doc, 'ABSTRACT') thumbnail = xpath_text(doc, './THUMBNAILIMAGE/FILENAME') createtion_time = timeconvert(xpath_text(doc, 'rfc822creationdate')) quality_options = doc.find('{http://search.yahoo.com/mrss/}group').findall('{http://search.yahoo.com/mrss/}content') formats = [] for 
quality in quality_options: formats.append({ 'url': compat_urllib_parse_unquote_plus(quality.attrib['url']), 'height': int_or_none(quality.attrib.get('height')), 'width': int_or_none(quality.attrib.get('width')), 'vbr': float_or_none(quality.attrib.get('bitratebits'), scale=1000), }) self._sort_formats(formats) return { 'id': video_id, 'title': video_title, 'description': description, 'duration': duration, 'formats': formats, 'thumbnail': thumbnail, 'timestamp': createtion_time, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/kuwo.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_urlparse from ..utils import ( get_element_by_id, clean_html, ExtractorError, InAdvancePagedList, remove_start, ) class KuwoBaseIE(InfoExtractor): _FORMATS = [ {'format': 'ape', 'ext': 'ape', 'preference': 100}, {'format': 'mp3-320', 'ext': 'mp3', 'br': '320kmp3', 'abr': 320, 'preference': 80}, {'format': 'mp3-192', 'ext': 'mp3', 'br': '192kmp3', 'abr': 192, 'preference': 70}, {'format': 'mp3-128', 'ext': 'mp3', 'br': '128kmp3', 'abr': 128, 'preference': 60}, {'format': 'wma', 'ext': 'wma', 'preference': 20}, {'format': 'aac', 'ext': 'aac', 'abr': 48, 'preference': 10} ] def _get_formats(self, song_id, tolerate_ip_deny=False): formats = [] for file_format in self._FORMATS: query = { 'format': file_format['ext'], 'br': file_format.get('br', ''), 'rid': 'MUSIC_%s' % song_id, 'type': 'convert_url', 'response': 'url' } song_url = self._download_webpage( 'http://antiserver.kuwo.cn/anti.s', song_id, note='Download %s url info' % file_format['format'], query=query, headers=self.geo_verification_headers(), ) if song_url == 'IPDeny' and not tolerate_ip_deny: raise ExtractorError('This song is blocked in this region', expected=True) if song_url.startswith('http://') or song_url.startswith('https://'): formats.append({ 'url': song_url, 'format_id': file_format['format'], 'format': file_format['format'], 'preference': file_format['preference'], 'abr': file_format.get('abr'), }) return formats class KuwoIE(KuwoBaseIE): IE_NAME = 'kuwo:song' IE_DESC = '酷我音乐' _VALID_URL = r'https?://(?:www\.)?kuwo\.cn/yinyue/(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.kuwo.cn/yinyue/635632/', 'info_dict': { 'id': '635632', 'ext': 'ape', 'title': '爱我别走', 'creator': '张震岳', 'upload_date': '20080122', 'description': 'md5:ed13f58e3c3bf3f7fd9fbc4e5a7aa75c' }, 'skip': 'this song has been offline because of copyright issues', }, { 'url': 'http://www.kuwo.cn/yinyue/6446136/', 
'info_dict': { 'id': '6446136', 'ext': 'mp3', 'title': '心', 'description': 'md5:5d0e947b242c35dc0eb1d2fce9fbf02c', 'creator': 'IU', 'upload_date': '20150518', }, 'params': { 'format': 'mp3-320', }, }, { 'url': 'http://www.kuwo.cn/yinyue/3197154?catalog=yueku2016', 'only_matching': True, }] def _real_extract(self, url): song_id = self._match_id(url) webpage, urlh = self._download_webpage_handle( url, song_id, note='Download song detail info', errnote='Unable to get song detail info') if song_id not in urlh.geturl() or '对不起,该歌曲由于版权问题已被下线,将返回网站首页' in webpage: raise ExtractorError('this song has been offline because of copyright issues', expected=True) song_name = self._html_search_regex( r'<p[^>]+id="lrcName">([^<]+)</p>', webpage, 'song name') singer_name = remove_start(self._html_search_regex( r'<a[^>]+href="http://www\.kuwo\.cn/artist/content\?name=([^"]+)">', webpage, 'singer name', fatal=False), '歌手') lrc_content = clean_html(get_element_by_id('lrcContent', webpage)) if lrc_content == '暂无': # indicates no lyrics lrc_content = None formats = self._get_formats(song_id) self._sort_formats(formats) album_id = self._html_search_regex( r'<a[^>]+href="http://www\.kuwo\.cn/album/(\d+)/"', webpage, 'album id', fatal=False) publish_time = None if album_id is not None: album_info_page = self._download_webpage( 'http://www.kuwo.cn/album/%s/' % album_id, song_id, note='Download album detail info', errnote='Unable to get album detail info') publish_time = self._html_search_regex( r'发行时间:(\d{4}-\d{2}-\d{2})', album_info_page, 'publish time', fatal=False) if publish_time: publish_time = publish_time.replace('-', '') return { 'id': song_id, 'title': song_name, 'creator': singer_name, 'upload_date': publish_time, 'description': lrc_content, 'formats': formats, } class KuwoAlbumIE(InfoExtractor): IE_NAME = 'kuwo:album' IE_DESC = '酷我音乐 - 专辑' _VALID_URL = r'https?://(?:www\.)?kuwo\.cn/album/(?P<id>\d+?)/' _TEST = { 'url': 'http://www.kuwo.cn/album/502294/', 'info_dict': { 'id': 
'502294', 'title': 'Made\xa0Series\xa0《M》', 'description': 'md5:d463f0d8a0ff3c3ea3d6ed7452a9483f', }, 'playlist_count': 2, } def _real_extract(self, url): album_id = self._match_id(url) webpage = self._download_webpage( url, album_id, note='Download album info', errnote='Unable to get album info') album_name = self._html_search_regex( r'<div[^>]+class="comm"[^<]+<h1[^>]+title="([^"]+)"', webpage, 'album name') album_intro = remove_start( clean_html(get_element_by_id('intro', webpage)), '%s简介:' % album_name) entries = [ self.url_result(song_url, 'Kuwo') for song_url in re.findall( r'<p[^>]+class="listen"><a[^>]+href="(http://www\.kuwo\.cn/yinyue/\d+/)"', webpage) ] return self.playlist_result(entries, album_id, album_name, album_intro) class KuwoChartIE(InfoExtractor): IE_NAME = 'kuwo:chart' IE_DESC = '酷我音乐 - 排行榜' _VALID_URL = r'https?://yinyue\.kuwo\.cn/billboard_(?P<id>[^.]+).htm' _TEST = { 'url': 'http://yinyue.kuwo.cn/billboard_香港中文龙虎榜.htm', 'info_dict': { 'id': '香港中文龙虎榜', }, 'playlist_mincount': 7, } def _real_extract(self, url): chart_id = self._match_id(url) webpage = self._download_webpage( url, chart_id, note='Download chart info', errnote='Unable to get chart info') entries = [ self.url_result(song_url, 'Kuwo') for song_url in re.findall( r'<a[^>]+href="(http://www\.kuwo\.cn/yinyue/\d+)', webpage) ] return self.playlist_result(entries, chart_id) class KuwoSingerIE(InfoExtractor): IE_NAME = 'kuwo:singer' IE_DESC = '酷我音乐 - 歌手' _VALID_URL = r'https?://(?:www\.)?kuwo\.cn/mingxing/(?P<id>[^/]+)' _TESTS = [{ 'url': 'http://www.kuwo.cn/mingxing/bruno+mars/', 'info_dict': { 'id': 'bruno+mars', 'title': 'Bruno\xa0Mars', }, 'playlist_mincount': 329, }, { 'url': 'http://www.kuwo.cn/mingxing/Ali/music.htm', 'info_dict': { 'id': 'Ali', 'title': 'Ali', }, 'playlist_mincount': 95, 'skip': 'Regularly stalls travis build', # See https://travis-ci.org/ytdl-org/youtube-dl/jobs/78878540 }] PAGE_SIZE = 15 def _real_extract(self, url): singer_id = self._match_id(url) webpage = 
self._download_webpage( url, singer_id, note='Download singer info', errnote='Unable to get singer info') singer_name = self._html_search_regex( r'<h1>([^<]+)</h1>', webpage, 'singer name') artist_id = self._html_search_regex( r'data-artistid="(\d+)"', webpage, 'artist id') page_count = int(self._html_search_regex( r'data-page="(\d+)"', webpage, 'page count')) def page_func(page_num): webpage = self._download_webpage( 'http://www.kuwo.cn/artist/contentMusicsAjax', singer_id, note='Download song list page #%d' % (page_num + 1), errnote='Unable to get song list page #%d' % (page_num + 1), query={'artistId': artist_id, 'pn': page_num, 'rn': self.PAGE_SIZE}) return [ self.url_result(compat_urlparse.urljoin(url, song_url), 'Kuwo') for song_url in re.findall( r'<div[^>]+class="name"><a[^>]+href="(/yinyue/\d+)', webpage) ] entries = InAdvancePagedList(page_func, page_count, self.PAGE_SIZE) return self.playlist_result(entries, singer_id, singer_name) class KuwoCategoryIE(InfoExtractor): IE_NAME = 'kuwo:category' IE_DESC = '酷我音乐 - 分类' _VALID_URL = r'https?://yinyue\.kuwo\.cn/yy/cinfo_(?P<id>\d+?).htm' _TEST = { 'url': 'http://yinyue.kuwo.cn/yy/cinfo_86375.htm', 'info_dict': { 'id': '86375', 'title': '八十年代精选', 'description': '这些都是属于八十年代的回忆!', }, 'playlist_mincount': 24, } def _real_extract(self, url): category_id = self._match_id(url) webpage = self._download_webpage( url, category_id, note='Download category info', errnote='Unable to get category info') category_name = self._html_search_regex( r'<h1[^>]+title="([^<>]+?)">[^<>]+?</h1>', webpage, 'category name') category_desc = remove_start( get_element_by_id('intro', webpage).strip(), '%s简介:' % category_name) if category_desc == '暂无': category_desc = None jsonm = self._parse_json(self._html_search_regex( r'var\s+jsonm\s*=\s*([^;]+);', webpage, 'category songs'), category_id) entries = [ self.url_result('http://www.kuwo.cn/yinyue/%s/' % song['musicrid'], 'Kuwo') for song in jsonm['musiclist'] ] return 
self.playlist_result(entries, category_id, category_name, category_desc) class KuwoMvIE(KuwoBaseIE): IE_NAME = 'kuwo:mv' IE_DESC = '酷我音乐 - MV' _VALID_URL = r'https?://(?:www\.)?kuwo\.cn/mv/(?P<id>\d+?)/' _TEST = { 'url': 'http://www.kuwo.cn/mv/6480076/', 'info_dict': { 'id': '6480076', 'ext': 'mp4', 'title': 'My HouseMV', 'creator': '2PM', }, # In this video, music URLs (anti.s) are blocked outside China and # USA, while the MV URL (mvurl) is available globally, so force the MV # URL for consistent results in different countries 'params': { 'format': 'mv', }, } _FORMATS = KuwoBaseIE._FORMATS + [ {'format': 'mkv', 'ext': 'mkv', 'preference': 250}, {'format': 'mp4', 'ext': 'mp4', 'preference': 200}, ] def _real_extract(self, url): song_id = self._match_id(url) webpage = self._download_webpage( url, song_id, note='Download mv detail info: %s' % song_id, errnote='Unable to get mv detail info: %s' % song_id) mobj = re.search( r'<h1[^>]+title="(?P<song>[^"]+)">[^<]+<span[^>]+title="(?P<singer>[^"]+)"', webpage) if mobj: song_name = mobj.group('song') singer_name = mobj.group('singer') else: raise ExtractorError('Unable to find song or singer names') formats = self._get_formats(song_id, tolerate_ip_deny=True) mv_url = self._download_webpage( 'http://www.kuwo.cn/yy/st/mvurl?rid=MUSIC_%s' % song_id, song_id, note='Download %s MV URL' % song_id) formats.append({ 'url': mv_url, 'format_id': 'mv', }) self._sort_formats(formats) return { 'id': song_id, 'title': song_name, 'creator': singer_name, 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/la7.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( js_to_json, smuggle_url, ) class LA7IE(InfoExtractor): IE_NAME = 'la7.it' _VALID_URL = r'''(?x)(https?://)?(?: (?:www\.)?la7\.it/([^/]+)/(?:rivedila7|video)/| tg\.la7\.it/repliche-tgla7\?id= )(?P<id>.+)''' _TESTS = [{ # 'src' is a plain URL 'url': 'http://www.la7.it/crozza/video/inccool8-02-10-2015-163722', 'md5': '8b613ffc0c4bf9b9e377169fc19c214c', 'info_dict': { 'id': '0_42j6wd36', 'ext': 'mp4', 'title': 'Inc.Cool8', 'description': 'Benvenuti nell\'incredibile mondo della INC. COOL. 8. dove “INC.” sta per “Incorporated” “COOL” sta per “fashion” ed Eight sta per il gesto atletico', 'thumbnail': 're:^https?://.*', 'uploader_id': '[email protected]', 'timestamp': 1443814869, 'upload_date': '20151002', }, }, { # 'src' is a dictionary 'url': 'http://tg.la7.it/repliche-tgla7?id=189080', 'md5': '6b0d8888d286e39870208dfeceaf456b', 'info_dict': { 'id': '189080', 'ext': 'mp4', 'title': 'TG LA7', }, }, { 'url': 'http://www.la7.it/omnibus/rivedila7/omnibus-news-02-07-2016-189077', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) player_data = self._parse_json( self._search_regex( [r'(?s)videoParams\s*=\s*({.+?});', r'videoLa7\(({[^;]+})\);'], webpage, 'player data'), video_id, transform_source=js_to_json) return { '_type': 'url_transparent', 'url': smuggle_url('kaltura:103:%s' % player_data['vid'], { 'service_url': 'http://nkdam.iltrovatore.it', }), 'id': video_id, 'title': player_data['title'], 'description': self._og_search_description(webpage, default=None), 'thumbnail': player_data.get('poster'), 'ie_key': 'Kaltura', }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/laola1tv.py
# coding: utf-8 from __future__ import unicode_literals import json import re from .common import InfoExtractor from ..utils import ( ExtractorError, unified_strdate, urlencode_postdata, xpath_element, xpath_text, update_url_query, js_to_json, ) class Laola1TvEmbedIE(InfoExtractor): IE_NAME = 'laola1tv:embed' _VALID_URL = r'https?://(?:www\.)?laola1\.tv/titanplayer\.php\?.*?\bvideoid=(?P<id>\d+)' _TESTS = [{ # flashvars.premium = "false"; 'url': 'https://www.laola1.tv/titanplayer.php?videoid=708065&type=V&lang=en&portal=int&customer=1024', 'info_dict': { 'id': '708065', 'ext': 'mp4', 'title': 'MA Long CHN - FAN Zhendong CHN', 'uploader': 'ITTF - International Table Tennis Federation', 'upload_date': '20161211', }, }] def _extract_token_url(self, stream_access_url, video_id, data): return self._download_json( self._proto_relative_url(stream_access_url, 'https:'), video_id, headers={ 'Content-Type': 'application/json', }, data=json.dumps(data).encode())['data']['stream-access'][0] def _extract_formats(self, token_url, video_id): token_doc = self._download_xml( token_url, video_id, 'Downloading token', headers=self.geo_verification_headers()) token_attrib = xpath_element(token_doc, './/token').attrib if token_attrib['status'] != '0': raise ExtractorError( 'Token error: %s' % token_attrib['comment'], expected=True) formats = self._extract_akamai_formats( '%s?hdnea=%s' % (token_attrib['url'], token_attrib['auth']), video_id) self._sort_formats(formats) return formats def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) flash_vars = self._search_regex( r'(?s)flashvars\s*=\s*({.+?});', webpage, 'flash vars') def get_flashvar(x, *args, **kwargs): flash_var = self._search_regex( r'%s\s*:\s*"([^"]+)"' % x, flash_vars, x, default=None) if not flash_var: flash_var = self._search_regex([ r'flashvars\.%s\s*=\s*"([^"]+)"' % x, r'%s\s*=\s*"([^"]+)"' % x], webpage, x, *args, **kwargs) return flash_var hd_doc = 
self._download_xml( 'http://www.laola1.tv/server/hd_video.php', video_id, query={ 'play': get_flashvar('streamid'), 'partner': get_flashvar('partnerid'), 'portal': get_flashvar('portalid'), 'lang': get_flashvar('sprache'), 'v5ident': '', }) _v = lambda x, **k: xpath_text(hd_doc, './/video/' + x, **k) title = _v('title', fatal=True) token_url = None premium = get_flashvar('premium', default=None) if premium: token_url = update_url_query( _v('url', fatal=True), { 'timestamp': get_flashvar('timestamp'), 'auth': get_flashvar('auth'), }) else: data_abo = urlencode_postdata( dict((i, v) for i, v in enumerate(_v('req_liga_abos').split(',')))) stream_access_url = update_url_query( 'https://club.laola1.tv/sp/laola1/api/v3/user/session/premium/player/stream-access', { 'videoId': _v('id'), 'target': self._search_regex(r'vs_target = (\d+);', webpage, 'vs target'), 'label': _v('label'), 'area': _v('area'), }) token_url = self._extract_token_url(stream_access_url, video_id, data_abo) formats = self._extract_formats(token_url, video_id) categories_str = _v('meta_sports') categories = categories_str.split(',') if categories_str else [] is_live = _v('islive') == 'true' return { 'id': video_id, 'title': self._live_title(title) if is_live else title, 'upload_date': unified_strdate(_v('time_date')), 'uploader': _v('meta_organisation'), 'categories': categories, 'is_live': is_live, 'formats': formats, } class Laola1TvBaseIE(Laola1TvEmbedIE): def _extract_video(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) if 'Dieser Livestream ist bereits beendet.' 
in webpage: raise ExtractorError('This live stream has already finished.', expected=True) conf = self._parse_json(self._search_regex( r'(?s)conf\s*=\s*({.+?});', webpage, 'conf'), display_id, transform_source=lambda s: js_to_json(re.sub(r'shareurl:.+,', '', s))) video_id = conf['videoid'] config = self._download_json(conf['configUrl'], video_id, query={ 'videoid': video_id, 'partnerid': conf['partnerid'], 'language': conf.get('language', ''), 'portal': conf.get('portalid', ''), }) error = config.get('error') if error: raise ExtractorError('%s said: %s' % (self.IE_NAME, error), expected=True) video_data = config['video'] title = video_data['title'] is_live = video_data.get('isLivestream') and video_data.get('isLive') meta = video_data.get('metaInformation') sports = meta.get('sports') categories = sports.split(',') if sports else [] token_url = self._extract_token_url( video_data['streamAccess'], video_id, video_data['abo']['required']) formats = self._extract_formats(token_url, video_id) return { 'id': video_id, 'display_id': display_id, 'title': self._live_title(title) if is_live else title, 'description': video_data.get('description'), 'thumbnail': video_data.get('image'), 'categories': categories, 'formats': formats, 'is_live': is_live, } class Laola1TvIE(Laola1TvBaseIE): IE_NAME = 'laola1tv' _VALID_URL = r'https?://(?:www\.)?laola1\.tv/[a-z]+-[a-z]+/[^/]+/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'http://www.laola1.tv/de-de/video/straubing-tigers-koelner-haie/227883.html', 'info_dict': { 'id': '227883', 'display_id': 'straubing-tigers-koelner-haie', 'ext': 'flv', 'title': 'Straubing Tigers - Kölner Haie', 'upload_date': '20140912', 'is_live': False, 'categories': ['Eishockey'], }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.laola1.tv/de-de/video/straubing-tigers-koelner-haie', 'info_dict': { 'id': '464602', 'display_id': 'straubing-tigers-koelner-haie', 'ext': 'flv', 'title': 'Straubing Tigers - Kölner Haie', 'upload_date': '20160129', 'is_live': 
False, 'categories': ['Eishockey'], }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.laola1.tv/de-de/livestream/2016-03-22-belogorie-belgorod-trentino-diatec-lde', 'info_dict': { 'id': '487850', 'display_id': '2016-03-22-belogorie-belgorod-trentino-diatec-lde', 'ext': 'flv', 'title': 'Belogorie BELGOROD - TRENTINO Diatec', 'upload_date': '20160322', 'uploader': 'CEV - Europäischer Volleyball Verband', 'is_live': True, 'categories': ['Volleyball'], }, 'params': { 'skip_download': True, }, 'skip': 'This live stream has already finished.', }] def _real_extract(self, url): return self._extract_video(url) class EHFTVIE(Laola1TvBaseIE): IE_NAME = 'ehftv' _VALID_URL = r'https?://(?:www\.)?ehftv\.com/[a-z]+(?:-[a-z]+)?/[^/]+/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://www.ehftv.com/int/video/paris-saint-germain-handball-pge-vive-kielce/1166761', 'info_dict': { 'id': '1166761', 'display_id': 'paris-saint-germain-handball-pge-vive-kielce', 'ext': 'mp4', 'title': 'Paris Saint-Germain Handball - PGE Vive Kielce', 'is_live': False, 'categories': ['Handball'], }, 'params': { 'skip_download': True, }, }] def _real_extract(self, url): return self._extract_video(url) class ITTFIE(InfoExtractor): _VALID_URL = r'https?://tv\.ittf\.com/video/[^/]+/(?P<id>\d+)' _TEST = { 'url': 'https://tv.ittf.com/video/peng-wang-wei-matsudaira-kenta/951802', 'only_matching': True, } def _real_extract(self, url): return self.url_result( update_url_query('https://www.laola1.tv/titanplayer.php', { 'videoid': self._match_id(url), 'type': 'V', 'lang': 'en', 'portal': 'int', 'customer': 1024, }), Laola1TvEmbedIE.ie_key())
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/lci.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class LCIIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?lci\.fr/[^/]+/[\w-]+-(?P<id>\d+)\.html' _TEST = { 'url': 'http://www.lci.fr/international/etats-unis-a-j-62-hillary-clinton-reste-sans-voix-2001679.html', 'md5': '2fdb2538b884d4d695f9bd2bde137e6c', 'info_dict': { 'id': '13244802', 'ext': 'mp4', 'title': 'Hillary Clinton et sa quinte de toux, en plein meeting', 'description': 'md5:a4363e3a960860132f8124b62f4a01c9', } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) wat_id = self._search_regex( (r'data-watid=[\'"](\d+)', r'idwat["\']?\s*:\s*["\']?(\d+)'), webpage, 'wat id') return self.url_result('wat:' + wat_id, 'Wat', wat_id)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/lcp.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from .arkena import ArkenaIE class LcpPlayIE(ArkenaIE): _VALID_URL = r'https?://play\.lcp\.fr/embed/(?P<id>[^/]+)/(?P<account_id>[^/]+)/[^/]+/[^/]+' _TESTS = [{ 'url': 'http://play.lcp.fr/embed/327336/131064/darkmatter/0', 'md5': 'b8bd9298542929c06c1c15788b1f277a', 'info_dict': { 'id': '327336', 'ext': 'mp4', 'title': '327336', 'timestamp': 1456391602, 'upload_date': '20160225', }, 'params': { 'skip_download': True, }, }] class LcpIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?lcp\.fr/(?:[^/]+/)*(?P<id>[^/]+)' _TESTS = [{ # arkena embed 'url': 'http://www.lcp.fr/la-politique-en-video/schwartzenberg-prg-preconise-francois-hollande-de-participer-une-primaire', 'md5': 'b8bd9298542929c06c1c15788b1f277a', 'info_dict': { 'id': 'd56d03e9', 'ext': 'mp4', 'title': 'Schwartzenberg (PRG) préconise à François Hollande de participer à une primaire à gauche', 'description': 'md5:96ad55009548da9dea19f4120c6c16a8', 'timestamp': 1456488895, 'upload_date': '20160226', }, 'params': { 'skip_download': True, }, }, { # dailymotion live stream 'url': 'http://www.lcp.fr/le-direct', 'info_dict': { 'id': 'xji3qy', 'ext': 'mp4', 'title': 'La Chaine Parlementaire (LCP), Live TNT', 'description': 'md5:5c69593f2de0f38bd9a949f2c95e870b', 'uploader': 'LCP', 'uploader_id': 'xbz33d', 'timestamp': 1308923058, 'upload_date': '20110624', }, 'params': { # m3u8 live stream 'skip_download': True, }, }, { 'url': 'http://www.lcp.fr/emissions/277792-les-volontaires', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) play_url = self._search_regex( r'<iframe[^>]+src=(["\'])(?P<url>%s?(?:(?!\1).)*)\1' % LcpPlayIE._VALID_URL, webpage, 'play iframe', default=None, group='url') if not play_url: return self.url_result(url, 'Generic') title = self._og_search_title(webpage, default=None) or self._html_search_meta( 'twitter:title', 
webpage, fatal=True) description = self._html_search_meta( ('description', 'twitter:description'), webpage) return { '_type': 'url_transparent', 'ie_key': LcpPlayIE.ie_key(), 'url': play_url, 'display_id': display_id, 'title': title, 'description': description, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/lecture2go.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( determine_ext, determine_protocol, parse_duration, int_or_none, ) class Lecture2GoIE(InfoExtractor): _VALID_URL = r'https?://lecture2go\.uni-hamburg\.de/veranstaltungen/-/v/(?P<id>\d+)' _TEST = { 'url': 'https://lecture2go.uni-hamburg.de/veranstaltungen/-/v/17473', 'md5': 'ac02b570883020d208d405d5a3fd2f7f', 'info_dict': { 'id': '17473', 'ext': 'mp4', 'title': '2 - Endliche Automaten und reguläre Sprachen', 'creator': 'Frank Heitmann', 'duration': 5220, }, 'params': { # m3u8 download 'skip_download': True, } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._html_search_regex(r'<em[^>]+class="title">(.+)</em>', webpage, 'title') formats = [] for url in set(re.findall(r'var\s+playerUri\d+\s*=\s*"([^"]+)"', webpage)): ext = determine_ext(url) protocol = determine_protocol({'url': url}) if ext == 'f4m': formats.extend(self._extract_f4m_formats(url, video_id, f4m_id='hds')) elif ext == 'm3u8': formats.extend(self._extract_m3u8_formats(url, video_id, ext='mp4', m3u8_id='hls')) else: if protocol == 'rtmp': continue # XXX: currently broken formats.append({ 'format_id': protocol, 'url': url, }) self._sort_formats(formats) creator = self._html_search_regex( r'<div[^>]+id="description">([^<]+)</div>', webpage, 'creator', fatal=False) duration = parse_duration(self._html_search_regex( r'Duration:\s*</em>\s*<em[^>]*>([^<]+)</em>', webpage, 'duration', fatal=False)) view_count = int_or_none(self._html_search_regex( r'Views:\s*</em>\s*<em[^>]+>(\d+)</em>', webpage, 'view count', fatal=False)) return { 'id': video_id, 'title': title, 'formats': formats, 'creator': creator, 'duration': duration, 'view_count': view_count, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/lecturio.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_str from ..utils import ( clean_html, determine_ext, ExtractorError, float_or_none, int_or_none, str_or_none, url_or_none, urlencode_postdata, urljoin, ) class LecturioBaseIE(InfoExtractor): _API_BASE_URL = 'https://app.lecturio.com/api/en/latest/html5/' _LOGIN_URL = 'https://app.lecturio.com/en/login' _NETRC_MACHINE = 'lecturio' def _real_initialize(self): self._login() def _login(self): username, password = self._get_login_info() if username is None: return # Sets some cookies _, urlh = self._download_webpage_handle( self._LOGIN_URL, None, 'Downloading login popup') def is_logged(url_handle): return self._LOGIN_URL not in compat_str(url_handle.geturl()) # Already logged in if is_logged(urlh): return login_form = { 'signin[email]': username, 'signin[password]': password, 'signin[remember]': 'on', } response, urlh = self._download_webpage_handle( self._LOGIN_URL, None, 'Logging in', data=urlencode_postdata(login_form)) # Logged in successfully if is_logged(urlh): return errors = self._html_search_regex( r'(?s)<ul[^>]+class=["\']error_list[^>]+>(.+?)</ul>', response, 'errors', default=None) if errors: raise ExtractorError('Unable to login: %s' % errors, expected=True) raise ExtractorError('Unable to log in') class LecturioIE(LecturioBaseIE): _VALID_URL = r'''(?x) https:// (?: app\.lecturio\.com/([^/]+/(?P<nt>[^/?#&]+)\.lecture|(?:\#/)?lecture/c/\d+/(?P<id>\d+))| (?:www\.)?lecturio\.de/[^/]+/(?P<nt_de>[^/?#&]+)\.vortrag ) ''' _TESTS = [{ 'url': 'https://app.lecturio.com/medical-courses/important-concepts-and-terms-introduction-to-microbiology.lecture#tab/videos', 'md5': '9a42cf1d8282a6311bf7211bbde26fde', 'info_dict': { 'id': '39634', 'ext': 'mp4', 'title': 'Important Concepts and Terms — Introduction to Microbiology', }, 'skip': 'Requires lecturio account credentials', }, { 'url': 
'https://www.lecturio.de/jura/oeffentliches-recht-staatsexamen.vortrag', 'only_matching': True, }, { 'url': 'https://app.lecturio.com/#/lecture/c/6434/39634', 'only_matching': True, }] _CC_LANGS = { 'Arabic': 'ar', 'Bulgarian': 'bg', 'German': 'de', 'English': 'en', 'Spanish': 'es', 'Persian': 'fa', 'French': 'fr', 'Japanese': 'ja', 'Polish': 'pl', 'Pashto': 'ps', 'Russian': 'ru', } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) nt = mobj.group('nt') or mobj.group('nt_de') lecture_id = mobj.group('id') display_id = nt or lecture_id api_path = 'lectures/' + lecture_id if lecture_id else 'lecture/' + nt + '.json' video = self._download_json( self._API_BASE_URL + api_path, display_id) title = video['title'].strip() if not lecture_id: pid = video.get('productId') or video.get('uid') if pid: spid = pid.split('_') if spid and len(spid) == 2: lecture_id = spid[1] formats = [] for format_ in video['content']['media']: if not isinstance(format_, dict): continue file_ = format_.get('file') if not file_: continue ext = determine_ext(file_) if ext == 'smil': # smil contains only broken RTMP formats anyway continue file_url = url_or_none(file_) if not file_url: continue label = str_or_none(format_.get('label')) filesize = int_or_none(format_.get('fileSize')) f = { 'url': file_url, 'format_id': label, 'filesize': float_or_none(filesize, invscale=1000) } if label: mobj = re.match(r'(\d+)p\s*\(([^)]+)\)', label) if mobj: f.update({ 'format_id': mobj.group(2), 'height': int(mobj.group(1)), }) formats.append(f) self._sort_formats(formats) subtitles = {} automatic_captions = {} captions = video.get('captions') or [] for cc in captions: cc_url = cc.get('url') if not cc_url: continue cc_label = cc.get('translatedCode') lang = cc.get('languageCode') or self._search_regex( r'/([a-z]{2})_', cc_url, 'lang', default=cc_label.split()[0] if cc_label else 'en') original_lang = self._search_regex( r'/[a-z]{2}_([a-z]{2})_', cc_url, 'original lang', default=None) sub_dict = 
(automatic_captions if 'auto-translated' in cc_label or original_lang else subtitles) sub_dict.setdefault(self._CC_LANGS.get(lang, lang), []).append({ 'url': cc_url, }) return { 'id': lecture_id or nt, 'title': title, 'formats': formats, 'subtitles': subtitles, 'automatic_captions': automatic_captions, } class LecturioCourseIE(LecturioBaseIE): _VALID_URL = r'https://app\.lecturio\.com/(?:[^/]+/(?P<nt>[^/?#&]+)\.course|(?:#/)?course/c/(?P<id>\d+))' _TESTS = [{ 'url': 'https://app.lecturio.com/medical-courses/microbiology-introduction.course#/', 'info_dict': { 'id': 'microbiology-introduction', 'title': 'Microbiology: Introduction', 'description': 'md5:13da8500c25880c6016ae1e6d78c386a', }, 'playlist_count': 45, 'skip': 'Requires lecturio account credentials', }, { 'url': 'https://app.lecturio.com/#/course/c/6434', 'only_matching': True, }] def _real_extract(self, url): nt, course_id = re.match(self._VALID_URL, url).groups() display_id = nt or course_id api_path = 'courses/' + course_id if course_id else 'course/content/' + nt + '.json' course = self._download_json( self._API_BASE_URL + api_path, display_id) entries = [] for lecture in course.get('lectures', []): lecture_id = str_or_none(lecture.get('id')) lecture_url = lecture.get('url') if lecture_url: lecture_url = urljoin(url, lecture_url) else: lecture_url = 'https://app.lecturio.com/#/lecture/c/%s/%s' % (course_id, lecture_id) entries.append(self.url_result( lecture_url, ie=LecturioIE.ie_key(), video_id=lecture_id)) return self.playlist_result( entries, display_id, course.get('title'), clean_html(course.get('description'))) class LecturioDeCourseIE(LecturioBaseIE): _VALID_URL = r'https://(?:www\.)?lecturio\.de/[^/]+/(?P<id>[^/?#&]+)\.kurs' _TEST = { 'url': 'https://www.lecturio.de/jura/grundrechte.kurs', 'only_matching': True, } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) entries = [] for mobj in re.finditer( 
r'(?s)<td[^>]+\bdata-lecture-id=["\'](?P<id>\d+).+?\bhref=(["\'])(?P<url>(?:(?!\2).)+\.vortrag)\b[^>]+>', webpage): lecture_url = urljoin(url, mobj.group('url')) lecture_id = mobj.group('id') entries.append(self.url_result( lecture_url, ie=LecturioIE.ie_key(), video_id=lecture_id)) title = self._search_regex( r'<h1[^>]*>([^<]+)', webpage, 'title', default=None) return self.playlist_result(entries, display_id, title)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/leeco.py
# coding: utf-8 from __future__ import unicode_literals import datetime import hashlib import re import time from .common import InfoExtractor from ..compat import ( compat_b64decode, compat_ord, compat_str, compat_urllib_parse_urlencode, ) from ..utils import ( determine_ext, encode_data_uri, ExtractorError, int_or_none, orderedSet, parse_iso8601, str_or_none, url_basename, urshift, ) class LeIE(InfoExtractor): IE_DESC = '乐视网' _VALID_URL = r'https?://(?:www\.le\.com/ptv/vplay|(?:sports\.le|(?:www\.)?lesports)\.com/(?:match|video))/(?P<id>\d+)\.html' _GEO_COUNTRIES = ['CN'] _URL_TEMPLATE = 'http://www.le.com/ptv/vplay/%s.html' _TESTS = [{ 'url': 'http://www.le.com/ptv/vplay/22005890.html', 'md5': 'edadcfe5406976f42f9f266057ee5e40', 'info_dict': { 'id': '22005890', 'ext': 'mp4', 'title': '第87届奥斯卡颁奖礼完美落幕 《鸟人》成最大赢家', 'description': 'md5:a9cb175fd753e2962176b7beca21a47c', }, 'params': { 'hls_prefer_native': True, }, }, { 'url': 'http://www.le.com/ptv/vplay/1415246.html', 'info_dict': { 'id': '1415246', 'ext': 'mp4', 'title': '美人天下01', 'description': 'md5:28942e650e82ed4fcc8e4de919ee854d', }, 'params': { 'hls_prefer_native': True, }, }, { 'note': 'This video is available only in Mainland China, thus a proxy is needed', 'url': 'http://www.le.com/ptv/vplay/1118082.html', 'md5': '2424c74948a62e5f31988438979c5ad1', 'info_dict': { 'id': '1118082', 'ext': 'mp4', 'title': '与龙共舞 完整版', 'description': 'md5:7506a5eeb1722bb9d4068f85024e3986', }, 'params': { 'hls_prefer_native': True, }, }, { 'url': 'http://sports.le.com/video/25737697.html', 'only_matching': True, }, { 'url': 'http://www.lesports.com/match/1023203003.html', 'only_matching': True, }, { 'url': 'http://sports.le.com/match/1023203003.html', 'only_matching': True, }] # ror() and calc_time_key() are reversed from a embedded swf file in LetvPlayer.swf def ror(self, param1, param2): _loc3_ = 0 while _loc3_ < param2: param1 = urshift(param1, 1) + ((param1 & 1) << 31) _loc3_ += 1 return param1 def calc_time_key(self, 
param1): _loc2_ = 185025305 return self.ror(param1, _loc2_ % 17) ^ _loc2_ # see M3U8Encryption class in KLetvPlayer.swf @staticmethod def decrypt_m3u8(encrypted_data): if encrypted_data[:5].decode('utf-8').lower() != 'vc_01': return encrypted_data encrypted_data = encrypted_data[5:] _loc4_ = bytearray(2 * len(encrypted_data)) for idx, val in enumerate(encrypted_data): b = compat_ord(val) _loc4_[2 * idx] = b // 16 _loc4_[2 * idx + 1] = b % 16 idx = len(_loc4_) - 11 _loc4_ = _loc4_[idx:] + _loc4_[:idx] _loc7_ = bytearray(len(encrypted_data)) for i in range(len(encrypted_data)): _loc7_[i] = _loc4_[2 * i] * 16 + _loc4_[2 * i + 1] return bytes(_loc7_) def _check_errors(self, play_json): # Check for errors playstatus = play_json['msgs']['playstatus'] if playstatus['status'] == 0: flag = playstatus['flag'] if flag == 1: self.raise_geo_restricted() else: raise ExtractorError('Generic error. flag = %d' % flag, expected=True) def _real_extract(self, url): media_id = self._match_id(url) page = self._download_webpage(url, media_id) play_json_flash = self._download_json( 'http://player-pc.le.com/mms/out/video/playJson', media_id, 'Downloading flash playJson data', query={ 'id': media_id, 'platid': 1, 'splatid': 105, 'format': 1, 'source': 1000, 'tkey': self.calc_time_key(int(time.time())), 'domain': 'www.le.com', 'region': 'cn', }, headers=self.geo_verification_headers()) self._check_errors(play_json_flash) def get_flash_urls(media_url, format_id): nodes_data = self._download_json( media_url, media_id, 'Download JSON metadata for format %s' % format_id, query={ 'm3v': 1, 'format': 1, 'expect': 3, 'tss': 'ios', }) req = self._request_webpage( nodes_data['nodelist'][0]['location'], media_id, note='Downloading m3u8 information for format %s' % format_id) m3u8_data = self.decrypt_m3u8(req.read()) return { 'hls': encode_data_uri(m3u8_data, 'application/vnd.apple.mpegurl'), } extracted_formats = [] formats = [] playurl = play_json_flash['msgs']['playurl'] play_domain = 
playurl['domain'][0] for format_id, format_data in playurl.get('dispatch', []).items(): if format_id in extracted_formats: continue extracted_formats.append(format_id) media_url = play_domain + format_data[0] for protocol, format_url in get_flash_urls(media_url, format_id).items(): f = { 'url': format_url, 'ext': determine_ext(format_data[1]), 'format_id': '%s-%s' % (protocol, format_id), 'protocol': 'm3u8_native' if protocol == 'hls' else 'http', 'quality': int_or_none(format_id), } if format_id[-1:] == 'p': f['height'] = int_or_none(format_id[:-1]) formats.append(f) self._sort_formats(formats, ('height', 'quality', 'format_id')) publish_time = parse_iso8601(self._html_search_regex( r'发布时间&nbsp;([^<>]+) ', page, 'publish time', default=None), delimiter=' ', timezone=datetime.timedelta(hours=8)) description = self._html_search_meta('description', page, fatal=False) return { 'id': media_id, 'formats': formats, 'title': playurl['title'], 'thumbnail': playurl['pic'], 'description': description, 'timestamp': publish_time, } class LePlaylistIE(InfoExtractor): _VALID_URL = r'https?://[a-z]+\.le\.com/(?!video)[a-z]+/(?P<id>[a-z0-9_]+)' _TESTS = [{ 'url': 'http://www.le.com/tv/46177.html', 'info_dict': { 'id': '46177', 'title': '美人天下', 'description': 'md5:395666ff41b44080396e59570dbac01c' }, 'playlist_count': 35 }, { 'url': 'http://tv.le.com/izt/wuzetian/index.html', 'info_dict': { 'id': 'wuzetian', 'title': '武媚娘传奇', 'description': 'md5:e12499475ab3d50219e5bba00b3cb248' }, # This playlist contains some extra videos other than the drama itself 'playlist_mincount': 96 }, { 'url': 'http://tv.le.com/pzt/lswjzzjc/index.shtml', # This series is moved to http://www.le.com/tv/10005297.html 'only_matching': True, }, { 'url': 'http://www.le.com/comic/92063.html', 'only_matching': True, }, { 'url': 'http://list.le.com/listn/c1009_sc532002_d2_p1_o1.html', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if LeIE.suitable(url) else super(LePlaylistIE, 
cls).suitable(url) def _real_extract(self, url): playlist_id = self._match_id(url) page = self._download_webpage(url, playlist_id) # Currently old domain names are still used in playlists media_ids = orderedSet(re.findall( r'<a[^>]+href="http://www\.letv\.com/ptv/vplay/(\d+)\.html', page)) entries = [self.url_result(LeIE._URL_TEMPLATE % media_id, ie='Le') for media_id in media_ids] title = self._html_search_meta('keywords', page, fatal=False).split(',')[0] description = self._html_search_meta('description', page, fatal=False) return self.playlist_result(entries, playlist_id, playlist_title=title, playlist_description=description) class LetvCloudIE(InfoExtractor): # Most of *.letv.com is changed to *.le.com on 2016/01/02 # but yuntv.letv.com is kept, so also keep the extractor name IE_DESC = '乐视云' _VALID_URL = r'https?://yuntv\.letv\.com/bcloud.html\?.+' _TESTS = [{ 'url': 'http://yuntv.letv.com/bcloud.html?uu=p7jnfw5hw9&vu=467623dedf', 'md5': '26450599afd64c513bc77030ad15db44', 'info_dict': { 'id': 'p7jnfw5hw9_467623dedf', 'ext': 'mp4', 'title': 'Video p7jnfw5hw9_467623dedf', }, }, { 'url': 'http://yuntv.letv.com/bcloud.html?uu=p7jnfw5hw9&vu=ec93197892&pu=2c7cd40209&auto_play=1&gpcflag=1&width=640&height=360', 'md5': 'e03d9cc8d9c13191e1caf277e42dbd31', 'info_dict': { 'id': 'p7jnfw5hw9_ec93197892', 'ext': 'mp4', 'title': 'Video p7jnfw5hw9_ec93197892', }, }, { 'url': 'http://yuntv.letv.com/bcloud.html?uu=p7jnfw5hw9&vu=187060b6fd', 'md5': 'cb988699a776b22d4a41b9d43acfb3ac', 'info_dict': { 'id': 'p7jnfw5hw9_187060b6fd', 'ext': 'mp4', 'title': 'Video p7jnfw5hw9_187060b6fd', }, }] @staticmethod def sign_data(obj): if obj['cf'] == 'flash': salt = '2f9d6924b33a165a6d8b5d3d42f4f987' items = ['cf', 'format', 'ran', 'uu', 'ver', 'vu'] elif obj['cf'] == 'html5': salt = 'fbeh5player12c43eccf2bec3300344' items = ['cf', 'ran', 'uu', 'bver', 'vu'] input_data = ''.join([item + obj[item] for item in items]) + salt obj['sign'] = hashlib.md5(input_data.encode('utf-8')).hexdigest() def 
_get_formats(self, cf, uu, vu, media_id): def get_play_json(cf, timestamp): data = { 'cf': cf, 'ver': '2.2', 'bver': 'firefox44.0', 'format': 'json', 'uu': uu, 'vu': vu, 'ran': compat_str(timestamp), } self.sign_data(data) return self._download_json( 'http://api.letvcloud.com/gpc.php?' + compat_urllib_parse_urlencode(data), media_id, 'Downloading playJson data for type %s' % cf) play_json = get_play_json(cf, time.time()) # The server time may be different from local time if play_json.get('code') == 10071: play_json = get_play_json(cf, play_json['timestamp']) if not play_json.get('data'): if play_json.get('message'): raise ExtractorError('Letv cloud said: %s' % play_json['message'], expected=True) elif play_json.get('code'): raise ExtractorError('Letv cloud returned error %d' % play_json['code'], expected=True) else: raise ExtractorError('Letv cloud returned an unknown error') def b64decode(s): return compat_b64decode(s).decode('utf-8') formats = [] for media in play_json['data']['video_info']['media'].values(): play_url = media['play_url'] url = b64decode(play_url['main_url']) decoded_url = b64decode(url_basename(url)) formats.append({ 'url': url, 'ext': determine_ext(decoded_url), 'format_id': str_or_none(play_url.get('vtype')), 'format_note': str_or_none(play_url.get('definition')), 'width': int_or_none(play_url.get('vwidth')), 'height': int_or_none(play_url.get('vheight')), }) return formats def _real_extract(self, url): uu_mobj = re.search(r'uu=([\w]+)', url) vu_mobj = re.search(r'vu=([\w]+)', url) if not uu_mobj or not vu_mobj: raise ExtractorError('Invalid URL: %s' % url, expected=True) uu = uu_mobj.group(1) vu = vu_mobj.group(1) media_id = uu + '_' + vu formats = self._get_formats('flash', uu, vu, media_id) + self._get_formats('html5', uu, vu, media_id) self._sort_formats(formats) return { 'id': media_id, 'title': 'Video %s' % media_id, 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/lego.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_str from ..utils import ( unescapeHTML, parse_duration, get_element_by_class, ) class LEGOIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?lego\.com/(?P<locale>[^/]+)/(?:[^/]+/)*videos/(?:[^/]+/)*[^/?#]+-(?P<id>[0-9a-f]+)' _TESTS = [{ 'url': 'http://www.lego.com/en-us/videos/themes/club/blocumentary-kawaguchi-55492d823b1b4d5e985787fa8c2973b1', 'md5': 'f34468f176cfd76488767fc162c405fa', 'info_dict': { 'id': '55492d823b1b4d5e985787fa8c2973b1', 'ext': 'mp4', 'title': 'Blocumentary Great Creations: Akiyuki Kawaguchi', 'description': 'Blocumentary Great Creations: Akiyuki Kawaguchi', }, }, { # geo-restricted but the contentUrl contain a valid url 'url': 'http://www.lego.com/nl-nl/videos/themes/nexoknights/episode-20-kingdom-of-heroes-13bdc2299ab24d9685701a915b3d71e7##sp=399', 'md5': '4c3fec48a12e40c6e5995abc3d36cc2e', 'info_dict': { 'id': '13bdc2299ab24d9685701a915b3d71e7', 'ext': 'mp4', 'title': 'Aflevering 20 - Helden van het koninkrijk', 'description': 'md5:8ee499aac26d7fa8bcb0cedb7f9c3941', }, }, { # special characters in title 'url': 'http://www.lego.com/en-us/starwars/videos/lego-star-wars-force-surprise-9685ee9d12e84ff38e84b4e3d0db533d', 'info_dict': { 'id': '9685ee9d12e84ff38e84b4e3d0db533d', 'ext': 'mp4', 'title': 'Force Surprise – LEGO® Star Wars™ Microfighters', 'description': 'md5:9c673c96ce6f6271b88563fe9dc56de3', }, 'params': { 'skip_download': True, }, }] _BITRATES = [256, 512, 1024, 1536, 2560] def _real_extract(self, url): locale, video_id = re.match(self._VALID_URL, url).groups() webpage = self._download_webpage(url, video_id) title = get_element_by_class('video-header', webpage).strip() progressive_base = 'https://lc-mediaplayerns-live-s.legocdn.com/' streaming_base = 'http://legoprod-f.akamaihd.net/' content_url = self._html_search_meta('contentUrl', webpage) path = self._search_regex( 
r'(?:https?:)?//[^/]+/(?:[iz]/s/)?public/(.+)_[0-9,]+\.(?:mp4|webm)', content_url, 'video path', default=None) if not path: player_url = self._proto_relative_url(self._search_regex( r'<iframe[^>]+src="((?:https?)?//(?:www\.)?lego\.com/[^/]+/mediaplayer/video/[^"]+)', webpage, 'player url', default=None)) if not player_url: base_url = self._proto_relative_url(self._search_regex( r'data-baseurl="([^"]+)"', webpage, 'base url', default='http://www.lego.com/%s/mediaplayer/video/' % locale)) player_url = base_url + video_id player_webpage = self._download_webpage(player_url, video_id) video_data = self._parse_json(unescapeHTML(self._search_regex( r"video='([^']+)'", player_webpage, 'video data')), video_id) progressive_base = self._search_regex( r'data-video-progressive-url="([^"]+)"', player_webpage, 'progressive base', default='https://lc-mediaplayerns-live-s.legocdn.com/') streaming_base = self._search_regex( r'data-video-streaming-url="([^"]+)"', player_webpage, 'streaming base', default='http://legoprod-f.akamaihd.net/') item_id = video_data['ItemId'] net_storage_path = video_data.get('NetStoragePath') or '/'.join([item_id[:2], item_id[2:4]]) base_path = '_'.join([item_id, video_data['VideoId'], video_data['Locale'], compat_str(video_data['VideoVersion'])]) path = '/'.join([net_storage_path, base_path]) streaming_path = ','.join(map(lambda bitrate: compat_str(bitrate), self._BITRATES)) formats = self._extract_akamai_formats( '%si/s/public/%s_,%s,.mp4.csmil/master.m3u8' % (streaming_base, path, streaming_path), video_id) m3u8_formats = list(filter( lambda f: f.get('protocol') == 'm3u8_native' and f.get('vcodec') != 'none', formats)) if len(m3u8_formats) == len(self._BITRATES): self._sort_formats(m3u8_formats) for bitrate, m3u8_format in zip(self._BITRATES, m3u8_formats): progressive_base_url = '%spublic/%s_%d.' 
% (progressive_base, path, bitrate) mp4_f = m3u8_format.copy() mp4_f.update({ 'url': progressive_base_url + 'mp4', 'format_id': m3u8_format['format_id'].replace('hls', 'mp4'), 'protocol': 'http', }) web_f = { 'url': progressive_base_url + 'webm', 'format_id': m3u8_format['format_id'].replace('hls', 'webm'), 'width': m3u8_format['width'], 'height': m3u8_format['height'], 'tbr': m3u8_format.get('tbr'), 'ext': 'webm', } formats.extend([web_f, mp4_f]) else: for bitrate in self._BITRATES: for ext in ('web', 'mp4'): formats.append({ 'format_id': '%s-%s' % (ext, bitrate), 'url': '%spublic/%s_%d.%s' % (progressive_base, path, bitrate, ext), 'tbr': bitrate, 'ext': ext, }) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'description': self._html_search_meta('description', webpage), 'thumbnail': self._html_search_meta('thumbnail', webpage), 'duration': parse_duration(self._html_search_meta('duration', webpage)), 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/lemonde.py
from __future__ import unicode_literals from .common import InfoExtractor class LemondeIE(InfoExtractor): _VALID_URL = r'https?://(?:.+?\.)?lemonde\.fr/(?:[^/]+/)*(?P<id>[^/]+)\.html' _TESTS = [{ 'url': 'http://www.lemonde.fr/police-justice/video/2016/01/19/comprendre-l-affaire-bygmalion-en-cinq-minutes_4849702_1653578.html', 'md5': 'da120c8722d8632eec6ced937536cc98', 'info_dict': { 'id': 'lqm3kl', 'ext': 'mp4', 'title': "Comprendre l'affaire Bygmalion en 5 minutes", 'thumbnail': r're:^https?://.*\.jpg', 'duration': 309, 'upload_date': '20160119', 'timestamp': 1453194778, 'uploader_id': '3pmkp', }, }, { # standard iframe embed 'url': 'http://www.lemonde.fr/les-decodeurs/article/2016/10/18/tout-comprendre-du-ceta-le-petit-cousin-du-traite-transatlantique_5015920_4355770.html', 'info_dict': { 'id': 'uzsxms', 'ext': 'mp4', 'title': "CETA : quelles suites pour l'accord commercial entre l'Europe et le Canada ?", 'thumbnail': r're:^https?://.*\.jpg', 'duration': 325, 'upload_date': '20161021', 'timestamp': 1477044540, 'uploader_id': '3pmkp', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://redaction.actu.lemonde.fr/societe/video/2016/01/18/calais-debut-des-travaux-de-defrichement-dans-la-jungle_4849233_3224.html', 'only_matching': True, }, { # YouTube embeds 'url': 'http://www.lemonde.fr/pixels/article/2016/12/09/pourquoi-pewdiepie-superstar-de-youtube-a-menace-de-fermer-sa-chaine_5046649_4408996.html', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) digiteka_url = self._proto_relative_url(self._search_regex( r'url\s*:\s*(["\'])(?P<url>(?:https?://)?//(?:www\.)?(?:digiteka\.net|ultimedia\.com)/deliver/.+?)\1', webpage, 'digiteka url', group='url', default=None)) if digiteka_url: return self.url_result(digiteka_url, 'Digiteka') return self.url_result(url, 'Generic')
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/lenta.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class LentaIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?lenta\.ru/[^/]+/\d+/\d+/\d+/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://lenta.ru/news/2018/03/22/savshenko_go/', 'info_dict': { 'id': '964400', 'ext': 'mp4', 'title': 'Надежду Савченко задержали', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 61, 'view_count': int, }, 'params': { 'skip_download': True, }, }, { # EaglePlatform iframe embed 'url': 'http://lenta.ru/news/2015/03/06/navalny/', 'info_dict': { 'id': '227304', 'ext': 'mp4', 'title': 'Навальный вышел на свободу', 'description': 'md5:d97861ac9ae77377f3f20eaf9d04b4f5', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 87, 'view_count': int, 'age_limit': 0, }, 'params': { 'skip_download': True, }, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) video_id = self._search_regex( r'vid\s*:\s*["\']?(\d+)', webpage, 'eagleplatform id', default=None) if video_id: return self.url_result( 'eagleplatform:lentaru.media.eagleplatform.com:%s' % video_id, ie='EaglePlatform', video_id=video_id) return self.url_result(url, ie='Generic')
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/libraryofcongress.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( determine_ext, float_or_none, int_or_none, parse_filesize, ) class LibraryOfCongressIE(InfoExtractor): IE_NAME = 'loc' IE_DESC = 'Library of Congress' _VALID_URL = r'https?://(?:www\.)?loc\.gov/(?:item/|today/cyberlc/feature_wdesc\.php\?.*\brec=)(?P<id>[0-9a-z_.]+)' _TESTS = [{ # embedded via <div class="media-player" 'url': 'http://loc.gov/item/90716351/', 'md5': '6ec0ae8f07f86731b1b2ff70f046210a', 'info_dict': { 'id': '90716351', 'ext': 'mp4', 'title': "Pa's trip to Mars", 'duration': 0, 'view_count': int, }, }, { # webcast embedded via mediaObjectId 'url': 'https://www.loc.gov/today/cyberlc/feature_wdesc.php?rec=5578', 'info_dict': { 'id': '5578', 'ext': 'mp4', 'title': 'Help! Preservation Training Needs Here, There & Everywhere', 'duration': 3765, 'view_count': int, 'subtitles': 'mincount:1', }, 'params': { 'skip_download': True, }, }, { # with direct download links 'url': 'https://www.loc.gov/item/78710669/', 'info_dict': { 'id': '78710669', 'ext': 'mp4', 'title': 'La vie et la passion de Jesus-Christ', 'duration': 0, 'view_count': int, 'formats': 'mincount:4', }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.loc.gov/item/ihas.200197114/', 'only_matching': True, }, { 'url': 'https://www.loc.gov/item/afc1981005_afs20503/', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) media_id = self._search_regex( (r'id=(["\'])media-player-(?P<id>.+?)\1', r'<video[^>]+id=(["\'])uuid-(?P<id>.+?)\1', r'<video[^>]+data-uuid=(["\'])(?P<id>.+?)\1', r'mediaObjectId\s*:\s*(["\'])(?P<id>.+?)\1', r'data-tab="share-media-(?P<id>[0-9A-F]{32})"'), webpage, 'media id', group='id') data = self._download_json( 'https://media.loc.gov/services/v1/media?id=%s&context=json' % media_id, media_id)['mediaObject'] derivative = data['derivatives'][0] media_url = 
derivative['derivativeUrl'] title = derivative.get('shortName') or data.get('shortName') or self._og_search_title( webpage) # Following algorithm was extracted from setAVSource js function # found in webpage media_url = media_url.replace('rtmp', 'https') is_video = data.get('mediaType', 'v').lower() == 'v' ext = determine_ext(media_url) if ext not in ('mp4', 'mp3'): media_url += '.mp4' if is_video else '.mp3' formats = [] if '/vod/mp4:' in media_url: formats.append({ 'url': media_url.replace('/vod/mp4:', '/hls-vod/media/') + '.m3u8', 'format_id': 'hls', 'ext': 'mp4', 'protocol': 'm3u8_native', 'quality': 1, }) http_format = { 'url': re.sub(r'(://[^/]+/)(?:[^/]+/)*(?:mp4|mp3):', r'\1', media_url), 'format_id': 'http', 'quality': 1, } if not is_video: http_format['vcodec'] = 'none' formats.append(http_format) download_urls = set() for m in re.finditer( r'<option[^>]+value=(["\'])(?P<url>.+?)\1[^>]+data-file-download=[^>]+>\s*(?P<id>.+?)(?:(?:&nbsp;|\s+)\((?P<size>.+?)\))?\s*<', webpage): format_id = m.group('id').lower() if format_id in ('gif', 'jpeg'): continue download_url = m.group('url') if download_url in download_urls: continue download_urls.add(download_url) formats.append({ 'url': download_url, 'format_id': format_id, 'filesize_approx': parse_filesize(m.group('size')), }) self._sort_formats(formats) duration = float_or_none(data.get('duration')) view_count = int_or_none(data.get('viewCount')) subtitles = {} cc_url = data.get('ccUrl') if cc_url: subtitles.setdefault('en', []).append({ 'url': cc_url, 'ext': 'ttml', }) return { 'id': video_id, 'title': title, 'thumbnail': self._og_search_thumbnail(webpage, default=None), 'duration': duration, 'view_count': view_count, 'formats': formats, 'subtitles': subtitles, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/libsyn.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( clean_html, get_element_by_class, parse_duration, strip_or_none, unified_strdate, ) class LibsynIE(InfoExtractor): _VALID_URL = r'(?P<mainurl>https?://html5-player\.libsyn\.com/embed/episode/id/(?P<id>[0-9]+))' _TESTS = [{ 'url': 'http://html5-player.libsyn.com/embed/episode/id/6385796/', 'md5': '2a55e75496c790cdeb058e7e6c087746', 'info_dict': { 'id': '6385796', 'ext': 'mp3', 'title': "Champion Minded - Developing a Growth Mindset", # description fetched using another request: # http://html5-player.libsyn.com/embed/getitemdetails?item_id=6385796 # 'description': 'In this episode, Allistair talks about the importance of developing a growth mindset, not only in sports, but in life too.', 'upload_date': '20180320', 'thumbnail': 're:^https?://.*', }, }, { 'url': 'https://html5-player.libsyn.com/embed/episode/id/3727166/height/75/width/200/theme/standard/direction/no/autoplay/no/autonext/no/thumbnail/no/preload/no/no_addthis/no/', 'md5': '6c5cb21acd622d754d3b1a92b582ce42', 'info_dict': { 'id': '3727166', 'ext': 'mp3', 'title': 'Clients From Hell Podcast - How a Sex Toy Company Kickstarted my Freelance Career', 'upload_date': '20150818', 'thumbnail': 're:^https?://.*', } }] def _real_extract(self, url): url, video_id = re.match(self._VALID_URL, url).groups() webpage = self._download_webpage(url, video_id) data = self._parse_json(self._search_regex( r'var\s+playlistItem\s*=\s*({.+?});', webpage, 'JSON data block'), video_id) episode_title = data.get('item_title') or get_element_by_class('episode-title', webpage) if not episode_title: self._search_regex( [r'data-title="([^"]+)"', r'<title>(.+?)</title>'], webpage, 'episode title') episode_title = episode_title.strip() podcast_title = strip_or_none(clean_html(self._search_regex( r'<h3>([^<]+)</h3>', webpage, 'podcast title', default=None) or get_element_by_class('podcast-title', webpage))) title = '%s - 
%s' % (podcast_title, episode_title) if podcast_title else episode_title formats = [] for k, format_id in (('media_url_libsyn', 'libsyn'), ('media_url', 'main'), ('download_link', 'download')): f_url = data.get(k) if not f_url: continue formats.append({ 'url': f_url, 'format_id': format_id, }) description = self._html_search_regex( r'<p\s+id="info_text_body">(.+?)</p>', webpage, 'description', default=None) if description: # Strip non-breaking and normal spaces description = description.replace('\u00A0', ' ').strip() release_date = unified_strdate(self._search_regex( r'<div class="release_date">Released: ([^<]+)<', webpage, 'release date', default=None) or data.get('release_date')) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': data.get('thumbnail_url'), 'upload_date': release_date, 'duration': parse_duration(data.get('duration')), 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/lifenews.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_str, compat_urlparse, ) from ..utils import ( determine_ext, ExtractorError, int_or_none, parse_iso8601, remove_end, ) class LifeNewsIE(InfoExtractor): IE_NAME = 'life' IE_DESC = 'Life.ru' _VALID_URL = r'https?://life\.ru/t/[^/]+/(?P<id>\d+)' _TESTS = [{ # single video embedded via video/source 'url': 'https://life.ru/t/новости/98736', 'md5': '77c95eaefaca216e32a76a343ad89d23', 'info_dict': { 'id': '98736', 'ext': 'mp4', 'title': 'Мужчина нашел дома архив оборонного завода', 'description': 'md5:3b06b1b39b5e2bea548e403d99b8bf26', 'timestamp': 1344154740, 'upload_date': '20120805', 'view_count': int, } }, { # single video embedded via iframe 'url': 'https://life.ru/t/новости/152125', 'md5': '77d19a6f0886cd76bdbf44b4d971a273', 'info_dict': { 'id': '152125', 'ext': 'mp4', 'title': 'В Сети появилось видео захвата «Правым сектором» колхозных полей ', 'description': 'Жители двух поселков Днепропетровской области не простили радикалам угрозу лишения плодородных земель и пошли в лобовую. 
', 'timestamp': 1427961840, 'upload_date': '20150402', 'view_count': int, } }, { # two videos embedded via iframe 'url': 'https://life.ru/t/новости/153461', 'info_dict': { 'id': '153461', 'title': 'В Москве спасли потерявшегося медвежонка, который спрятался на дереве', 'description': 'Маленький хищник не смог найти дорогу домой и обрел временное убежище на тополе недалеко от жилого массива, пока его не нашла соседская собака.', 'timestamp': 1430825520, 'view_count': int, }, 'playlist': [{ 'md5': '9b6ef8bc0ffa25aebc8bdb40d89ab795', 'info_dict': { 'id': '153461-video1', 'ext': 'mp4', 'title': 'В Москве спасли потерявшегося медвежонка, который спрятался на дереве (Видео 1)', 'description': 'Маленький хищник не смог найти дорогу домой и обрел временное убежище на тополе недалеко от жилого массива, пока его не нашла соседская собака.', 'timestamp': 1430825520, 'upload_date': '20150505', }, }, { 'md5': 'ebb3bf3b1ce40e878d0d628e93eb0322', 'info_dict': { 'id': '153461-video2', 'ext': 'mp4', 'title': 'В Москве спасли потерявшегося медвежонка, который спрятался на дереве (Видео 2)', 'description': 'Маленький хищник не смог найти дорогу домой и обрел временное убежище на тополе недалеко от жилого массива, пока его не нашла соседская собака.', 'timestamp': 1430825520, 'upload_date': '20150505', }, }], }, { 'url': 'https://life.ru/t/новости/213035', 'only_matching': True, }, { 'url': 'https://life.ru/t/%D0%BD%D0%BE%D0%B2%D0%BE%D1%81%D1%82%D0%B8/153461', 'only_matching': True, }, { 'url': 'https://life.ru/t/новости/411489/manuel_vals_nazval_frantsiiu_tsieliu_nomier_odin_dlia_ighil', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) video_urls = re.findall( r'<video[^>]+><source[^>]+src=["\'](.+?)["\']', webpage) iframe_links = re.findall( r'<iframe[^>]+src=["\']((?:https?:)?//embed\.life\.ru/(?:embed|video)/.+?)["\']', webpage) if not video_urls and not iframe_links: raise ExtractorError('No 
media links available for %s' % video_id) title = remove_end( self._og_search_title(webpage), ' - Life.ru') description = self._og_search_description(webpage) view_count = self._html_search_regex( r'<div[^>]+class=(["\']).*?\bhits-count\b.*?\1[^>]*>\s*(?P<value>\d+)\s*</div>', webpage, 'view count', fatal=False, group='value') timestamp = parse_iso8601(self._search_regex( r'<time[^>]+datetime=(["\'])(?P<value>.+?)\1', webpage, 'upload date', fatal=False, group='value')) common_info = { 'description': description, 'view_count': int_or_none(view_count), 'timestamp': timestamp, } def make_entry(video_id, video_url, index=None): cur_info = dict(common_info) cur_info.update({ 'id': video_id if not index else '%s-video%s' % (video_id, index), 'url': video_url, 'title': title if not index else '%s (Видео %s)' % (title, index), }) return cur_info def make_video_entry(video_id, video_url, index=None): video_url = compat_urlparse.urljoin(url, video_url) return make_entry(video_id, video_url, index) def make_iframe_entry(video_id, video_url, index=None): video_url = self._proto_relative_url(video_url, 'http:') cur_info = make_entry(video_id, video_url, index) cur_info['_type'] = 'url_transparent' return cur_info if len(video_urls) == 1 and not iframe_links: return make_video_entry(video_id, video_urls[0]) if len(iframe_links) == 1 and not video_urls: return make_iframe_entry(video_id, iframe_links[0]) entries = [] if video_urls: for num, video_url in enumerate(video_urls, 1): entries.append(make_video_entry(video_id, video_url, num)) if iframe_links: for num, iframe_link in enumerate(iframe_links, len(video_urls) + 1): entries.append(make_iframe_entry(video_id, iframe_link, num)) playlist = common_info.copy() playlist.update(self.playlist_result(entries, video_id, title, description)) return playlist class LifeEmbedIE(InfoExtractor): IE_NAME = 'life:embed' _VALID_URL = r'https?://embed\.life\.ru/(?:embed|video)/(?P<id>[\da-f]{32})' _TESTS = [{ 'url': 
'http://embed.life.ru/embed/e50c2dec2867350528e2574c899b8291', 'md5': 'b889715c9e49cb1981281d0e5458fbbe', 'info_dict': { 'id': 'e50c2dec2867350528e2574c899b8291', 'ext': 'mp4', 'title': 'e50c2dec2867350528e2574c899b8291', 'thumbnail': r're:http://.*\.jpg', } }, { # with 1080p 'url': 'https://embed.life.ru/video/e50c2dec2867350528e2574c899b8291', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) thumbnail = None formats = [] def extract_m3u8(manifest_url): formats.extend(self._extract_m3u8_formats( manifest_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='m3u8')) def extract_original(original_url): formats.append({ 'url': original_url, 'format_id': determine_ext(original_url, None), 'preference': 1, }) playlist = self._parse_json( self._search_regex( r'options\s*=\s*({.+?});', webpage, 'options', default='{}'), video_id).get('playlist', {}) if playlist: master = playlist.get('master') if isinstance(master, compat_str) and determine_ext(master) == 'm3u8': extract_m3u8(compat_urlparse.urljoin(url, master)) original = playlist.get('original') if isinstance(original, compat_str): extract_original(original) thumbnail = playlist.get('image') # Old rendition fallback if not formats: for video_url in re.findall(r'"file"\s*:\s*"([^"]+)', webpage): video_url = compat_urlparse.urljoin(url, video_url) if determine_ext(video_url) == 'm3u8': extract_m3u8(video_url) else: extract_original(video_url) self._sort_formats(formats) thumbnail = thumbnail or self._search_regex( r'"image"\s*:\s*"([^"]+)', webpage, 'thumbnail', default=None) return { 'id': video_id, 'title': video_id, 'thumbnail': thumbnail, 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/limelight.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_HTTPError from ..utils import ( determine_ext, float_or_none, int_or_none, smuggle_url, try_get, unsmuggle_url, ExtractorError, ) class LimelightBaseIE(InfoExtractor): _PLAYLIST_SERVICE_URL = 'http://production-ps.lvp.llnw.net/r/PlaylistService/%s/%s/%s' _API_URL = 'http://api.video.limelight.com/rest/organizations/%s/%s/%s/%s.json' @classmethod def _extract_urls(cls, webpage, source_url): lm = { 'Media': 'media', 'Channel': 'channel', 'ChannelList': 'channel_list', } def smuggle(url): return smuggle_url(url, {'source_url': source_url}) entries = [] for kind, video_id in re.findall( r'LimelightPlayer\.doLoad(Media|Channel|ChannelList)\(["\'](?P<id>[a-z0-9]{32})', webpage): entries.append(cls.url_result( smuggle('limelight:%s:%s' % (lm[kind], video_id)), 'Limelight%s' % kind, video_id)) for mobj in re.finditer( # As per [1] class attribute should be exactly equal to # LimelightEmbeddedPlayerFlash but numerous examples seen # that don't exactly match it (e.g. [2]). # 1. http://support.3playmedia.com/hc/en-us/articles/227732408-Limelight-Embedding-the-Captions-Plugin-with-the-Limelight-Player-on-Your-Webpage # 2. http://www.sedona.com/FacilitatorTraining2017 r'''(?sx) <object[^>]+class=(["\'])(?:(?!\1).)*\bLimelightEmbeddedPlayerFlash\b(?:(?!\1).)*\1[^>]*>.*? 
<param[^>]+ name=(["\'])flashVars\2[^>]+ value=(["\'])(?:(?!\3).)*(?P<kind>media|channel(?:List)?)Id=(?P<id>[a-z0-9]{32}) ''', webpage): kind, video_id = mobj.group('kind'), mobj.group('id') entries.append(cls.url_result( smuggle('limelight:%s:%s' % (kind, video_id)), 'Limelight%s' % kind.capitalize(), video_id)) # http://support.3playmedia.com/hc/en-us/articles/115009517327-Limelight-Embedding-the-Audio-Description-Plugin-with-the-Limelight-Player-on-Your-Web-Page) for video_id in re.findall( r'(?s)LimelightPlayerUtil\.embed\s*\(\s*{.*?\bmediaId["\']\s*:\s*["\'](?P<id>[a-z0-9]{32})', webpage): entries.append(cls.url_result( smuggle('limelight:media:%s' % video_id), LimelightMediaIE.ie_key(), video_id)) return entries def _call_playlist_service(self, item_id, method, fatal=True, referer=None): headers = {} if referer: headers['Referer'] = referer try: return self._download_json( self._PLAYLIST_SERVICE_URL % (self._PLAYLIST_SERVICE_PATH, item_id, method), item_id, 'Downloading PlaylistService %s JSON' % method, fatal=fatal, headers=headers) except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403: error = self._parse_json(e.cause.read().decode(), item_id)['detail']['contentAccessPermission'] if error == 'CountryDisabled': self.raise_geo_restricted() raise ExtractorError(error, expected=True) raise def _call_api(self, organization_id, item_id, method): return self._download_json( self._API_URL % (organization_id, self._API_PATH, item_id, method), item_id, 'Downloading API %s JSON' % method) def _extract(self, item_id, pc_method, mobile_method, meta_method, referer=None): pc = self._call_playlist_service(item_id, pc_method, referer=referer) metadata = self._call_api(pc['orgId'], item_id, meta_method) mobile = self._call_playlist_service(item_id, mobile_method, fatal=False, referer=referer) return pc, mobile, metadata def _extract_info(self, streams, mobile_urls, properties): video_id = properties['media_id'] formats = [] urls = [] 
for stream in streams: stream_url = stream.get('url') if not stream_url or stream.get('drmProtected') or stream_url in urls: continue urls.append(stream_url) ext = determine_ext(stream_url) if ext == 'f4m': formats.extend(self._extract_f4m_formats( stream_url, video_id, f4m_id='hds', fatal=False)) else: fmt = { 'url': stream_url, 'abr': float_or_none(stream.get('audioBitRate')), 'fps': float_or_none(stream.get('videoFrameRate')), 'ext': ext, } width = int_or_none(stream.get('videoWidthInPixels')) height = int_or_none(stream.get('videoHeightInPixels')) vbr = float_or_none(stream.get('videoBitRate')) if width or height or vbr: fmt.update({ 'width': width, 'height': height, 'vbr': vbr, }) else: fmt['vcodec'] = 'none' rtmp = re.search(r'^(?P<url>rtmpe?://(?P<host>[^/]+)/(?P<app>.+))/(?P<playpath>mp[34]:.+)$', stream_url) if rtmp: format_id = 'rtmp' if stream.get('videoBitRate'): format_id += '-%d' % int_or_none(stream['videoBitRate']) http_format_id = format_id.replace('rtmp', 'http') CDN_HOSTS = ( ('delvenetworks.com', 'cpl.delvenetworks.com'), ('video.llnw.net', 's2.content.video.llnw.net'), ) for cdn_host, http_host in CDN_HOSTS: if cdn_host not in rtmp.group('host').lower(): continue http_url = 'http://%s/%s' % (http_host, rtmp.group('playpath')[4:]) urls.append(http_url) if self._is_valid_url(http_url, video_id, http_format_id): http_fmt = fmt.copy() http_fmt.update({ 'url': http_url, 'format_id': http_format_id, }) formats.append(http_fmt) break fmt.update({ 'url': rtmp.group('url'), 'play_path': rtmp.group('playpath'), 'app': rtmp.group('app'), 'ext': 'flv', 'format_id': format_id, }) formats.append(fmt) for mobile_url in mobile_urls: media_url = mobile_url.get('mobileUrl') format_id = mobile_url.get('targetMediaPlatform') if not media_url or format_id in ('Widevine', 'SmoothStreaming') or media_url in urls: continue urls.append(media_url) ext = determine_ext(media_url) if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( media_url, video_id, 'mp4', 
'm3u8_native', m3u8_id=format_id, fatal=False)) elif ext == 'f4m': formats.extend(self._extract_f4m_formats( stream_url, video_id, f4m_id=format_id, fatal=False)) else: formats.append({ 'url': media_url, 'format_id': format_id, 'preference': -1, 'ext': ext, }) self._sort_formats(formats) title = properties['title'] description = properties.get('description') timestamp = int_or_none(properties.get('publish_date') or properties.get('create_date')) duration = float_or_none(properties.get('duration_in_milliseconds'), 1000) filesize = int_or_none(properties.get('total_storage_in_bytes')) categories = [properties.get('category')] tags = properties.get('tags', []) thumbnails = [{ 'url': thumbnail['url'], 'width': int_or_none(thumbnail.get('width')), 'height': int_or_none(thumbnail.get('height')), } for thumbnail in properties.get('thumbnails', []) if thumbnail.get('url')] subtitles = {} for caption in properties.get('captions', []): lang = caption.get('language_code') subtitles_url = caption.get('url') if lang and subtitles_url: subtitles.setdefault(lang, []).append({ 'url': subtitles_url, }) closed_captions_url = properties.get('closed_captions_url') if closed_captions_url: subtitles.setdefault('en', []).append({ 'url': closed_captions_url, 'ext': 'ttml', }) return { 'id': video_id, 'title': title, 'description': description, 'formats': formats, 'timestamp': timestamp, 'duration': duration, 'filesize': filesize, 'categories': categories, 'tags': tags, 'thumbnails': thumbnails, 'subtitles': subtitles, } def _extract_info_helper(self, pc, mobile, i, metadata): return self._extract_info( try_get(pc, lambda x: x['playlistItems'][i]['streams'], list) or [], try_get(mobile, lambda x: x['mediaList'][i]['mobileUrls'], list) or [], metadata) class LimelightMediaIE(LimelightBaseIE): IE_NAME = 'limelight' _VALID_URL = r'''(?x) (?: limelight:media:| https?:// (?: link\.videoplatform\.limelight\.com/media/| assets\.delvenetworks\.com/player/loader\.swf ) \?.*?\bmediaId= ) 
(?P<id>[a-z0-9]{32}) ''' _TESTS = [{ 'url': 'http://link.videoplatform.limelight.com/media/?mediaId=3ffd040b522b4485b6d84effc750cd86', 'info_dict': { 'id': '3ffd040b522b4485b6d84effc750cd86', 'ext': 'mp4', 'title': 'HaP and the HB Prince Trailer', 'description': 'md5:8005b944181778e313d95c1237ddb640', 'thumbnail': r're:^https?://.*\.jpeg$', 'duration': 144.23, 'timestamp': 1244136834, 'upload_date': '20090604', }, 'params': { # m3u8 download 'skip_download': True, }, }, { # video with subtitles 'url': 'limelight:media:a3e00274d4564ec4a9b29b9466432335', 'md5': '2fa3bad9ac321e23860ca23bc2c69e3d', 'info_dict': { 'id': 'a3e00274d4564ec4a9b29b9466432335', 'ext': 'mp4', 'title': '3Play Media Overview Video', 'thumbnail': r're:^https?://.*\.jpeg$', 'duration': 78.101, 'timestamp': 1338929955, 'upload_date': '20120605', 'subtitles': 'mincount:9', }, }, { 'url': 'https://assets.delvenetworks.com/player/loader.swf?mediaId=8018a574f08d416e95ceaccae4ba0452', 'only_matching': True, }] _PLAYLIST_SERVICE_PATH = 'media' _API_PATH = 'media' def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) video_id = self._match_id(url) self._initialize_geo_bypass({ 'countries': smuggled_data.get('geo_countries'), }) pc, mobile, metadata = self._extract( video_id, 'getPlaylistByMediaId', 'getMobilePlaylistByMediaId', 'properties', smuggled_data.get('source_url')) return self._extract_info_helper(pc, mobile, 0, metadata) class LimelightChannelIE(LimelightBaseIE): IE_NAME = 'limelight:channel' _VALID_URL = r'''(?x) (?: limelight:channel:| https?:// (?: link\.videoplatform\.limelight\.com/media/| assets\.delvenetworks\.com/player/loader\.swf ) \?.*?\bchannelId= ) (?P<id>[a-z0-9]{32}) ''' _TESTS = [{ 'url': 'http://link.videoplatform.limelight.com/media/?channelId=ab6a524c379342f9b23642917020c082', 'info_dict': { 'id': 'ab6a524c379342f9b23642917020c082', 'title': 'Javascript Sample Code', }, 'playlist_mincount': 3, }, { 'url': 
'http://assets.delvenetworks.com/player/loader.swf?channelId=ab6a524c379342f9b23642917020c082', 'only_matching': True, }] _PLAYLIST_SERVICE_PATH = 'channel' _API_PATH = 'channels' def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) channel_id = self._match_id(url) pc, mobile, medias = self._extract( channel_id, 'getPlaylistByChannelId', 'getMobilePlaylistWithNItemsByChannelId?begin=0&count=-1', 'media', smuggled_data.get('source_url')) entries = [ self._extract_info_helper(pc, mobile, i, medias['media_list'][i]) for i in range(len(medias['media_list']))] return self.playlist_result(entries, channel_id, pc['title']) class LimelightChannelListIE(LimelightBaseIE): IE_NAME = 'limelight:channel_list' _VALID_URL = r'''(?x) (?: limelight:channel_list:| https?:// (?: link\.videoplatform\.limelight\.com/media/| assets\.delvenetworks\.com/player/loader\.swf ) \?.*?\bchannelListId= ) (?P<id>[a-z0-9]{32}) ''' _TESTS = [{ 'url': 'http://link.videoplatform.limelight.com/media/?channelListId=301b117890c4465c8179ede21fd92e2b', 'info_dict': { 'id': '301b117890c4465c8179ede21fd92e2b', 'title': 'Website - Hero Player', }, 'playlist_mincount': 2, }, { 'url': 'https://assets.delvenetworks.com/player/loader.swf?channelListId=301b117890c4465c8179ede21fd92e2b', 'only_matching': True, }] _PLAYLIST_SERVICE_PATH = 'channel_list' def _real_extract(self, url): channel_list_id = self._match_id(url) channel_list = self._call_playlist_service(channel_list_id, 'getMobileChannelListById') entries = [ self.url_result('limelight:channel:%s' % channel['id'], 'LimelightChannel') for channel in channel_list['channelList']] return self.playlist_result(entries, channel_list_id, channel_list['title'])
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/line.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import js_to_json class LineTVIE(InfoExtractor): _VALID_URL = r'https?://tv\.line\.me/v/(?P<id>\d+)_[^/]+-(?P<segment>ep\d+-\d+)' _TESTS = [{ 'url': 'https://tv.line.me/v/793123_goodbye-mrblack-ep1-1/list/69246', 'info_dict': { 'id': '793123_ep1-1', 'ext': 'mp4', 'title': 'Goodbye Mr.Black | EP.1-1', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 998.509, 'view_count': int, }, }, { 'url': 'https://tv.line.me/v/2587507_%E6%B4%BE%E9%81%A3%E5%A5%B3%E9%86%ABx-ep1-02/list/185245', 'only_matching': True, }] def _real_extract(self, url): series_id, segment = re.match(self._VALID_URL, url).groups() video_id = '%s_%s' % (series_id, segment) webpage = self._download_webpage(url, video_id) player_params = self._parse_json(self._search_regex( r'naver\.WebPlayer\(({[^}]+})\)', webpage, 'player parameters'), video_id, transform_source=js_to_json) video_info = self._download_json( 'https://global-nvapis.line.me/linetv/rmcnmv/vod_play_videoInfo.json', video_id, query={ 'videoId': player_params['videoId'], 'key': player_params['key'], }) stream = video_info['streams'][0] extra_query = '?__gda__=' + stream['key']['value'] formats = self._extract_m3u8_formats( stream['source'] + extra_query, video_id, ext='mp4', entry_protocol='m3u8_native', m3u8_id='hls') for a_format in formats: a_format['url'] += extra_query duration = None for video in video_info.get('videos', {}).get('list', []): encoding_option = video.get('encodingOption', {}) abr = video['bitrate']['audio'] vbr = video['bitrate']['video'] tbr = abr + vbr formats.append({ 'url': video['source'], 'format_id': 'http-%d' % int(tbr), 'height': encoding_option.get('height'), 'width': encoding_option.get('width'), 'abr': abr, 'vbr': vbr, 'filesize': video.get('size'), }) if video.get('duration') and duration is None: duration = video['duration'] self._sort_formats(formats) if not formats[0].get('width'): 
formats[0]['vcodec'] = 'none' title = self._og_search_title(webpage) # like_count requires an additional API request https://tv.line.me/api/likeit/getCount return { 'id': video_id, 'title': title, 'formats': formats, 'extra_param_to_segment_url': extra_query[1:], 'duration': duration, 'thumbnails': [{'url': thumbnail['source']} for thumbnail in video_info.get('thumbnails', {}).get('list', [])], 'view_count': video_info.get('meta', {}).get('count'), }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/linkedin.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( ExtractorError, float_or_none, int_or_none, urlencode_postdata, urljoin, ) class LinkedInLearningBaseIE(InfoExtractor): _NETRC_MACHINE = 'linkedin' _LOGIN_URL = 'https://www.linkedin.com/uas/login?trk=learning' def _call_api(self, course_slug, fields, video_slug=None, resolution=None): query = { 'courseSlug': course_slug, 'fields': fields, 'q': 'slugs', } sub = '' if video_slug: query.update({ 'videoSlug': video_slug, 'resolution': '_%s' % resolution, }) sub = ' %dp' % resolution api_url = 'https://www.linkedin.com/learning-api/detailedCourses' return self._download_json( api_url, video_slug, 'Downloading%s JSON metadata' % sub, headers={ 'Csrf-Token': self._get_cookies(api_url)['JSESSIONID'].value, }, query=query)['elements'][0] def _get_urn_id(self, video_data): urn = video_data.get('urn') if urn: mobj = re.search(r'urn:li:lyndaCourse:\d+,(\d+)', urn) if mobj: return mobj.group(1) def _get_video_id(self, video_data, course_slug, video_slug): return self._get_urn_id(video_data) or '%s/%s' % (course_slug, video_slug) def _real_initialize(self): email, password = self._get_login_info() if email is None: return login_page = self._download_webpage( self._LOGIN_URL, None, 'Downloading login page') action_url = urljoin(self._LOGIN_URL, self._search_regex( r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page, 'post url', default='https://www.linkedin.com/uas/login-submit', group='url')) data = self._hidden_inputs(login_page) data.update({ 'session_key': email, 'session_password': password, }) login_submit_page = self._download_webpage( action_url, None, 'Logging in', data=urlencode_postdata(data)) error = self._search_regex( r'<span[^>]+class="error"[^>]*>\s*(.+?)\s*</span>', login_submit_page, 'error', default=None) if error: raise ExtractorError(error, expected=True) class LinkedInLearningIE(LinkedInLearningBaseIE): IE_NAME = 'linkedin:learning' 
_VALID_URL = r'https?://(?:www\.)?linkedin\.com/learning/(?P<course_slug>[^/]+)/(?P<id>[^/?#]+)' _TEST = { 'url': 'https://www.linkedin.com/learning/programming-foundations-fundamentals/welcome?autoplay=true', 'md5': 'a1d74422ff0d5e66a792deb996693167', 'info_dict': { 'id': '90426', 'ext': 'mp4', 'title': 'Welcome', 'timestamp': 1430396150.82, 'upload_date': '20150430', }, } def _real_extract(self, url): course_slug, video_slug = re.match(self._VALID_URL, url).groups() video_data = None formats = [] for width, height in ((640, 360), (960, 540), (1280, 720)): video_data = self._call_api( course_slug, 'selectedVideo', video_slug, height)['selectedVideo'] video_url_data = video_data.get('url') or {} progressive_url = video_url_data.get('progressiveUrl') if progressive_url: formats.append({ 'format_id': 'progressive-%dp' % height, 'url': progressive_url, 'height': height, 'width': width, 'source_preference': 1, }) title = video_data['title'] audio_url = video_data.get('audio', {}).get('progressiveUrl') if audio_url: formats.append({ 'abr': 64, 'ext': 'm4a', 'format_id': 'audio', 'url': audio_url, 'vcodec': 'none', }) streaming_url = video_url_data.get('streamingUrl') if streaming_url: formats.extend(self._extract_m3u8_formats( streaming_url, video_slug, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) self._sort_formats(formats, ('width', 'height', 'source_preference', 'tbr', 'abr')) return { 'id': self._get_video_id(video_data, course_slug, video_slug), 'title': title, 'formats': formats, 'thumbnail': video_data.get('defaultThumbnail'), 'timestamp': float_or_none(video_data.get('publishedOn'), 1000), 'duration': int_or_none(video_data.get('durationInSeconds')), } class LinkedInLearningCourseIE(LinkedInLearningBaseIE): IE_NAME = 'linkedin:learning:course' _VALID_URL = r'https?://(?:www\.)?linkedin\.com/learning/(?P<id>[^/?#]+)' _TEST = { 'url': 'https://www.linkedin.com/learning/programming-foundations-fundamentals', 'info_dict': { 'id': 
'programming-foundations-fundamentals', 'title': 'Programming Foundations: Fundamentals', 'description': 'md5:76e580b017694eb89dc8e8923fff5c86', }, 'playlist_mincount': 61, } @classmethod def suitable(cls, url): return False if LinkedInLearningIE.suitable(url) else super(LinkedInLearningCourseIE, cls).suitable(url) def _real_extract(self, url): course_slug = self._match_id(url) course_data = self._call_api(course_slug, 'chapters,description,title') entries = [] for chapter_number, chapter in enumerate(course_data.get('chapters', []), 1): chapter_title = chapter.get('title') chapter_id = self._get_urn_id(chapter) for video in chapter.get('videos', []): video_slug = video.get('slug') if not video_slug: continue entries.append({ '_type': 'url_transparent', 'id': self._get_video_id(video, course_slug, video_slug), 'title': video.get('title'), 'url': 'https://www.linkedin.com/learning/%s/%s' % (course_slug, video_slug), 'chapter': chapter_title, 'chapter_number': chapter_number, 'chapter_id': chapter_id, 'ie_key': LinkedInLearningIE.ie_key(), }) return self.playlist_result( entries, course_slug, course_data.get('title'), course_data.get('description'))
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/linuxacademy.py
from __future__ import unicode_literals import json import random import re from .common import InfoExtractor from ..compat import ( compat_b64decode, compat_HTTPError, compat_str, ) from ..utils import ( ExtractorError, orderedSet, unescapeHTML, urlencode_postdata, urljoin, ) class LinuxAcademyIE(InfoExtractor): _VALID_URL = r'''(?x) https?:// (?:www\.)?linuxacademy\.com/cp/ (?: courses/lesson/course/(?P<chapter_id>\d+)/lesson/(?P<lesson_id>\d+)| modules/view/id/(?P<course_id>\d+) ) ''' _TESTS = [{ 'url': 'https://linuxacademy.com/cp/courses/lesson/course/1498/lesson/2/module/154', 'info_dict': { 'id': '1498-2', 'ext': 'mp4', 'title': "Introduction to the Practitioner's Brief", }, 'params': { 'skip_download': True, }, 'skip': 'Requires Linux Academy account credentials', }, { 'url': 'https://linuxacademy.com/cp/courses/lesson/course/1498/lesson/2', 'only_matching': True, }, { 'url': 'https://linuxacademy.com/cp/modules/view/id/154', 'info_dict': { 'id': '154', 'title': 'AWS Certified Cloud Practitioner', 'description': 'md5:039db7e60e4aac9cf43630e0a75fa834', }, 'playlist_count': 41, 'skip': 'Requires Linux Academy account credentials', }] _AUTHORIZE_URL = 'https://login.linuxacademy.com/authorize' _ORIGIN_URL = 'https://linuxacademy.com' _CLIENT_ID = 'KaWxNn1C2Gc7n83W9OFeXltd8Utb5vvx' _NETRC_MACHINE = 'linuxacademy' def _real_initialize(self): self._login() def _login(self): username, password = self._get_login_info() if username is None: return def random_string(): return ''.join([ random.choice('0123456789ABCDEFGHIJKLMNOPQRSTUVXYZabcdefghijklmnopqrstuvwxyz-._~') for _ in range(32)]) webpage, urlh = self._download_webpage_handle( self._AUTHORIZE_URL, None, 'Downloading authorize page', query={ 'client_id': self._CLIENT_ID, 'response_type': 'token id_token', 'redirect_uri': self._ORIGIN_URL, 'scope': 'openid email user_impersonation profile', 'audience': self._ORIGIN_URL, 'state': random_string(), 'nonce': random_string(), }) login_data = self._parse_json( 
self._search_regex( r'atob\(\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage, 'login info', group='value'), None, transform_source=lambda x: compat_b64decode(x).decode('utf-8') )['extraParams'] login_data.update({ 'client_id': self._CLIENT_ID, 'redirect_uri': self._ORIGIN_URL, 'tenant': 'lacausers', 'connection': 'Username-Password-Authentication', 'username': username, 'password': password, 'sso': 'true', }) login_state_url = compat_str(urlh.geturl()) try: login_page = self._download_webpage( 'https://login.linuxacademy.com/usernamepassword/login', None, 'Downloading login page', data=json.dumps(login_data).encode(), headers={ 'Content-Type': 'application/json', 'Origin': 'https://login.linuxacademy.com', 'Referer': login_state_url, }) except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401: error = self._parse_json(e.cause.read(), None) message = error.get('description') or error['code'] raise ExtractorError( '%s said: %s' % (self.IE_NAME, message), expected=True) raise callback_page, urlh = self._download_webpage_handle( 'https://login.linuxacademy.com/login/callback', None, 'Downloading callback page', data=urlencode_postdata(self._hidden_inputs(login_page)), headers={ 'Content-Type': 'application/x-www-form-urlencoded', 'Origin': 'https://login.linuxacademy.com', 'Referer': login_state_url, }) access_token = self._search_regex( r'access_token=([^=&]+)', compat_str(urlh.geturl()), 'access token') self._download_webpage( 'https://linuxacademy.com/cp/login/tokenValidateLogin/token/%s' % access_token, None, 'Downloading token validation page') def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) chapter_id, lecture_id, course_id = mobj.group('chapter_id', 'lesson_id', 'course_id') item_id = course_id if course_id else '%s-%s' % (chapter_id, lecture_id) webpage = self._download_webpage(url, item_id) # course path if course_id: entries = [ self.url_result( urljoin(url, lesson_url), ie=LinuxAcademyIE.ie_key()) for 
lesson_url in orderedSet(re.findall( r'<a[^>]+\bhref=["\'](/cp/courses/lesson/course/\d+/lesson/\d+/module/\d+)', webpage))] title = unescapeHTML(self._html_search_regex( (r'class=["\']course-title["\'][^>]*>(?P<value>[^<]+)', r'var\s+title\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1'), webpage, 'title', default=None, group='value')) description = unescapeHTML(self._html_search_regex( r'var\s+description\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage, 'description', default=None, group='value')) return self.playlist_result(entries, course_id, title, description) # single video path info = self._extract_jwplayer_data( webpage, item_id, require_title=False, m3u8_id='hls',) title = self._search_regex( (r'>Lecture\s*:\s*(?P<value>[^<]+)', r'lessonName\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1'), webpage, 'title', group='value') info.update({ 'id': item_id, 'title': title, }) return info
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/litv.py
# coding: utf-8 from __future__ import unicode_literals import json from .common import InfoExtractor from ..utils import ( ExtractorError, int_or_none, smuggle_url, unsmuggle_url, ) class LiTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?litv\.tv/(?:vod|promo)/[^/]+/(?:content\.do)?\?.*?\b(?:content_)?id=(?P<id>[^&]+)' _URL_TEMPLATE = 'https://www.litv.tv/vod/%s/content.do?id=%s' _TESTS = [{ 'url': 'https://www.litv.tv/vod/drama/content.do?brc_id=root&id=VOD00041610&isUHEnabled=true&autoPlay=1', 'info_dict': { 'id': 'VOD00041606', 'title': '花千骨', }, 'playlist_count': 50, }, { 'url': 'https://www.litv.tv/vod/drama/content.do?brc_id=root&id=VOD00041610&isUHEnabled=true&autoPlay=1', 'md5': '969e343d9244778cb29acec608e53640', 'info_dict': { 'id': 'VOD00041610', 'ext': 'mp4', 'title': '花千骨第1集', 'thumbnail': r're:https?://.*\.jpg$', 'description': 'md5:c7017aa144c87467c4fb2909c4b05d6f', 'episode_number': 1, }, 'params': { 'noplaylist': True, }, 'skip': 'Georestricted to Taiwan', }, { 'url': 'https://www.litv.tv/promo/miyuezhuan/?content_id=VOD00044841&', 'md5': '88322ea132f848d6e3e18b32a832b918', 'info_dict': { 'id': 'VOD00044841', 'ext': 'mp4', 'title': '芈月傳第1集 霸星芈月降世楚國', 'description': '楚威王二年,太史令唐昧夜觀星象,發現霸星即將現世。王后得知霸星的預言後,想盡辦法不讓孩子順利出生,幸得莒姬相護化解危機。沒想到眾人期待下出生的霸星卻是位公主,楚威王對此失望至極。楚王后命人將女嬰丟棄河中,居然奇蹟似的被少司命像攔下,楚威王認為此女非同凡響,為她取名芈月。', }, 'skip': 'Georestricted to Taiwan', }] def _extract_playlist(self, season_list, video_id, program_info, prompt=True): episode_title = program_info['title'] content_id = season_list['contentId'] if prompt: self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (content_id, video_id)) all_episodes = [ self.url_result(smuggle_url( self._URL_TEMPLATE % (program_info['contentType'], episode['contentId']), {'force_noplaylist': True})) # To prevent infinite recursion for episode in season_list['episode']] return self.playlist_result(all_episodes, content_id, episode_title) def _real_extract(self, url): url, data = 
unsmuggle_url(url, {}) video_id = self._match_id(url) noplaylist = self._downloader.params.get('noplaylist') noplaylist_prompt = True if 'force_noplaylist' in data: noplaylist = data['force_noplaylist'] noplaylist_prompt = False webpage = self._download_webpage(url, video_id) program_info = self._parse_json(self._search_regex( r'var\s+programInfo\s*=\s*([^;]+)', webpage, 'VOD data', default='{}'), video_id) season_list = list(program_info.get('seasonList', {}).values()) if season_list: if not noplaylist: return self._extract_playlist( season_list[0], video_id, program_info, prompt=noplaylist_prompt) if noplaylist_prompt: self.to_screen('Downloading just video %s because of --no-playlist' % video_id) # In browsers `getMainUrl` request is always issued. Usually this # endpoint gives the same result as the data embedded in the webpage. # If georestricted, there are no embedded data, so an extra request is # necessary to get the error code if 'assetId' not in program_info: program_info = self._download_json( 'https://www.litv.tv/vod/ajax/getProgramInfo', video_id, query={'contentId': video_id}, headers={'Accept': 'application/json'}) video_data = self._parse_json(self._search_regex( r'uiHlsUrl\s*=\s*testBackendData\(([^;]+)\);', webpage, 'video data', default='{}'), video_id) if not video_data: payload = { 'assetId': program_info['assetId'], 'watchDevices': program_info['watchDevices'], 'contentType': program_info['contentType'], } video_data = self._download_json( 'https://www.litv.tv/vod/getMainUrl', video_id, data=json.dumps(payload).encode('utf-8'), headers={'Content-Type': 'application/json'}) if not video_data.get('fullpath'): error_msg = video_data.get('errorMessage') if error_msg == 'vod.error.outsideregionerror': self.raise_geo_restricted('This video is available in Taiwan only') if error_msg: raise ExtractorError('%s said: %s' % (self.IE_NAME, error_msg), expected=True) raise ExtractorError('Unexpected result from %s' % self.IE_NAME) formats = 
self._extract_m3u8_formats( video_data['fullpath'], video_id, ext='mp4', entry_protocol='m3u8_native', m3u8_id='hls') for a_format in formats: # LiTV HLS segments doesn't like compressions a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = True title = program_info['title'] + program_info.get('secondaryMark', '') description = program_info.get('description') thumbnail = program_info.get('imageFile') categories = [item['name'] for item in program_info.get('category', [])] episode = int_or_none(program_info.get('episode')) return { 'id': video_id, 'formats': formats, 'title': title, 'description': description, 'thumbnail': thumbnail, 'categories': categories, 'episode_number': episode, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/livejournal.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_str from ..utils import int_or_none class LiveJournalIE(InfoExtractor): _VALID_URL = r'https?://(?:[^.]+\.)?livejournal\.com/video/album/\d+.+?\bid=(?P<id>\d+)' _TEST = { 'url': 'https://andrei-bt.livejournal.com/video/album/407/?mode=view&id=51272', 'md5': 'adaf018388572ced8a6f301ace49d4b2', 'info_dict': { 'id': '1263729', 'ext': 'mp4', 'title': 'Истребители против БПЛА', 'upload_date': '20190624', 'timestamp': 1561406715, } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) record = self._parse_json(self._search_regex( r'Site\.page\s*=\s*({.+?});', webpage, 'page data'), video_id)['video']['record'] storage_id = compat_str(record['storageid']) title = record.get('name') if title: # remove filename extension(.mp4, .mov, etc...) title = title.rsplit('.', 1)[0] return { '_type': 'url_transparent', 'id': video_id, 'title': title, 'thumbnail': record.get('thumbnail'), 'timestamp': int_or_none(record.get('timecreate')), 'url': 'eagleplatform:vc.videos.livejournal.com:' + storage_id, 'ie_key': 'EaglePlatform', }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/liveleak.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import int_or_none class LiveLeakIE(InfoExtractor): _VALID_URL = r'https?://(?:\w+\.)?liveleak\.com/view\?.*?\b[it]=(?P<id>[\w_]+)' _TESTS = [{ 'url': 'http://www.liveleak.com/view?i=757_1364311680', 'md5': '0813c2430bea7a46bf13acf3406992f4', 'info_dict': { 'id': '757_1364311680', 'ext': 'mp4', 'description': 'extremely bad day for this guy..!', 'uploader': 'ljfriel2', 'title': 'Most unlucky car accident', 'thumbnail': r're:^https?://.*\.jpg$' } }, { 'url': 'http://www.liveleak.com/view?i=f93_1390833151', 'md5': 'd3f1367d14cc3c15bf24fbfbe04b9abf', 'info_dict': { 'id': 'f93_1390833151', 'ext': 'mp4', 'description': 'German Television Channel NDR does an exclusive interview with Edward Snowden.\r\nUploaded on LiveLeak cause German Television thinks the rest of the world isn\'t intereseted in Edward Snowden.', 'uploader': 'ARD_Stinkt', 'title': 'German Television does first Edward Snowden Interview (ENGLISH)', 'thumbnail': r're:^https?://.*\.jpg$' } }, { # Prochan embed 'url': 'http://www.liveleak.com/view?i=4f7_1392687779', 'md5': '42c6d97d54f1db107958760788c5f48f', 'info_dict': { 'id': '4f7_1392687779', 'ext': 'mp4', 'description': "The guy with the cigarette seems amazingly nonchalant about the whole thing... I really hope my friends' reactions would be a bit stronger.\r\n\r\nAction-go to 0:55.", 'uploader': 'CapObveus', 'title': 'Man is Fatally Struck by Reckless Car While Packing up a Moving Truck', 'age_limit': 18, }, 'skip': 'Video is dead', }, { # Covers https://github.com/ytdl-org/youtube-dl/pull/5983 # Multiple resolutions 'url': 'http://www.liveleak.com/view?i=801_1409392012', 'md5': 'c3a449dbaca5c0d1825caecd52a57d7b', 'info_dict': { 'id': '801_1409392012', 'ext': 'mp4', 'description': 'Happened on 27.7.2014. 
\r\nAt 0:53 you can see people still swimming at near beach.', 'uploader': 'bony333', 'title': 'Crazy Hungarian tourist films close call waterspout in Croatia', 'thumbnail': r're:^https?://.*\.jpg$' } }, { # Covers https://github.com/ytdl-org/youtube-dl/pull/10664#issuecomment-247439521 'url': 'http://m.liveleak.com/view?i=763_1473349649', 'add_ie': ['Youtube'], 'info_dict': { 'id': '763_1473349649', 'ext': 'mp4', 'title': 'Reporters and public officials ignore epidemic of black on asian violence in Sacramento | Colin Flaherty', 'description': 'Colin being the warrior he is and showing the injustice Asians in Sacramento are being subjected to.', 'uploader': 'Ziz', 'upload_date': '20160908', 'uploader_id': 'UCEbta5E_jqlZmEJsriTEtnw' }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.liveleak.com/view?i=677_1439397581', 'info_dict': { 'id': '677_1439397581', 'title': 'Fuel Depot in China Explosion caught on video', }, 'playlist_count': 3, }, { 'url': 'https://www.liveleak.com/view?t=HvHi_1523016227', 'only_matching': True, }, { # No original video 'url': 'https://www.liveleak.com/view?t=C26ZZ_1558612804', 'only_matching': True, }] @staticmethod def _extract_urls(webpage): return re.findall( r'<iframe[^>]+src="(https?://(?:\w+\.)?liveleak\.com/ll_embed\?[^"]*[ift]=[\w_]+[^"]+)"', webpage) def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) video_title = self._og_search_title(webpage).replace('LiveLeak.com -', '').strip() video_description = self._og_search_description(webpage) video_uploader = self._html_search_regex( r'By:.*?(\w+)</a>', webpage, 'uploader', fatal=False) age_limit = int_or_none(self._search_regex( r'you confirm that you are ([0-9]+) years and over.', webpage, 'age limit', default=None)) video_thumbnail = self._og_search_thumbnail(webpage) entries = self._parse_html5_media_entries(url, webpage, video_id) if not entries: # Maybe an embed? 
embed_url = self._search_regex( r'<iframe[^>]+src="((?:https?:)?//(?:www\.)?(?:prochan|youtube)\.com/embed[^"]+)"', webpage, 'embed URL') return { '_type': 'url_transparent', 'url': embed_url, 'id': video_id, 'title': video_title, 'description': video_description, 'uploader': video_uploader, 'age_limit': age_limit, } for idx, info_dict in enumerate(entries): formats = [] for a_format in info_dict['formats']: if not a_format.get('height'): a_format['height'] = int_or_none(self._search_regex( r'([0-9]+)p\.mp4', a_format['url'], 'height label', default=None)) formats.append(a_format) # Removing '.*.mp4' gives the raw video, which is essentially # the same video without the LiveLeak logo at the top (see # https://github.com/ytdl-org/youtube-dl/pull/4768) orig_url = re.sub(r'\.mp4\.[^.]+', '', a_format['url']) if a_format['url'] != orig_url: format_id = a_format.get('format_id') format_id = 'original' + ('-' + format_id if format_id else '') if self._is_valid_url(orig_url, video_id, format_id): formats.append({ 'format_id': format_id, 'url': orig_url, 'preference': 1, }) self._sort_formats(formats) info_dict['formats'] = formats # Don't append entry ID for one-video pages to keep backward compatibility if len(entries) > 1: info_dict['id'] = '%s_%s' % (video_id, idx + 1) else: info_dict['id'] = video_id info_dict.update({ 'title': video_title, 'description': video_description, 'uploader': video_uploader, 'age_limit': age_limit, 'thumbnail': video_thumbnail, }) return self.playlist_result(entries, video_id, video_title) class LiveLeakEmbedIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?liveleak\.com/ll_embed\?.*?\b(?P<kind>[ift])=(?P<id>[\w_]+)' # See generic.py for actual test cases _TESTS = [{ 'url': 'https://www.liveleak.com/ll_embed?i=874_1459135191', 'only_matching': True, }, { 'url': 'https://www.liveleak.com/ll_embed?f=ab065df993c1', 'only_matching': True, }] def _real_extract(self, url): kind, video_id = re.match(self._VALID_URL, url).groups() if kind == 'f': 
webpage = self._download_webpage(url, video_id) liveleak_url = self._search_regex( r'(?:logourl\s*:\s*|window\.open\()(?P<q1>[\'"])(?P<url>%s)(?P=q1)' % LiveLeakIE._VALID_URL, webpage, 'LiveLeak URL', group='url') else: liveleak_url = 'http://www.liveleak.com/view?%s=%s' % (kind, video_id) return self.url_result(liveleak_url, ie=LiveLeakIE.ie_key())
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/livestream.py
from __future__ import unicode_literals import re import itertools from .common import InfoExtractor from ..compat import ( compat_str, compat_urlparse, ) from ..utils import ( find_xpath_attr, xpath_attr, xpath_with_ns, xpath_text, orderedSet, update_url_query, int_or_none, float_or_none, parse_iso8601, determine_ext, ) class LivestreamIE(InfoExtractor): IE_NAME = 'livestream' _VALID_URL = r'https?://(?:new\.)?livestream\.com/(?:accounts/(?P<account_id>\d+)|(?P<account_name>[^/]+))/(?:events/(?P<event_id>\d+)|(?P<event_name>[^/]+))(?:/videos/(?P<id>\d+))?' _TESTS = [{ 'url': 'http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370', 'md5': '53274c76ba7754fb0e8d072716f2292b', 'info_dict': { 'id': '4719370', 'ext': 'mp4', 'title': 'Live from Webster Hall NYC', 'timestamp': 1350008072, 'upload_date': '20121012', 'duration': 5968.0, 'like_count': int, 'view_count': int, 'thumbnail': r're:^http://.*\.jpg$' } }, { 'url': 'http://new.livestream.com/tedx/cityenglish', 'info_dict': { 'title': 'TEDCity2.0 (English)', 'id': '2245590', }, 'playlist_mincount': 4, }, { 'url': 'http://new.livestream.com/chess24/tatasteelchess', 'info_dict': { 'title': 'Tata Steel Chess', 'id': '3705884', }, 'playlist_mincount': 60, }, { 'url': 'https://new.livestream.com/accounts/362/events/3557232/videos/67864563/player?autoPlay=false&height=360&mute=false&width=640', 'only_matching': True, }, { 'url': 'http://livestream.com/bsww/concacafbeachsoccercampeonato2015', 'only_matching': True, }] _API_URL_TEMPLATE = 'http://livestream.com/api/accounts/%s/events/%s' def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None): base_ele = find_xpath_attr( smil, self._xpath_ns('.//meta', namespace), 'name', 'httpBase') base = base_ele.get('content') if base_ele is not None else 'http://livestreamvod-f.akamaihd.net/' formats = [] video_nodes = smil.findall(self._xpath_ns('.//video', namespace)) for vn in video_nodes: tbr = 
int_or_none(vn.attrib.get('system-bitrate'), 1000) furl = ( update_url_query(compat_urlparse.urljoin(base, vn.attrib['src']), { 'v': '3.0.3', 'fp': 'WIN% 14,0,0,145', })) if 'clipBegin' in vn.attrib: furl += '&ssek=' + vn.attrib['clipBegin'] formats.append({ 'url': furl, 'format_id': 'smil_%d' % tbr, 'ext': 'flv', 'tbr': tbr, 'preference': -1000, }) return formats def _extract_video_info(self, video_data): video_id = compat_str(video_data['id']) FORMAT_KEYS = ( ('sd', 'progressive_url'), ('hd', 'progressive_url_hd'), ) formats = [] for format_id, key in FORMAT_KEYS: video_url = video_data.get(key) if video_url: ext = determine_ext(video_url) if ext == 'm3u8': continue bitrate = int_or_none(self._search_regex( r'(\d+)\.%s' % ext, video_url, 'bitrate', default=None)) formats.append({ 'url': video_url, 'format_id': format_id, 'tbr': bitrate, 'ext': ext, }) smil_url = video_data.get('smil_url') if smil_url: formats.extend(self._extract_smil_formats(smil_url, video_id, fatal=False)) m3u8_url = video_data.get('m3u8_url') if m3u8_url: formats.extend(self._extract_m3u8_formats( m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) f4m_url = video_data.get('f4m_url') if f4m_url: formats.extend(self._extract_f4m_formats( f4m_url, video_id, f4m_id='hds', fatal=False)) self._sort_formats(formats) comments = [{ 'author_id': comment.get('author_id'), 'author': comment.get('author', {}).get('full_name'), 'id': comment.get('id'), 'text': comment['text'], 'timestamp': parse_iso8601(comment.get('created_at')), } for comment in video_data.get('comments', {}).get('data', [])] return { 'id': video_id, 'formats': formats, 'title': video_data['caption'], 'description': video_data.get('description'), 'thumbnail': video_data.get('thumbnail_url'), 'duration': float_or_none(video_data.get('duration'), 1000), 'timestamp': parse_iso8601(video_data.get('publish_at')), 'like_count': video_data.get('likes', {}).get('total'), 'comment_count': video_data.get('comments', 
{}).get('total'), 'view_count': video_data.get('views'), 'comments': comments, } def _extract_stream_info(self, stream_info): broadcast_id = compat_str(stream_info['broadcast_id']) is_live = stream_info.get('is_live') formats = [] smil_url = stream_info.get('play_url') if smil_url: formats.extend(self._extract_smil_formats(smil_url, broadcast_id)) m3u8_url = stream_info.get('m3u8_url') if m3u8_url: formats.extend(self._extract_m3u8_formats( m3u8_url, broadcast_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) rtsp_url = stream_info.get('rtsp_url') if rtsp_url: formats.append({ 'url': rtsp_url, 'format_id': 'rtsp', }) self._sort_formats(formats) return { 'id': broadcast_id, 'formats': formats, 'title': self._live_title(stream_info['stream_title']) if is_live else stream_info['stream_title'], 'thumbnail': stream_info.get('thumbnail_url'), 'is_live': is_live, } def _extract_event(self, event_data): event_id = compat_str(event_data['id']) account_id = compat_str(event_data['owner_account_id']) feed_root_url = self._API_URL_TEMPLATE % (account_id, event_id) + '/feed.json' stream_info = event_data.get('stream_info') if stream_info: return self._extract_stream_info(stream_info) last_video = None entries = [] for i in itertools.count(1): if last_video is None: info_url = feed_root_url else: info_url = '{root}?&id={id}&newer=-1&type=video'.format( root=feed_root_url, id=last_video) videos_info = self._download_json( info_url, event_id, 'Downloading page {0}'.format(i))['data'] videos_info = [v['data'] for v in videos_info if v['type'] == 'video'] if not videos_info: break for v in videos_info: v_id = compat_str(v['id']) entries.append(self.url_result( 'http://livestream.com/accounts/%s/events/%s/videos/%s' % (account_id, event_id, v_id), 'Livestream', v_id, v.get('caption'))) last_video = videos_info[-1]['id'] return self.playlist_result(entries, event_id, event_data['full_name']) def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = 
mobj.group('id') event = mobj.group('event_id') or mobj.group('event_name') account = mobj.group('account_id') or mobj.group('account_name') api_url = self._API_URL_TEMPLATE % (account, event) if video_id: video_data = self._download_json( api_url + '/videos/%s' % video_id, video_id) return self._extract_video_info(video_data) else: event_data = self._download_json(api_url, video_id) return self._extract_event(event_data) # The original version of Livestream uses a different system class LivestreamOriginalIE(InfoExtractor): IE_NAME = 'livestream:original' _VALID_URL = r'''(?x)https?://original\.livestream\.com/ (?P<user>[^/\?#]+)(?:/(?P<type>video|folder) (?:(?:\?.*?Id=|/)(?P<id>.*?)(&|$))?)? ''' _TESTS = [{ 'url': 'http://original.livestream.com/dealbook/video?clipId=pla_8aa4a3f1-ba15-46a4-893b-902210e138fb', 'info_dict': { 'id': 'pla_8aa4a3f1-ba15-46a4-893b-902210e138fb', 'ext': 'mp4', 'title': 'Spark 1 (BitCoin) with Cameron Winklevoss & Tyler Winklevoss of Winklevoss Capital', 'duration': 771.301, 'view_count': int, }, }, { 'url': 'https://original.livestream.com/newplay/folder?dirId=a07bf706-d0e4-4e75-a747-b021d84f2fd3', 'info_dict': { 'id': 'a07bf706-d0e4-4e75-a747-b021d84f2fd3', }, 'playlist_mincount': 4, }, { # live stream 'url': 'http://original.livestream.com/znsbahamas', 'only_matching': True, }] def _extract_video_info(self, user, video_id): api_url = 'http://x%sx.api.channel.livestream.com/2.0/clipdetails?extendedInfo=true&id=%s' % (user, video_id) info = self._download_xml(api_url, video_id) item = info.find('channel').find('item') title = xpath_text(item, 'title') media_ns = {'media': 'http://search.yahoo.com/mrss'} thumbnail_url = xpath_attr( item, xpath_with_ns('media:thumbnail', media_ns), 'url') duration = float_or_none(xpath_attr( item, xpath_with_ns('media:content', media_ns), 'duration')) ls_ns = {'ls': 'http://api.channel.livestream.com/2.0'} view_count = int_or_none(xpath_text( item, xpath_with_ns('ls:viewsCount', ls_ns))) return { 'id': 
video_id, 'title': title, 'thumbnail': thumbnail_url, 'duration': duration, 'view_count': view_count, } def _extract_video_formats(self, video_data, video_id): formats = [] progressive_url = video_data.get('progressiveUrl') if progressive_url: formats.append({ 'url': progressive_url, 'format_id': 'http', }) m3u8_url = video_data.get('httpUrl') if m3u8_url: formats.extend(self._extract_m3u8_formats( m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) rtsp_url = video_data.get('rtspUrl') if rtsp_url: formats.append({ 'url': rtsp_url, 'format_id': 'rtsp', }) self._sort_formats(formats) return formats def _extract_folder(self, url, folder_id): webpage = self._download_webpage(url, folder_id) paths = orderedSet(re.findall( r'''(?x)(?: <li\s+class="folder">\s*<a\s+href="| <a\s+href="(?=https?://livestre\.am/) )([^"]+)"''', webpage)) entries = [{ '_type': 'url', 'url': compat_urlparse.urljoin(url, p), } for p in paths] return self.playlist_result(entries, folder_id) def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) user = mobj.group('user') url_type = mobj.group('type') content_id = mobj.group('id') if url_type == 'folder': return self._extract_folder(url, content_id) else: # this url is used on mobile devices stream_url = 'http://x%sx.api.channel.livestream.com/3.0/getstream.json' % user info = {} if content_id: stream_url += '?id=%s' % content_id info = self._extract_video_info(user, content_id) else: content_id = user webpage = self._download_webpage(url, content_id) info = { 'title': self._og_search_title(webpage), 'description': self._og_search_description(webpage), 'thumbnail': self._search_regex(r'channelLogo\.src\s*=\s*"([^"]+)"', webpage, 'thumbnail', None), } video_data = self._download_json(stream_url, content_id) is_live = video_data.get('isLive') info.update({ 'id': content_id, 'title': self._live_title(info['title']) if is_live else info['title'], 'formats': self._extract_video_formats(video_data, content_id), 'is_live': 
is_live, }) return info # The server doesn't support HEAD request, the generic extractor can't detect # the redirection class LivestreamShortenerIE(InfoExtractor): IE_NAME = 'livestream:shortener' IE_DESC = False # Do not list _VALID_URL = r'https?://livestre\.am/(?P<id>.+)' def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) id = mobj.group('id') webpage = self._download_webpage(url, id) return self.url_result(self._og_search_url(webpage))
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/lnkgo.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( int_or_none, unified_strdate, ) class LnkGoIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?lnkgo\.(?:alfa\.)?lt/visi-video/(?P<show>[^/]+)/ziurek-(?P<id>[A-Za-z0-9-]+)' _TESTS = [{ 'url': 'http://lnkgo.alfa.lt/visi-video/yra-kaip-yra/ziurek-yra-kaip-yra-162', 'info_dict': { 'id': '46712', 'ext': 'mp4', 'title': 'Yra kaip yra', 'upload_date': '20150107', 'description': 'md5:d82a5e36b775b7048617f263a0e3475e', 'age_limit': 7, 'duration': 3019, 'thumbnail': r're:^https?://.*\.jpg$' }, 'params': { 'skip_download': True, # HLS download }, }, { 'url': 'http://lnkgo.alfa.lt/visi-video/aktualai-pratesimas/ziurek-nerdas-taiso-kompiuteri-2', 'info_dict': { 'id': '47289', 'ext': 'mp4', 'title': 'Nėrdas: Kompiuterio Valymas', 'upload_date': '20150113', 'description': 'md5:7352d113a242a808676ff17e69db6a69', 'age_limit': 18, 'duration': 346, 'thumbnail': r're:^https?://.*\.jpg$' }, 'params': { 'skip_download': True, # HLS download }, }, { 'url': 'http://www.lnkgo.lt/visi-video/aktualai-pratesimas/ziurek-putka-trys-klausimai', 'only_matching': True, }] _AGE_LIMITS = { 'N-7': 7, 'N-14': 14, 'S': 18, } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage( url, display_id, 'Downloading player webpage') video_id = self._search_regex( r'data-ep="([^"]+)"', webpage, 'video ID') title = self._og_search_title(webpage) description = self._og_search_description(webpage) upload_date = unified_strdate(self._search_regex( r'class="[^"]*meta-item[^"]*air-time[^"]*">.*?<strong>([^<]+)</strong>', webpage, 'upload date', fatal=False)) thumbnail_w = int_or_none( self._og_search_property('image:width', webpage, 'thumbnail width', fatal=False)) thumbnail_h = int_or_none( self._og_search_property('image:height', webpage, 'thumbnail height', fatal=False)) thumbnail = { 'url': self._og_search_thumbnail(webpage), } if thumbnail_w and 
thumbnail_h: thumbnail.update({ 'width': thumbnail_w, 'height': thumbnail_h, }) config = self._parse_json(self._search_regex( r'episodePlayer\((\{.*?\}),\s*\{', webpage, 'sources'), video_id) if config.get('pGeo'): self.report_warning( 'This content might not be available in your country due to copyright reasons') formats = [{ 'format_id': 'hls', 'ext': 'mp4', 'url': config['EpisodeVideoLink_HLS'], }] m = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>[^/]+))/(?P<play_path>.+)$', config['EpisodeVideoLink']) if m: formats.append({ 'format_id': 'rtmp', 'ext': 'flv', 'url': m.group('url'), 'play_path': m.group('play_path'), 'page_url': url, }) self._sort_formats(formats) return { 'id': video_id, 'display_id': display_id, 'title': title, 'formats': formats, 'thumbnails': [thumbnail], 'duration': int_or_none(config.get('VideoTime')), 'description': description, 'age_limit': self._AGE_LIMITS.get(config.get('PGRating'), 0), 'upload_date': upload_date, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/localnews8.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor class LocalNews8IE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?localnews8\.com/(?:[^/]+/)*(?P<display_id>[^/]+)/(?P<id>[0-9]+)' _TEST = { 'url': 'http://www.localnews8.com/news/rexburg-business-turns-carbon-fiber-scraps-into-wedding-rings/35183304', 'md5': 'be4d48aea61aa2bde7be2ee47691ad20', 'info_dict': { 'id': '35183304', 'display_id': 'rexburg-business-turns-carbon-fiber-scraps-into-wedding-rings', 'ext': 'mp4', 'title': 'Rexburg business turns carbon fiber scraps into wedding ring', 'description': 'The process was first invented by Lamborghini and less than a dozen companies around the world use it.', 'duration': 153, 'timestamp': 1441844822, 'upload_date': '20150910', 'uploader_id': 'api', } } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') display_id = mobj.group('display_id') webpage = self._download_webpage(url, display_id) partner_id = self._search_regex( r'partnerId\s*[:=]\s*(["\'])(?P<id>\d+)\1', webpage, 'partner id', group='id') kaltura_id = self._search_regex( r'videoIdString\s*[:=]\s*(["\'])kaltura:(?P<id>[0-9a-z_]+)\1', webpage, 'videl id', group='id') return { '_type': 'url_transparent', 'url': 'kaltura:%s:%s' % (partner_id, kaltura_id), 'ie_key': 'Kaltura', 'id': video_id, 'display_id': display_id, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/lovehomeporn.py
from __future__ import unicode_literals import re from .nuevo import NuevoBaseIE class LoveHomePornIE(NuevoBaseIE): _VALID_URL = r'https?://(?:www\.)?lovehomeporn\.com/video/(?P<id>\d+)(?:/(?P<display_id>[^/?#&]+))?' _TEST = { 'url': 'http://lovehomeporn.com/video/48483/stunning-busty-brunette-girlfriend-sucking-and-riding-a-big-dick#menu', 'info_dict': { 'id': '48483', 'display_id': 'stunning-busty-brunette-girlfriend-sucking-and-riding-a-big-dick', 'ext': 'mp4', 'title': 'Stunning busty brunette girlfriend sucking and riding a big dick', 'age_limit': 18, 'duration': 238.47, }, 'params': { 'skip_download': True, } } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') display_id = mobj.group('display_id') info = self._extract_nuevo( 'http://lovehomeporn.com/media/nuevo/config.php?key=%s' % video_id, video_id) info.update({ 'display_id': display_id, 'age_limit': 18 }) return info
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/lrt.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( determine_ext, int_or_none, parse_duration, remove_end, ) class LRTIE(InfoExtractor): IE_NAME = 'lrt.lt' _VALID_URL = r'https?://(?:www\.)?lrt\.lt/mediateka/irasas/(?P<id>[0-9]+)' _TESTS = [{ # m3u8 download 'url': 'http://www.lrt.lt/mediateka/irasas/54391/', 'md5': 'fe44cf7e4ab3198055f2c598fc175cb0', 'info_dict': { 'id': '54391', 'ext': 'mp4', 'title': 'Septynios Kauno dienos', 'description': 'md5:24d84534c7dc76581e59f5689462411a', 'duration': 1783, 'view_count': int, 'like_count': int, }, }, { # direct mp3 download 'url': 'http://www.lrt.lt/mediateka/irasas/1013074524/', 'md5': '389da8ca3cad0f51d12bed0c844f6a0a', 'info_dict': { 'id': '1013074524', 'ext': 'mp3', 'title': 'Kita tema 2016-09-05 15:05', 'description': 'md5:1b295a8fc7219ed0d543fc228c931fb5', 'duration': 3008, 'view_count': int, 'like_count': int, }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = remove_end(self._og_search_title(webpage), ' - LRT') formats = [] for _, file_url in re.findall( r'file\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage): ext = determine_ext(file_url) if ext not in ('m3u8', 'mp3'): continue # mp3 served as m3u8 produces stuttered media file if ext == 'm3u8' and '.mp3' in file_url: continue if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( file_url, video_id, 'mp4', entry_protocol='m3u8_native', fatal=False)) elif ext == 'mp3': formats.append({ 'url': file_url, 'vcodec': 'none', }) self._sort_formats(formats) thumbnail = self._og_search_thumbnail(webpage) description = self._og_search_description(webpage) duration = parse_duration(self._search_regex( r'var\s+record_len\s*=\s*(["\'])(?P<duration>[0-9]+:[0-9]+:[0-9]+)\1', webpage, 'duration', default=None, group='duration')) view_count = int_or_none(self._html_search_regex( 
r'<div[^>]+class=(["\']).*?record-desc-seen.*?\1[^>]*>(?P<count>.+?)</div>', webpage, 'view count', fatal=False, group='count')) like_count = int_or_none(self._search_regex( r'<span[^>]+id=(["\'])flikesCount.*?\1>(?P<count>\d+)<', webpage, 'like count', fatal=False, group='count')) return { 'id': video_id, 'title': title, 'formats': formats, 'thumbnail': thumbnail, 'description': description, 'duration': duration, 'view_count': view_count, 'like_count': like_count, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/lynda.py
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import (
    compat_str,
    compat_urlparse,
)
from ..utils import (
    ExtractorError,
    int_or_none,
    urlencode_postdata,
)


class LyndaBaseIE(InfoExtractor):
    # Shared login machinery for lynda.com extractors. Login is a three-step
    # form flow: fetch signin page -> submit email -> submit password.
    _SIGNIN_URL = 'https://www.lynda.com/signin/lynda'
    _PASSWORD_URL = 'https://www.lynda.com/signin/password'
    _USER_URL = 'https://www.lynda.com/signin/user'
    _ACCOUNT_CREDENTIALS_HINT = 'Use --username and --password options to provide lynda.com account credentials.'
    _NETRC_MACHINE = 'lynda'

    def _real_initialize(self):
        self._login()

    @staticmethod
    def _check_error(json_string, key_or_keys):
        # Raise if the login JSON response contains an error under any of the
        # given key(s); accepts either a single key or an iterable of keys.
        keys = [key_or_keys] if isinstance(key_or_keys, compat_str) else key_or_keys
        for key in keys:
            error = json_string.get(key)
            if error:
                raise ExtractorError('Unable to login: %s' % error, expected=True)

    def _login_step(self, form_html, fallback_action_url, extra_form_data, note, referrer_url):
        # Submit one login form: POST the form's hidden inputs plus
        # extra_form_data to the form's action URL (or fallback_action_url
        # when the form carries none), returning (JSON response, action URL).
        action_url = self._search_regex(
            r'<form[^>]+action=(["\'])(?P<url>.+?)\1', form_html,
            'post url', default=fallback_action_url, group='url')

        if not action_url.startswith('http'):
            action_url = compat_urlparse.urljoin(self._SIGNIN_URL, action_url)

        form_data = self._hidden_inputs(form_html)
        form_data.update(extra_form_data)

        response = self._download_json(
            action_url, None, note,
            data=urlencode_postdata(form_data),
            headers={
                'Referer': referrer_url,
                'X-Requested-With': 'XMLHttpRequest',
            },
            # lynda answers login errors with 418/500 while still returning
            # a JSON body we need to inspect.
            expected_status=(418, 500, ))

        self._check_error(response, ('email', 'password', 'ErrorMessage'))

        return response, action_url

    def _login(self):
        username, password = self._get_login_info()
        if username is None:
            return

        # Step 1: download signin page
        signin_page = self._download_webpage(
            self._SIGNIN_URL, None, 'Downloading signin page')

        # Already logged in
        if any(re.search(p, signin_page) for p in (
                r'isLoggedIn\s*:\s*true', r'logout\.aspx', r'>Log out<')):
            return

        # Step 2: submit email
        signin_form = self._search_regex(
            r'(?s)(<form[^>]+data-form-name=["\']signin["\'][^>]*>.+?</form>)',
            signin_page, 'signin form')
        signin_page, signin_url = self._login_step(
            signin_form, self._PASSWORD_URL, {'email': username},
            'Submitting email', self._SIGNIN_URL)

        # Step 3: submit password
        password_form = signin_page['body']
        self._login_step(
            password_form, self._USER_URL,
            {'email': username, 'password': password},
            'Submitting password', signin_url)


class LyndaIE(LyndaBaseIE):
    IE_NAME = 'lynda'
    IE_DESC = 'lynda.com videos'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:www\.)?(?:lynda\.com|educourse\.ga)/
                        (?:
                            (?:[^/]+/){2,3}(?P<course_id>\d+)|
                            player/embed
                        )/
                        (?P<id>\d+)
                    '''
    # Timecode format of transcript entries, e.g. [00:00:01.54]
    _TIMECODE_REGEX = r'\[(?P<timecode>\d+:\d+:\d+[\.,]\d+)\]'

    _TESTS = [{
        'url': 'https://www.lynda.com/Bootstrap-tutorials/Using-exercise-files/110885/114408-4.html',
        # md5 is unstable
        'info_dict': {
            'id': '114408',
            'ext': 'mp4',
            'title': 'Using the exercise files',
            'duration': 68
        }
    }, {
        'url': 'https://www.lynda.com/player/embed/133770?tr=foo=1;bar=g;fizz=rt&fs=0',
        'only_matching': True,
    }, {
        'url': 'https://educourse.ga/Bootstrap-tutorials/Using-exercise-files/110885/114408-4.html',
        'only_matching': True,
    }, {
        'url': 'https://www.lynda.com/de/Graphic-Design-tutorials/Willkommen-Grundlagen-guten-Gestaltung/393570/393572-4.html',
        'only_matching': True,
    }, {
        # Status="NotFound", Message="Transcript not found"
        'url': 'https://www.lynda.com/ASP-NET-tutorials/What-you-should-know/5034180/2811512-4.html',
        'only_matching': True,
    }]

    def _raise_unavailable(self, video_id):
        self.raise_login_required(
            'Video %s is only available for members' % video_id)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        course_id = mobj.group('course_id')

        query = {
            'videoId': video_id,
            'type': 'video',
        }

        video = self._download_json(
            'https://www.lynda.com/ajax/player', video_id,
            'Downloading video JSON', fatal=False, query=query)

        # Fallback scenario
        if not video:
            query['courseId'] = course_id

            play = self._download_json(
                'https://www.lynda.com/ajax/course/%s/%s/play'
                % (course_id, video_id), video_id, 'Downloading play JSON')

            if not play:
                self._raise_unavailable(video_id)

            formats = []
            for formats_dict in play:
                urls = formats_dict.get('urls')
                if not isinstance(urls, dict):
                    continue
                cdn = formats_dict.get('name')
                for format_id, format_url in urls.items():
                    if not format_url:
                        continue
                    formats.append({
                        'url': format_url,
                        'format_id': '%s-%s' % (cdn, format_id) if cdn else format_id,
                        # format_id looks numeric here (used as height below);
                        # int_or_none tolerates non-numeric ids.
                        'height': int_or_none(format_id),
                    })
            self._sort_formats(formats)

            # Metadata (title, description, ...) comes from a separate
            # "conviva" endpoint in this fallback path.
            conviva = self._download_json(
                'https://www.lynda.com/ajax/player/conviva', video_id,
                'Downloading conviva JSON', query=query)

            return {
                'id': video_id,
                'title': conviva['VideoTitle'],
                'description': conviva.get('VideoDescription'),
                'release_year': int_or_none(conviva.get('ReleaseYear')),
                'duration': int_or_none(conviva.get('Duration')),
                'creator': conviva.get('Author'),
                'formats': formats,
            }

        if 'Status' in video:
            raise ExtractorError(
                'lynda returned error: %s' % video['Message'], expected=True)

        if video.get('HasAccess') is False:
            self._raise_unavailable(video_id)

        video_id = compat_str(video.get('ID') or video_id)
        duration = int_or_none(video.get('DurationInSeconds'))
        title = video['Title']

        formats = []

        fmts = video.get('Formats')
        if fmts:
            formats.extend([{
                'url': f['Url'],
                'ext': f.get('Extension'),
                'width': int_or_none(f.get('Width')),
                'height': int_or_none(f.get('Height')),
                'filesize': int_or_none(f.get('FileSize')),
                'format_id': compat_str(f.get('Resolution')) if f.get('Resolution') else None,
            } for f in fmts if f.get('Url')])

        prioritized_streams = video.get('PrioritizedStreams')
        if prioritized_streams:
            for prioritized_stream_id, prioritized_stream in prioritized_streams.items():
                formats.extend([{
                    'url': video_url,
                    'height': int_or_none(format_id),
                    'format_id': '%s-%s' % (prioritized_stream_id, format_id),
                } for format_id, video_url in prioritized_stream.items()])

        self._check_formats(formats, video_id)
        self._sort_formats(formats)

        subtitles = self.extract_subtitles(video_id)

        return {
            'id': video_id,
            'title': title,
            'duration': duration,
            'subtitles': subtitles,
            'formats': formats
        }

    def _fix_subtitles(self, subs):
        # Convert lynda's transcript JSON (list of {Timecode, Caption}) to
        # SRT text. Each cue's end time is the next cue's start time, so the
        # last entry only serves as a terminator. Returns None when no
        # usable cues were found.
        srt = ''
        seq_counter = 0
        for pos in range(0, len(subs) - 1):
            seq_current = subs[pos]
            m_current = re.match(self._TIMECODE_REGEX, seq_current['Timecode'])
            if m_current is None:
                continue
            seq_next = subs[pos + 1]
            m_next = re.match(self._TIMECODE_REGEX, seq_next['Timecode'])
            if m_next is None:
                continue
            appear_time = m_current.group('timecode')
            disappear_time = m_next.group('timecode')
            text = seq_current['Caption'].strip()
            if text:
                seq_counter += 1
                srt += '%s\r\n%s --> %s\r\n%s\r\n\r\n' % (seq_counter, appear_time, disappear_time, text)
        if srt:
            return srt

    def _get_subtitles(self, video_id):
        url = 'https://www.lynda.com/ajax/player?videoId=%s&type=transcript' % video_id
        subs = self._download_webpage(
            url, video_id, 'Downloading subtitles JSON', fatal=False)
        # The endpoint answers with an XML-ish error body (not JSON) when the
        # transcript is missing, hence the string check before parsing.
        if not subs or 'Status="NotFound"' in subs:
            return {}
        subs = self._parse_json(subs, video_id, fatal=False)
        if not subs:
            return {}
        fixed_subs = self._fix_subtitles(subs)
        if fixed_subs:
            return {'en': [{'ext': 'srt', 'data': fixed_subs}]}
        return {}


class LyndaCourseIE(LyndaBaseIE):
    IE_NAME = 'lynda:course'
    IE_DESC = 'lynda.com online courses'

    # Course link equals to welcome/introduction video link of same course
    # We will recognize it as course link
    _VALID_URL = r'https?://(?:www|m)\.(?:lynda\.com|educourse\.ga)/(?P<coursepath>(?:[^/]+/){2,3}(?P<courseid>\d+))-2\.html'
    _TESTS = [{
        'url': 'https://www.lynda.com/Graphic-Design-tutorials/Grundlagen-guten-Gestaltung/393570-2.html',
        'only_matching': True,
    }, {
        'url': 'https://www.lynda.com/de/Graphic-Design-tutorials/Grundlagen-guten-Gestaltung/393570-2.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        course_path = mobj.group('coursepath')
        course_id = mobj.group('courseid')

        # Template for individual video page URLs within this course.
        item_template = 'https://www.lynda.com/%s/%%s-4.html' % course_path

        course = self._download_json(
            'https://www.lynda.com/ajax/player?courseId=%s&type=course' % course_id,
            course_id, 'Downloading course JSON', fatal=False)

        if not course:
            # API failed: scrape video ids straight off the course webpage.
            webpage = self._download_webpage(url, course_id)
            entries = [
                self.url_result(
                    item_template % video_id, ie=LyndaIE.ie_key(),
                    video_id=video_id)
                for video_id in re.findall(
                    r'data-video-id=["\'](\d+)', webpage)]
            return self.playlist_result(
                entries, course_id,
                self._og_search_title(webpage, fatal=False),
                self._og_search_description(webpage))

        if course.get('Status') == 'NotFound':
            raise ExtractorError(
                'Course %s does not exist' % course_id, expected=True)

        unaccessible_videos = 0
        entries = []

        # Might want to extract videos right here from video['Formats'] as it seems 'Formats' is not provided
        # by single video API anymore

        for chapter in course['Chapters']:
            for video in chapter.get('Videos', []):
                if video.get('HasAccess') is False:
                    unaccessible_videos += 1
                    continue
                video_id = video.get('ID')
                if video_id:
                    entries.append({
                        '_type': 'url_transparent',
                        'url': item_template % video_id,
                        'ie_key': LyndaIE.ie_key(),
                        'chapter': chapter.get('Title'),
                        'chapter_number': int_or_none(chapter.get('ChapterIndex')),
                        'chapter_id': compat_str(chapter.get('ID')),
                    })

        if unaccessible_videos > 0:
            self._downloader.report_warning(
                '%s videos are only available for members (or paid members) and will not be downloaded. '
                % unaccessible_videos + self._ACCOUNT_CREDENTIALS_HINT)

        course_title = course.get('Title')
        course_description = course.get('Description')

        return self.playlist_result(entries, course_id, course_title, course_description)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/m6.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class M6IE(InfoExtractor): IE_NAME = 'm6' _VALID_URL = r'https?://(?:www\.)?m6\.fr/[^/]+/videos/(?P<id>\d+)-[^\.]+\.html' _TEST = { 'url': 'http://www.m6.fr/emission-les_reines_du_shopping/videos/11323908-emeline_est_la_reine_du_shopping_sur_le_theme_ma_fete_d_8217_anniversaire.html', 'md5': '242994a87de2c316891428e0176bcb77', 'info_dict': { 'id': '11323908', 'ext': 'mp4', 'title': 'Emeline est la Reine du Shopping sur le thème « Ma fête d’anniversaire ! »', 'description': 'md5:1212ae8fb4b7baa4dc3886c5676007c2', 'duration': 100, } } def _real_extract(self, url): video_id = self._match_id(url) return self.url_result('6play:%s' % video_id, 'SixPlay', video_id)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/mailru.py
# coding: utf-8
from __future__ import unicode_literals

import itertools
import json
import re

from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
from ..utils import (
    int_or_none,
    parse_duration,
    remove_end,
    try_get,
)


class MailRuIE(InfoExtractor):
    IE_NAME = 'mailru'
    IE_DESC = 'Видео@Mail.Ru'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:(?:www|m)\.)?my\.mail\.ru/
                        (?:
                            video/.*\#video=/?(?P<idv1>(?:[^/]+/){3}\d+)|
                            (?:(?P<idv2prefix>(?:[^/]+/){2})video/(?P<idv2suffix>[^/]+/\d+))\.html|
                            (?:video/embed|\+/video/meta)/(?P<metaid>\d+)
                        )
                    '''
    _TESTS = [
        {
            'url': 'http://my.mail.ru/video/top#video=/mail/sonypicturesrus/75/76',
            'md5': 'dea205f03120046894db4ebb6159879a',
            'info_dict': {
                'id': '46301138_76',
                'ext': 'mp4',
                'title': 'Новый Человек-Паук. Высокое напряжение. Восстание Электро',
                'timestamp': 1393235077,
                'upload_date': '20140224',
                'uploader': 'sonypicturesrus',
                'uploader_id': '[email protected]',
                'duration': 184,
            },
            'skip': 'Not accessible from Travis CI server',
        },
        {
            'url': 'http://my.mail.ru/corp/hitech/video/news_hi-tech_mail_ru/1263.html',
            'md5': '00a91a58c3402204dcced523777b475f',
            'info_dict': {
                'id': '46843144_1263',
                'ext': 'mp4',
                'title': 'Samsung Galaxy S5 Hammer Smash Fail Battery Explosion',
                'timestamp': 1397039888,
                'upload_date': '20140409',
                'uploader': 'hitech',
                'uploader_id': '[email protected]',
                'duration': 245,
            },
            'skip': 'Not accessible from Travis CI server',
        },
        {
            # only available via metaUrl API
            'url': 'http://my.mail.ru/mail/720pizle/video/_myvideo/502.html',
            'md5': '3b26d2491c6949d031a32b96bd97c096',
            'info_dict': {
                'id': '56664382_502',
                'ext': 'mp4',
                'title': ':8336',
                'timestamp': 1449094163,
                'upload_date': '20151202',
                'uploader': '[email protected]',
                'uploader_id': '[email protected]',
                'duration': 6001,
            },
            'skip': 'Not accessible from Travis CI server',
        },
        {
            'url': 'http://m.my.mail.ru/mail/3sktvtr/video/_myvideo/138.html',
            'only_matching': True,
        },
        {
            'url': 'https://my.mail.ru/video/embed/7949340477499637815',
            'only_matching': True,
        },
        {
            'url': 'http://my.mail.ru/+/video/meta/7949340477499637815',
            'only_matching': True,
        }
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        meta_id = mobj.group('metaid')

        video_id = None
        if meta_id:
            # Direct meta API URL can be built from the id alone.
            meta_url = 'https://my.mail.ru/+/video/meta/%s' % meta_id
        else:
            video_id = mobj.group('idv1')
            if not video_id:
                video_id = mobj.group('idv2prefix') + mobj.group('idv2suffix')
            webpage = self._download_webpage(url, video_id)
            # The meta API URL is embedded in an inline JSON page config.
            page_config = self._parse_json(self._search_regex(
                r'(?s)<script[^>]+class="sp-video__page-config"[^>]*>(.+?)</script>',
                webpage, 'page config', default='{}'), video_id, fatal=False)
            if page_config:
                meta_url = page_config.get('metaUrl') or page_config.get('video', {}).get('metaUrl')
            else:
                meta_url = None

        video_data = None
        if meta_url:
            video_data = self._download_json(
                meta_url, video_id or meta_id, 'Downloading video meta JSON',
                # When we only have meta_id there is no further fallback,
                # so the meta download must succeed.
                fatal=not video_id)

        # Fallback old approach
        if not video_data:
            video_data = self._download_json(
                'http://api.video.mail.ru/videos/%s.json?new=1' % video_id,
                video_id, 'Downloading video JSON')

        formats = []
        for f in video_data['videos']:
            video_url = f.get('url')
            if not video_url:
                continue
            format_id = f.get('key')
            # Format keys look like '720p'; derive height from them.
            height = int_or_none(self._search_regex(
                r'^(\d+)[pP]$', format_id, 'height', default=None)) if format_id else None
            formats.append({
                'url': video_url,
                'format_id': format_id,
                'height': height,
            })
        self._sort_formats(formats)

        meta_data = video_data['meta']
        title = remove_end(meta_data['title'], '.mp4')

        # NOTE(review): author.get(...) assumes 'author' is always present in
        # the meta JSON; a missing key would raise AttributeError here —
        # verify against the API before hardening.
        author = video_data.get('author')
        uploader = author.get('name')
        uploader_id = author.get('id') or author.get('email')
        view_count = int_or_none(video_data.get('viewsCount') or video_data.get('views_count'))

        acc_id = meta_data.get('accId')
        item_id = meta_data.get('itemId')
        content_id = '%s_%s' % (acc_id, item_id) if acc_id and item_id else video_id

        thumbnail = meta_data.get('poster')
        duration = int_or_none(meta_data.get('duration'))
        timestamp = int_or_none(meta_data.get('timestamp'))

        return {
            'id': content_id,
            'title': title,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'duration': duration,
            'view_count': view_count,
            'formats': formats,
        }


class MailRuMusicSearchBaseIE(InfoExtractor):
    def _search(self, query, url, audio_id, limit=100, offset=0):
        # Query the my.mail.ru ajax music-search endpoint; the response is a
        # JSON array in which one element is the result dict we want.
        search = self._download_json(
            'https://my.mail.ru/cgi-bin/my/ajax', audio_id,
            'Downloading songs JSON page %d' % (offset // limit + 1),
            headers={
                'Referer': url,
                'X-Requested-With': 'XMLHttpRequest',
            }, query={
                'xemail': '',
                'ajax_call': '1',
                'func_name': 'music.search',
                'mna': '',
                'mnb': '',
                'arg_query': query,
                'arg_extended': '1',
                'arg_search_params': json.dumps({
                    'music': {
                        'limit': limit,
                        'offset': offset,
                    },
                }),
                'arg_limit': limit,
                'arg_offset': offset,
            })
        return next(e for e in search if isinstance(e, dict))

    @staticmethod
    def _extract_track(t, fatal=True):
        # Build an info dict from one track entry. With fatal=False, missing
        # URL or File id makes the function return None instead of raising.
        audio_url = t['URL'] if fatal else t.get('URL')
        if not audio_url:
            return

        audio_id = t['File'] if fatal else t.get('File')
        if not audio_id:
            return

        thumbnail = t.get('AlbumCoverURL') or t.get('FiledAlbumCover')
        uploader = t.get('OwnerName') or t.get('OwnerName_Text_HTML')
        uploader_id = t.get('UploaderID')
        duration = int_or_none(t.get('DurationInSeconds')) or parse_duration(
            t.get('Duration') or t.get('DurationStr'))
        view_count = int_or_none(t.get('PlayCount') or t.get('PlayCount_hr'))

        track = t.get('Name') or t.get('Name_Text_HTML')
        artist = t.get('Author') or t.get('Author_Text_HTML')

        if track:
            title = '%s - %s' % (artist, track) if artist else track
        else:
            title = audio_id

        return {
            'extractor_key': MailRuMusicIE.ie_key(),
            'id': audio_id,
            'title': title,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'duration': duration,
            'view_count': view_count,
            'vcodec': 'none',
            'abr': int_or_none(t.get('BitRate')),
            'track': track,
            'artist': artist,
            'album': t.get('Album'),
            'url': audio_url,
        }


class MailRuMusicIE(MailRuMusicSearchBaseIE):
    IE_NAME = 'mailru:music'
    IE_DESC = 'Музыка@Mail.Ru'
    _VALID_URL = r'https?://my\.mail\.ru/music/songs/[^/?#&]+-(?P<id>[\da-f]+)'
    _TESTS = [{
        'url': 'https://my.mail.ru/music/songs/%D0%BC8%D0%BB8%D1%82%D1%85-l-a-h-luciferian-aesthetics-of-herrschaft-single-2017-4e31f7125d0dfaef505d947642366893',
        'md5': '0f8c22ef8c5d665b13ac709e63025610',
        'info_dict': {
            'id': '4e31f7125d0dfaef505d947642366893',
            'ext': 'mp3',
            'title': 'L.A.H. (Luciferian Aesthetics of Herrschaft) single, 2017 - М8Л8ТХ',
            'uploader': 'Игорь Мудрый',
            'uploader_id': '1459196328',
            'duration': 280,
            'view_count': int,
            'vcodec': 'none',
            'abr': 320,
            'track': 'L.A.H. (Luciferian Aesthetics of Herrschaft) single, 2017',
            'artist': 'М8Л8ТХ',
        },
    }]

    def _real_extract(self, url):
        audio_id = self._match_id(url)

        webpage = self._download_webpage(url, audio_id)

        title = self._og_search_title(webpage)
        # There is no direct track API: search for the page title and pick
        # the result whose File id matches the page's audio id.
        music_data = self._search(title, url, audio_id)['MusicData']
        t = next(t for t in music_data if t.get('File') == audio_id)

        info = self._extract_track(t)
        info['title'] = title
        return info


class MailRuMusicSearchIE(MailRuMusicSearchBaseIE):
    IE_NAME = 'mailru:music:search'
    IE_DESC = 'Музыка@Mail.Ru'
    _VALID_URL = r'https?://my\.mail\.ru/music/search/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://my.mail.ru/music/search/black%20shadow',
        'info_dict': {
            'id': 'black shadow',
        },
        'playlist_mincount': 532,
    }]

    def _real_extract(self, url):
        query = compat_urllib_parse_unquote(self._match_id(url))

        entries = []

        LIMIT = 100
        offset = 0

        # Page through search results until the reported total is exceeded
        # or a page comes back empty/malformed.
        for _ in itertools.count(1):
            search = self._search(query, url, query, LIMIT, offset)

            music_data = search.get('MusicData')
            if not music_data or not isinstance(music_data, list):
                break

            for t in music_data:
                track = self._extract_track(t, fatal=False)
                if track:
                    entries.append(track)

            total = try_get(
                search, lambda x: x['Results']['music']['Total'], int)

            if total is not None:
                if offset > total:
                    break

            offset += LIMIT

        return self.playlist_result(entries, query)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/malltv.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import merge_dicts class MallTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?mall\.tv/(?:[^/]+/)*(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://www.mall.tv/18-miliard-pro-neziskovky-opravdu-jsou-sportovci-nebo-clovek-v-tisni-pijavice', 'md5': '1c4a37f080e1f3023103a7b43458e518', 'info_dict': { 'id': 't0zzt0', 'display_id': '18-miliard-pro-neziskovky-opravdu-jsou-sportovci-nebo-clovek-v-tisni-pijavice', 'ext': 'mp4', 'title': '18 miliard pro neziskovky. Opravdu jsou sportovci nebo Člověk v tísni pijavice?', 'description': 'md5:25fc0ec42a72ba602b602c683fa29deb', 'duration': 216, 'timestamp': 1538870400, 'upload_date': '20181007', 'view_count': int, } }, { 'url': 'https://www.mall.tv/kdo-to-plati/18-miliard-pro-neziskovky-opravdu-jsou-sportovci-nebo-clovek-v-tisni-pijavice', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage( url, display_id, headers=self.geo_verification_headers()) SOURCE_RE = r'(<source[^>]+\bsrc=(?:(["\'])(?:(?!\2).)+|[^\s]+)/(?P<id>[\da-z]+)/index)\b' video_id = self._search_regex( SOURCE_RE, webpage, 'video id', group='id') media = self._parse_html5_media_entries( url, re.sub(SOURCE_RE, r'\1.m3u8', webpage), video_id, m3u8_id='hls', m3u8_entry_protocol='m3u8_native')[0] info = self._search_json_ld(webpage, video_id, default={}) return merge_dicts(media, info, { 'id': video_id, 'display_id': display_id, 'title': self._og_search_title(webpage, default=None) or display_id, 'description': self._og_search_description(webpage, default=None), 'thumbnail': self._og_search_thumbnail(webpage, default=None), })
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/mangomolo.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( compat_b64decode, compat_urllib_parse_unquote, ) from ..utils import int_or_none class MangomoloBaseIE(InfoExtractor): _BASE_REGEX = r'https?://(?:admin\.mangomolo\.com/analytics/index\.php/customers/embed/|player\.mangomolo\.com/v1/)' def _get_real_id(self, page_id): return page_id def _real_extract(self, url): page_id = self._get_real_id(self._match_id(url)) webpage = self._download_webpage( 'https://player.mangomolo.com/v1/%s?%s' % (self._TYPE, url.split('?')[1]), page_id) hidden_inputs = self._hidden_inputs(webpage) m3u8_entry_protocol = 'm3u8' if self._IS_LIVE else 'm3u8_native' format_url = self._html_search_regex( [ r'(?:file|src)\s*:\s*"(https?://[^"]+?/playlist\.m3u8)', r'<a[^>]+href="(rtsp://[^"]+)"' ], webpage, 'format url') formats = self._extract_wowza_formats( format_url, page_id, m3u8_entry_protocol, ['smil']) self._sort_formats(formats) return { 'id': page_id, 'title': self._live_title(page_id) if self._IS_LIVE else page_id, 'uploader_id': hidden_inputs.get('userid'), 'duration': int_or_none(hidden_inputs.get('duration')), 'is_live': self._IS_LIVE, 'formats': formats, } class MangomoloVideoIE(MangomoloBaseIE): _TYPE = 'video' IE_NAME = 'mangomolo:' + _TYPE _VALID_URL = MangomoloBaseIE._BASE_REGEX + r'video\?.*?\bid=(?P<id>\d+)' _IS_LIVE = False class MangomoloLiveIE(MangomoloBaseIE): _TYPE = 'live' IE_NAME = 'mangomolo:' + _TYPE _VALID_URL = MangomoloBaseIE._BASE_REGEX + r'(live|index)\?.*?\bchannelid=(?P<id>(?:[A-Za-z0-9+/=]|%2B|%2F|%3D)+)' _IS_LIVE = True def _get_real_id(self, page_id): return compat_b64decode(compat_urllib_parse_unquote(page_id)).decode()
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/manyvids.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    int_or_none,
    str_to_int,
    urlencode_postdata,
)


class ManyVidsIE(InfoExtractor):
    # Extractor for manyvids.com video pages (free previews and full videos).
    _VALID_URL = r'(?i)https?://(?:www\.)?manyvids\.com/video/(?P<id>\d+)'
    _TESTS = [{
        # preview video
        'url': 'https://www.manyvids.com/Video/133957/everthing-about-me/',
        'md5': '03f11bb21c52dd12a05be21a5c7dcc97',
        'info_dict': {
            'id': '133957',
            'ext': 'mp4',
            'title': 'everthing about me (Preview)',
            'view_count': int,
            'like_count': int,
        },
    }, {
        # full video
        'url': 'https://www.manyvids.com/Video/935718/MY-FACE-REVEAL/',
        'md5': 'f3e8f7086409e9b470e2643edb96bdcc',
        'info_dict': {
            'id': '935718',
            'ext': 'mp4',
            'title': 'MY FACE REVEAL',
            'view_count': int,
            'like_count': int,
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        video_url = self._search_regex(
            # Fixed: the attribute separator was previously written as `=s*`
            # (matching a literal letter "s"), which broke extraction when
            # whitespace follows `=`; `\s*` matches optional whitespace as
            # the other attribute regexes in this module do.
            r'data-(?:video-filepath|meta-video)\s*=\s*(["\'])(?P<url>(?:(?!\1).)+)\1',
            webpage, 'video URL', group='url')

        title = self._html_search_regex(
            (r'<span[^>]+class=["\']item-title[^>]+>([^<]+)',
             r'<h2[^>]+class=["\']h2 m-0["\'][^>]*>([^<]+)'),
            webpage, 'title', default=None) or self._html_search_meta(
            'twitter:title', webpage, 'title', fatal=True)

        # Preview-only videos are flagged in the title so the user can tell
        # the downloaded file is not the full video.
        if any(p in webpage for p in ('preview_videos', '_preview.mp4')):
            title += ' (Preview)'

        mv_token = self._search_regex(
            r'data-mvtoken=(["\'])(?P<value>(?:(?!\1).)+)\1', webpage,
            'mv token', default=None, group='value')

        if mv_token:
            # Sets some cookies
            self._download_webpage(
                'https://www.manyvids.com/includes/ajax_repository/you_had_me_at_hello.php',
                video_id, fatal=False, data=urlencode_postdata({
                    'mvtoken': mv_token,
                    'vid': video_id,
                }), headers={
                    'Referer': url,
                    'X-Requested-With': 'XMLHttpRequest'
                })

        if determine_ext(video_url) == 'm3u8':
            formats = self._extract_m3u8_formats(
                video_url, video_id, 'mp4', entry_protocol='m3u8_native',
                m3u8_id='hls')
        else:
            formats = [{'url': video_url}]

        like_count = int_or_none(self._search_regex(
            r'data-likes=["\'](\d+)', webpage, 'like count', default=None))
        view_count = str_to_int(self._html_search_regex(
            r'(?s)<span[^>]+class="views-wrapper"[^>]*>(.+?)</span',
            webpage, 'view count', default=None))

        return {
            'id': video_id,
            'title': title,
            'view_count': view_count,
            'like_count': like_count,
            'formats': formats,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/markiza.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    orderedSet,
    parse_duration,
    try_get,
)


class MarkizaIE(InfoExtractor):
    # Extractor for videoarchiv.markiza.sk video/embed pages, backed by a
    # JWPlayer JSON config endpoint.
    _VALID_URL = r'https?://(?:www\.)?videoarchiv\.markiza\.sk/(?:video/(?:[^/]+/)*|embed/)(?P<id>\d+)(?:[_/]|$)'
    _TESTS = [{
        'url': 'http://videoarchiv.markiza.sk/video/oteckovia/84723_oteckovia-109',
        'md5': 'ada4e9fad038abeed971843aa028c7b0',
        'info_dict': {
            'id': '139078',
            'ext': 'mp4',
            'title': 'Oteckovia 109',
            'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 2760,
        },
    }, {
        'url': 'http://videoarchiv.markiza.sk/video/televizne-noviny/televizne-noviny/85430_televizne-noviny',
        'info_dict': {
            'id': '85430',
            'title': 'Televízne noviny',
        },
        'playlist_count': 23,
    }, {
        'url': 'http://videoarchiv.markiza.sk/video/oteckovia/84723',
        'only_matching': True,
    }, {
        'url': 'http://videoarchiv.markiza.sk/video/84723',
        'only_matching': True,
    }, {
        'url': 'http://videoarchiv.markiza.sk/video/filmy/85190_kamenak',
        'only_matching': True,
    }, {
        'url': 'http://videoarchiv.markiza.sk/video/reflex/zo-zakulisia/84651_pribeh-alzbetky',
        'only_matching': True,
    }, {
        'url': 'http://videoarchiv.markiza.sk/embed/85295',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        data = self._download_json(
            'http://videoarchiv.markiza.sk/json/video_jwplayer7.json',
            video_id, query={'id': video_id})

        info = self._parse_jwplayer_data(data, m3u8_id='hls', mpd_id='dash')

        # The JWPlayer config may describe a multi-part playlist; in that
        # case keep the page id/title instead of per-entry metadata.
        if info.get('_type') == 'playlist':
            info.update({
                'id': video_id,
                'title': try_get(
                    data, lambda x: x['details']['name'], compat_str),
            })
        else:
            info['duration'] = parse_duration(
                try_get(data, lambda x: x['details']['duration'], compat_str))
        return info


class MarkizaPageIE(InfoExtractor):
    # Extractor for markiza.sk / tvnoviny.sk article pages that embed one or
    # more videoarchiv.markiza.sk players.
    _VALID_URL = r'https?://(?:www\.)?(?:(?:[^/]+\.)?markiza|tvnoviny)\.sk/(?:[^/]+/)*(?P<id>\d+)_'
    _TESTS = [{
        'url': 'http://www.markiza.sk/soubiz/zahranicny/1923705_oteckovia-maju-svoj-den-ti-slavni-nie-su-o-nic-menej-rozkosni',
        'md5': 'ada4e9fad038abeed971843aa028c7b0',
        'info_dict': {
            'id': '139355',
            'ext': 'mp4',
            'title': 'Oteckovia 110',
            'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 2604,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://dajto.markiza.sk/filmy-a-serialy/1774695_frajeri-vo-vegas',
        'only_matching': True,
    }, {
        'url': 'http://superstar.markiza.sk/aktualne/1923870_to-je-ale-telo-spevacka-ukazala-sexy-postavicku-v-bikinach',
        'only_matching': True,
    }, {
        'url': 'http://hybsa.markiza.sk/aktualne/1923790_uzasna-atmosfera-na-hybsa-v-poprade-superstaristi-si-prve-koncerty-pred-davom-ludi-poriadne-uzili',
        'only_matching': True,
    }, {
        'url': 'http://doma.markiza.sk/filmy/1885250_moja-vysnivana-svadba',
        'only_matching': True,
    }, {
        'url': 'http://www.tvnoviny.sk/domace/1923887_po-smrti-manzela-ju-cakalo-poriadne-prekvapenie',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Defer to MarkizaIE for direct videoarchiv URLs.
        return False if MarkizaIE.suitable(url) else super(MarkizaPageIE, cls).suitable(url)

    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        webpage = self._download_webpage(
            # Downloading for some hosts (e.g. dajto, doma) fails with 500
            # although everything seems to be OK, so considering 500
            # status code to be expected.
            url, playlist_id, expected_status=500)

        entries = [
            self.url_result('http://videoarchiv.markiza.sk/video/%s' % video_id)
            for video_id in orderedSet(re.findall(
                r'(?:initPlayer_|data-entity=["\']|id=["\']player_)(\d+)',
                webpage))]

        return self.playlist_result(entries, playlist_id)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/massengeschmacktv.py
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    clean_html,
    determine_ext,
    int_or_none,
    js_to_json,
    mimetype2ext,
    parse_filesize,
)


class MassengeschmackTVIE(InfoExtractor):
    # Extractor for massengeschmack.tv episode pages. Streaming formats come
    # from an inline JS MEDIA array; downloadable files from HTML links.
    IE_NAME = 'massengeschmack.tv'
    _VALID_URL = r'https?://(?:www\.)?massengeschmack\.tv/play/(?P<id>[^?&#]+)'

    _TEST = {
        'url': 'https://massengeschmack.tv/play/fktv202',
        'md5': 'a9e054db9c2b5a08f0a0527cc201e8d3',
        'info_dict': {
            'id': 'fktv202',
            'ext': 'mp4',
            'title': 'Fernsehkritik-TV - Folge 202',
        },
    }

    def _real_extract(self, url):
        episode = self._match_id(url)

        webpage = self._download_webpage(url, episode)
        title = clean_html(self._html_search_regex(
            '<h3>([^<]+)</h3>', webpage, 'title'))
        thumbnail = self._search_regex(r'POSTER\s*=\s*"([^"]+)', webpage, 'thumbnail', fatal=False)
        # MEDIA is a JS array literal, hence js_to_json before parsing.
        sources = self._parse_json(self._search_regex(r'(?s)MEDIA\s*=\s*(\[.+?\]);', webpage, 'media'), episode, js_to_json)

        formats = []
        for source in sources:
            furl = source.get('src')
            if not furl:
                continue
            furl = self._proto_relative_url(furl)
            ext = determine_ext(furl) or mimetype2ext(source.get('type'))
            if ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    furl, episode, 'mp4', 'm3u8_native',
                    m3u8_id='hls', fatal=False))
            else:
                formats.append({
                    'url': furl,
                    'format_id': determine_ext(furl),
                })

        # Additional direct-download links with resolution and file size
        # listed in the page markup.
        for (durl, format_id, width, height, filesize) in re.findall(r'''(?x)
                                   <a[^>]+?href="(?P<url>(?:https:)?//[^"]+)".*?
                                   <strong>(?P<format_id>.+?)</strong>.*?
                                   <small>(?:(?P<width>\d+)x(?P<height>\d+))?\s+?\((?P<filesize>[\d,]+\s*[GM]iB)\)</small>
                                ''', webpage):
            formats.append({
                'url': durl,
                'format_id': format_id,
                'width': int_or_none(width),
                'height': int_or_none(height),
                'filesize': parse_filesize(filesize),
                'vcodec': 'none' if format_id.startswith('Audio') else None,
            })

        self._sort_formats(formats, ('width', 'height', 'filesize', 'tbr'))

        return {
            'id': episode,
            'title': title,
            'formats': formats,
            'thumbnail': thumbnail,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/matchtv.py
# coding: utf-8 from __future__ import unicode_literals import random from .common import InfoExtractor from ..utils import xpath_text class MatchTVIE(InfoExtractor): _VALID_URL = r'https?://matchtv\.ru(?:/on-air|/?#live-player)' _TESTS = [{ 'url': 'http://matchtv.ru/#live-player', 'info_dict': { 'id': 'matchtv-live', 'ext': 'flv', 'title': r're:^Матч ТВ - Прямой эфир \d{4}-\d{2}-\d{2} \d{2}:\d{2}$', 'is_live': True, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://matchtv.ru/on-air/', 'only_matching': True, }] def _real_extract(self, url): video_id = 'matchtv-live' video_url = self._download_json( 'http://player.matchtv.ntvplus.tv/player/smil', video_id, query={ 'ts': '', 'quality': 'SD', 'contentId': '561d2c0df7159b37178b4567', 'sign': '', 'includeHighlights': '0', 'userId': '', 'sessionId': random.randint(1, 1000000000), 'contentType': 'channel', 'timeShift': '0', 'platform': 'portal', }, headers={ 'Referer': 'http://player.matchtv.ntvplus.tv/embed-player/NTVEmbedPlayer.swf', })['data']['videoUrl'] f4m_url = xpath_text(self._download_xml(video_url, video_id), './to') formats = self._extract_f4m_formats(f4m_url, video_id) self._sort_formats(formats) return { 'id': video_id, 'title': self._live_title('Матч ТВ - Прямой эфир'), 'is_live': True, 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/mdr.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
    determine_ext,
    int_or_none,
    parse_duration,
    parse_iso8601,
    xpath_text,
)


class MDRIE(InfoExtractor):
    """Extractor for MDR.DE and KiKA video/audio pages.

    Downloads the site's `-avCustom.xml` media descriptor and builds
    formats from its `<assets>` entries (progressive, HLS and HDS).
    """
    IE_DESC = 'MDR.DE and KiKA'
    _VALID_URL = r'https?://(?:www\.)?(?:mdr|kika)\.de/(?:.*)/[a-z-]+-?(?P<id>\d+)(?:_.+?)?\.html'

    _TESTS = [{
        # MDR regularly deletes its videos
        'url': 'http://www.mdr.de/fakt/video189002.html',
        'only_matching': True,
    }, {
        # audio
        'url': 'http://www.mdr.de/kultur/audio1312272_zc-15948bad_zs-86171fdd.html',
        'md5': '64c4ee50f0a791deb9479cd7bbe9d2fa',
        'info_dict': {
            'id': '1312272',
            'ext': 'mp3',
            'title': 'Feuilleton vom 30. Oktober 2015',
            'duration': 250,
            'uploader': 'MITTELDEUTSCHER RUNDFUNK',
        },
        'skip': '404 not found',
    }, {
        'url': 'http://www.kika.de/baumhaus/videos/video19636.html',
        'md5': '4930515e36b06c111213e80d1e4aad0e',
        'info_dict': {
            'id': '19636',
            'ext': 'mp4',
            'title': 'Baumhaus vom 30. Oktober 2015',
            'duration': 134,
            'uploader': 'KIKA',
        },
        'skip': '404 not found',
    }, {
        'url': 'http://www.kika.de/sendungen/einzelsendungen/weihnachtsprogramm/videos/video8182.html',
        'md5': '5fe9c4dd7d71e3b238f04b8fdd588357',
        'info_dict': {
            'id': '8182',
            'ext': 'mp4',
            'title': 'Beutolomäus und der geheime Weihnachtswunsch',
            'description': 'md5:b69d32d7b2c55cbe86945ab309d39bbd',
            'timestamp': 1482541200,
            'upload_date': '20161224',
            'duration': 4628,
            'uploader': 'KIKA',
        },
    }, {
        # audio with alternative playerURL pattern
        'url': 'http://www.mdr.de/kultur/videos-und-audios/audio-radio/operation-mindfuck-robert-wilson100.html',
        'info_dict': {
            'id': '100',
            'ext': 'mp4',
            'title': 'Feature: Operation Mindfuck - Robert Anton Wilson',
            'duration': 3239,
            'uploader': 'MITTELDEUTSCHER RUNDFUNK',
        },
    }, {
        'url': 'http://www.kika.de/baumhaus/sendungen/video19636_zc-fea7f8a0_zs-4bf89c60.html',
        'only_matching': True,
    }, {
        'url': 'http://www.kika.de/sendungen/einzelsendungen/weihnachtsprogramm/einzelsendung2534.html',
        'only_matching': True,
    }, {
        'url': 'http://www.mdr.de/mediathek/mdr-videos/a/video-1334.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Locate the XML data URL in the page, then map its assets to formats."""
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        data_url = self._search_regex(
            r'(?:dataURL|playerXml(?:["\'])?)\s*:\s*(["\'])(?P<url>.+?-avCustom\.xml)\1',
            webpage, 'data url', group='url').replace(r'\/', '/')

        doc = self._download_xml(
            compat_urlparse.urljoin(url, data_url), video_id)

        title = xpath_text(doc, ['./title', './broadcast/broadcastName'], 'title', fatal=True)

        formats = []
        processed_urls = []
        for asset in doc.findall('./assets/asset'):
            for source in (
                    'progressiveDownload',
                    'dynamicHttpStreamingRedirector',
                    'adaptiveHttpStreamingRedirector'):
                url_el = asset.find('./%sUrl' % source)
                if url_el is None:
                    continue

                video_url = url_el.text
                # The same location may appear under several source elements;
                # only emit each URL once.
                if video_url in processed_urls:
                    continue

                processed_urls.append(video_url)

                # Bitrates are published in kbit/s units scaled by 1000.
                # Computed once here for all branches below (the original
                # recomputed them redundantly in the progressive branch).
                vbr = int_or_none(xpath_text(asset, './bitrateVideo', 'vbr'), 1000)
                abr = int_or_none(xpath_text(asset, './bitrateAudio', 'abr'), 1000)

                ext = determine_ext(url_el.text)
                if ext == 'm3u8':
                    url_formats = self._extract_m3u8_formats(
                        video_url, video_id, 'mp4', entry_protocol='m3u8_native',
                        preference=0, m3u8_id='HLS', fatal=False)
                elif ext == 'f4m':
                    url_formats = self._extract_f4m_formats(
                        video_url + '?hdcore=3.7.0&plugin=aasp-3.7.0.39.44', video_id,
                        preference=0, f4m_id='HDS', fatal=False)
                else:
                    media_type = xpath_text(asset, './mediaType', 'media type', default='MP4')
                    filesize = int_or_none(xpath_text(asset, './fileSize', 'file size'))

                    f = {
                        'url': video_url,
                        'format_id': '%s-%d' % (media_type, vbr or abr),
                        'filesize': filesize,
                        'abr': abr,
                        'preference': 1,
                    }

                    if vbr:
                        width = int_or_none(xpath_text(asset, './frameWidth', 'width'))
                        height = int_or_none(xpath_text(asset, './frameHeight', 'height'))
                        f.update({
                            'vbr': vbr,
                            'width': width,
                            'height': height,
                        })

                    url_formats = [f]

                if not url_formats:
                    continue

                # No video bitrate => audio-only asset: mark formats
                # accordingly and fold any total bitrate into abr.
                if not vbr:
                    for f in url_formats:
                        abr = f.get('tbr') or abr
                        if 'tbr' in f:
                            del f['tbr']
                        f.update({
                            'abr': abr,
                            'vcodec': 'none',
                        })

                formats.extend(url_formats)

        self._sort_formats(formats)

        description = xpath_text(doc, './broadcast/broadcastDescription', 'description')
        timestamp = parse_iso8601(
            xpath_text(
                doc, [
                    './broadcast/broadcastDate',
                    './broadcast/broadcastStartDate',
                    './broadcast/broadcastEndDate'],
                'timestamp', default=None))
        duration = parse_duration(xpath_text(doc, './duration', 'duration'))
        uploader = xpath_text(doc, './rights', 'uploader')

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'timestamp': timestamp,
            'duration': duration,
            'uploader': uploader,
            'formats': formats,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/medialaan.py
from __future__ import unicode_literals import re from .gigya import GigyaBaseIE from ..compat import compat_str from ..utils import ( int_or_none, parse_duration, try_get, unified_timestamp, ) class MedialaanIE(GigyaBaseIE): _VALID_URL = r'''(?x) https?:// (?:www\.|nieuws\.)? (?: (?P<site_id>vtm|q2|vtmkzoom)\.be/ (?: video(?:/[^/]+/id/|/?\?.*?\baid=)| (?:[^/]+/)* ) ) (?P<id>[^/?#&]+) ''' _NETRC_MACHINE = 'medialaan' _APIKEY = '3_HZ0FtkMW_gOyKlqQzW5_0FHRC7Nd5XpXJZcDdXY4pk5eES2ZWmejRW5egwVm4ug-' _SITE_TO_APP_ID = { 'vtm': 'vtm_watch', 'q2': 'q2', 'vtmkzoom': 'vtmkzoom', } _TESTS = [{ # vod 'url': 'http://vtm.be/video/volledige-afleveringen/id/vtm_20170219_VM0678361_vtmwatch', 'info_dict': { 'id': 'vtm_20170219_VM0678361_vtmwatch', 'ext': 'mp4', 'title': 'Allemaal Chris afl. 6', 'description': 'md5:4be86427521e7b07e0adb0c9c554ddb2', 'timestamp': 1487533280, 'upload_date': '20170219', 'duration': 2562, 'series': 'Allemaal Chris', 'season': 'Allemaal Chris', 'season_number': 1, 'season_id': '256936078124527', 'episode': 'Allemaal Chris afl. 
6', 'episode_number': 6, 'episode_id': '256936078591527', }, 'params': { 'skip_download': True, }, 'skip': 'Requires account credentials', }, { # clip 'url': 'http://vtm.be/video?aid=168332', 'info_dict': { 'id': '168332', 'ext': 'mp4', 'title': '"Veronique liegt!"', 'description': 'md5:1385e2b743923afe54ba4adc38476155', 'timestamp': 1489002029, 'upload_date': '20170308', 'duration': 96, }, }, { # vod 'url': 'http://vtm.be/video/volledige-afleveringen/id/257107153551000', 'only_matching': True, }, { # vod 'url': 'http://vtm.be/video?aid=163157', 'only_matching': True, }, { # vod 'url': 'http://www.q2.be/video/volledige-afleveringen/id/2be_20170301_VM0684442_q2', 'only_matching': True, }, { # clip 'url': 'http://vtmkzoom.be/k3-dansstudio/een-nieuw-seizoen-van-k3-dansstudio', 'only_matching': True, }, { # http/s redirect 'url': 'https://vtmkzoom.be/video?aid=45724', 'info_dict': { 'id': '257136373657000', 'ext': 'mp4', 'title': 'K3 Dansstudio Ushuaia afl.6', }, 'params': { 'skip_download': True, }, 'skip': 'Requires account credentials', }, { # nieuws.vtm.be 'url': 'https://nieuws.vtm.be/stadion/stadion/genk-nog-moeilijk-programma', 'only_matching': True, }] def _real_initialize(self): self._logged_in = False def _login(self): username, password = self._get_login_info() if username is None: self.raise_login_required() auth_data = { 'APIKey': self._APIKEY, 'sdk': 'js_6.1', 'format': 'json', 'loginID': username, 'password': password, } auth_info = self._gigya_login(auth_data) self._uid = auth_info['UID'] self._uid_signature = auth_info['UIDSignature'] self._signature_timestamp = auth_info['signatureTimestamp'] self._logged_in = True def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id, site_id = mobj.group('id', 'site_id') webpage = self._download_webpage(url, video_id) config = self._parse_json( self._search_regex( r'videoJSConfig\s*=\s*JSON\.parse\(\'({.+?})\'\);', webpage, 'config', default='{}'), video_id, transform_source=lambda s: 
s.replace( '\\\\', '\\').replace(r'\"', '"').replace(r"\'", "'")) vod_id = config.get('vodId') or self._search_regex( (r'\\"vodId\\"\s*:\s*\\"(.+?)\\"', r'"vodId"\s*:\s*"(.+?)"', r'<[^>]+id=["\']vod-(\d+)'), webpage, 'video_id', default=None) # clip, no authentication required if not vod_id: player = self._parse_json( self._search_regex( r'vmmaplayer\(({.+?})\);', webpage, 'vmma player', default=''), video_id, transform_source=lambda s: '[%s]' % s, fatal=False) if player: video = player[-1] if video['videoUrl'] in ('http', 'https'): return self.url_result(video['url'], MedialaanIE.ie_key()) info = { 'id': video_id, 'url': video['videoUrl'], 'title': video['title'], 'thumbnail': video.get('imageUrl'), 'timestamp': int_or_none(video.get('createdDate')), 'duration': int_or_none(video.get('duration')), } else: info = self._parse_html5_media_entries( url, webpage, video_id, m3u8_id='hls')[0] info.update({ 'id': video_id, 'title': self._html_search_meta('description', webpage), 'duration': parse_duration(self._html_search_meta('duration', webpage)), }) # vod, authentication required else: if not self._logged_in: self._login() settings = self._parse_json( self._search_regex( r'jQuery\.extend\(Drupal\.settings\s*,\s*({.+?})\);', webpage, 'drupal settings', default='{}'), video_id) def get(container, item): return try_get( settings, lambda x: x[container][item], compat_str) or self._search_regex( r'"%s"\s*:\s*"([^"]+)' % item, webpage, item, default=None) app_id = get('vod', 'app_id') or self._SITE_TO_APP_ID.get(site_id, 'vtm_watch') sso = get('vod', 'gigyaDatabase') or 'vtm-sso' data = self._download_json( 'http://vod.medialaan.io/api/1.0/item/%s/video' % vod_id, video_id, query={ 'app_id': app_id, 'user_network': sso, 'UID': self._uid, 'UIDSignature': self._uid_signature, 'signatureTimestamp': self._signature_timestamp, }) formats = self._extract_m3u8_formats( data['response']['uri'], video_id, entry_protocol='m3u8_native', ext='mp4', m3u8_id='hls') 
self._sort_formats(formats) info = { 'id': vod_id, 'formats': formats, } api_key = get('vod', 'apiKey') channel = get('medialaanGigya', 'channel') if api_key: videos = self._download_json( 'http://vod.medialaan.io/vod/v2/videos', video_id, fatal=False, query={ 'channels': channel, 'ids': vod_id, 'limit': 1, 'apikey': api_key, }) if videos: video = try_get( videos, lambda x: x['response']['videos'][0], dict) if video: def get(container, item, expected_type=None): return try_get( video, lambda x: x[container][item], expected_type) def get_string(container, item): return get(container, item, compat_str) info.update({ 'series': get_string('program', 'title'), 'season': get_string('season', 'title'), 'season_number': int_or_none(get('season', 'number')), 'season_id': get_string('season', 'id'), 'episode': get_string('episode', 'title'), 'episode_number': int_or_none(get('episode', 'number')), 'episode_id': get_string('episode', 'id'), 'duration': int_or_none( video.get('duration')) or int_or_none( video.get('durationMillis'), scale=1000), 'title': get_string('episode', 'title'), 'description': get_string('episode', 'text'), 'timestamp': unified_timestamp(get_string( 'publication', 'begin')), }) if not info.get('title'): info['title'] = try_get( config, lambda x: x['videoConfig']['title'], compat_str) or self._html_search_regex( r'\\"title\\"\s*:\s*\\"(.+?)\\"', webpage, 'title', default=None) or self._og_search_title(webpage) if not info.get('description'): info['description'] = self._html_search_regex( r'<div[^>]+class="field-item\s+even">\s*<p>(.+?)</p>', webpage, 'description', default=None) return info
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/mediaset.py
# coding: utf-8 from __future__ import unicode_literals import re from .theplatform import ThePlatformBaseIE from ..compat import ( compat_parse_qs, compat_str, compat_urllib_parse_urlparse, ) from ..utils import ( ExtractorError, int_or_none, update_url_query, ) class MediasetIE(ThePlatformBaseIE): _TP_TLD = 'eu' _VALID_URL = r'''(?x) (?: mediaset:| https?:// (?:(?:www|static3)\.)?mediasetplay\.mediaset\.it/ (?: (?:video|on-demand)/(?:[^/]+/)+[^/]+_| player/index\.html\?.*?\bprogramGuid= ) )(?P<id>[0-9A-Z]{16}) ''' _TESTS = [{ # full episode 'url': 'https://www.mediasetplay.mediaset.it/video/hellogoodbye/quarta-puntata_FAFU000000661824', 'md5': '9b75534d42c44ecef7bf1ffeacb7f85d', 'info_dict': { 'id': 'FAFU000000661824', 'ext': 'mp4', 'title': 'Quarta puntata', 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 1414.26, 'upload_date': '20161107', 'series': 'Hello Goodbye', 'timestamp': 1478532900, 'uploader': 'Rete 4', 'uploader_id': 'R4', }, }, { 'url': 'https://www.mediasetplay.mediaset.it/video/matrix/puntata-del-25-maggio_F309013801000501', 'md5': '288532f0ad18307705b01e581304cd7b', 'info_dict': { 'id': 'F309013801000501', 'ext': 'mp4', 'title': 'Puntata del 25 maggio', 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 6565.007, 'upload_date': '20180526', 'series': 'Matrix', 'timestamp': 1527326245, 'uploader': 'Canale 5', 'uploader_id': 'C5', }, 'expected_warnings': ['HTTP Error 403: Forbidden'], }, { # clip 'url': 'https://www.mediasetplay.mediaset.it/video/gogglebox/un-grande-classico-della-commedia-sexy_FAFU000000661680', 'only_matching': True, }, { # iframe simple 'url': 'https://static3.mediasetplay.mediaset.it/player/index.html?appKey=5ad3966b1de1c4000d5cec48&programGuid=FAFU000000665924&id=665924', 'only_matching': True, }, { # iframe twitter (from http://www.wittytv.it/se-prima-mi-fidavo-zero/) 'url': 
'https://static3.mediasetplay.mediaset.it/player/index.html?appKey=5ad3966b1de1c4000d5cec48&programGuid=FAFU000000665104&id=665104', 'only_matching': True, }, { 'url': 'mediaset:FAFU000000665924', 'only_matching': True, }] @staticmethod def _extract_urls(ie, webpage): def _qs(url): return compat_parse_qs(compat_urllib_parse_urlparse(url).query) def _program_guid(qs): return qs.get('programGuid', [None])[0] entries = [] for mobj in re.finditer( r'<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//(?:www\.)?video\.mediaset\.it/player/playerIFrame(?:Twitter)?\.shtml.*?)\1', webpage): embed_url = mobj.group('url') embed_qs = _qs(embed_url) program_guid = _program_guid(embed_qs) if program_guid: entries.append(embed_url) continue video_id = embed_qs.get('id', [None])[0] if not video_id: continue urlh = ie._request_webpage( embed_url, video_id, note='Following embed URL redirect') embed_url = compat_str(urlh.geturl()) program_guid = _program_guid(_qs(embed_url)) if program_guid: entries.append(embed_url) return entries def _real_extract(self, url): guid = self._match_id(url) tp_path = 'PR1GhC/media/guid/2702976343/' + guid info = self._extract_theplatform_metadata(tp_path, guid) formats = [] subtitles = {} first_e = None for asset_type in ('SD', 'HD'): for f in ('MPEG4', 'MPEG-DASH', 'M3U', 'ISM'): try: tp_formats, tp_subtitles = self._extract_theplatform_smil( update_url_query('http://link.theplatform.%s/s/%s' % (self._TP_TLD, tp_path), { 'mbr': 'true', 'formats': f, 'assetTypes': asset_type, }), guid, 'Downloading %s %s SMIL data' % (f, asset_type)) except ExtractorError as e: if not first_e: first_e = e break for tp_f in tp_formats: tp_f['quality'] = 1 if asset_type == 'HD' else 0 formats.extend(tp_formats) subtitles = self._merge_subtitles(subtitles, tp_subtitles) if first_e and not formats: raise first_e self._sort_formats(formats) fields = [] for templ, repls in (('tvSeason%sNumber', ('', 'Episode')), ('mediasetprogram$%s', ('brandTitle', 'numberOfViews', 
'publishInfo'))): fields.extend(templ % repl for repl in repls) feed_data = self._download_json( 'https://feed.entertainment.tv.theplatform.eu/f/PR1GhC/mediaset-prod-all-programs/guid/-/' + guid, guid, fatal=False, query={'fields': ','.join(fields)}) if feed_data: publish_info = feed_data.get('mediasetprogram$publishInfo') or {} info.update({ 'episode_number': int_or_none(feed_data.get('tvSeasonEpisodeNumber')), 'season_number': int_or_none(feed_data.get('tvSeasonNumber')), 'series': feed_data.get('mediasetprogram$brandTitle'), 'uploader': publish_info.get('description'), 'uploader_id': publish_info.get('channel'), 'view_count': int_or_none(feed_data.get('mediasetprogram$numberOfViews')), }) info.update({ 'id': guid, 'formats': formats, 'subtitles': subtitles, }) return info
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/mediasite.py
# coding: utf-8 from __future__ import unicode_literals import re import json from .common import InfoExtractor from ..compat import ( compat_str, compat_urlparse, ) from ..utils import ( ExtractorError, float_or_none, mimetype2ext, str_or_none, try_get, unescapeHTML, unsmuggle_url, url_or_none, urljoin, ) _ID_RE = r'(?:[0-9a-f]{32,34}|[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12,14})' class MediasiteIE(InfoExtractor): _VALID_URL = r'(?xi)https?://[^/]+/Mediasite/(?:Play|Showcase/(?:default|livebroadcast)/Presentation)/(?P<id>%s)(?P<query>\?[^#]+|)' % _ID_RE _TESTS = [ { 'url': 'https://hitsmediaweb.h-its.org/mediasite/Play/2db6c271681e4f199af3c60d1f82869b1d', 'info_dict': { 'id': '2db6c271681e4f199af3c60d1f82869b1d', 'ext': 'mp4', 'title': 'Lecture: Tuesday, September 20, 2016 - Sir Andrew Wiles', 'description': 'Sir Andrew Wiles: “Equations in arithmetic”\\n\\nI will describe some of the interactions between modern number theory and the problem of solving equations in rational numbers or integers\\u0027.', 'timestamp': 1474268400.0, 'upload_date': '20160919', }, }, { 'url': 'http://mediasite.uib.no/Mediasite/Play/90bb363295d945d6b548c867d01181361d?catalog=a452b7df-9ae1-46b7-a3ba-aceeb285f3eb', 'info_dict': { 'id': '90bb363295d945d6b548c867d01181361d', 'ext': 'mp4', 'upload_date': '20150429', 'title': '5) IT-forum 2015-Dag 1 - Dungbeetle - How and why Rain created a tiny bug tracker for Unity', 'timestamp': 1430311380.0, }, }, { 'url': 'https://collegerama.tudelft.nl/Mediasite/Play/585a43626e544bdd97aeb71a0ec907a01d', 'md5': '481fda1c11f67588c0d9d8fbdced4e39', 'info_dict': { 'id': '585a43626e544bdd97aeb71a0ec907a01d', 'ext': 'mp4', 'title': 'Een nieuwe wereld: waarden, bewustzijn en techniek van de mensheid 2.0.', 'description': '', 'thumbnail': r're:^https?://.*\.jpg(?:\?.*)?$', 'duration': 7713.088, 'timestamp': 1413309600, 'upload_date': '20141014', }, }, { 'url': 
'https://collegerama.tudelft.nl/Mediasite/Play/86a9ea9f53e149079fbdb4202b521ed21d?catalog=fd32fd35-6c99-466c-89d4-cd3c431bc8a4', 'md5': 'ef1fdded95bdf19b12c5999949419c92', 'info_dict': { 'id': '86a9ea9f53e149079fbdb4202b521ed21d', 'ext': 'wmv', 'title': '64ste Vakantiecursus: Afvalwater', 'description': 'md5:7fd774865cc69d972f542b157c328305', 'thumbnail': r're:^https?://.*\.jpg(?:\?.*?)?$', 'duration': 10853, 'timestamp': 1326446400, 'upload_date': '20120113', }, }, { 'url': 'http://digitalops.sandia.gov/Mediasite/Play/24aace4429fc450fb5b38cdbf424a66e1d', 'md5': '9422edc9b9a60151727e4b6d8bef393d', 'info_dict': { 'id': '24aace4429fc450fb5b38cdbf424a66e1d', 'ext': 'mp4', 'title': 'Xyce Software Training - Section 1', 'description': r're:(?s)SAND Number: SAND 2013-7800.{200,}', 'upload_date': '20120409', 'timestamp': 1333983600, 'duration': 7794, } }, { 'url': 'https://collegerama.tudelft.nl/Mediasite/Showcase/livebroadcast/Presentation/ada7020854f743c49fbb45c9ec7dbb351d', 'only_matching': True, }, { 'url': 'https://mediasite.ntnu.no/Mediasite/Showcase/default/Presentation/7d8b913259334b688986e970fae6fcb31d', 'only_matching': True, }, { # dashed id 'url': 'https://hitsmediaweb.h-its.org/mediasite/Play/2db6c271-681e-4f19-9af3-c60d1f82869b1d', 'only_matching': True, } ] # look in Mediasite.Core.js (Mediasite.ContentStreamType[*]) _STREAM_TYPES = { 0: 'video1', # the main video 2: 'slide', 3: 'presentation', 4: 'video2', # screencast? 5: 'video3', } @staticmethod def _extract_urls(webpage): return [ unescapeHTML(mobj.group('url')) for mobj in re.finditer( r'(?xi)<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:(?:https?:)?//[^/]+)?/Mediasite/Play/%s(?:\?.*?)?)\1' % _ID_RE, webpage)] def _real_extract(self, url): url, data = unsmuggle_url(url, {}) mobj = re.match(self._VALID_URL, url) resource_id = mobj.group('id') query = mobj.group('query') webpage, urlh = self._download_webpage_handle(url, resource_id) # XXX: add UrlReferrer? 
redirect_url = compat_str(urlh.geturl()) # XXX: might have also extracted UrlReferrer and QueryString from the html service_path = compat_urlparse.urljoin(redirect_url, self._html_search_regex( r'<div[^>]+\bid=["\']ServicePath[^>]+>(.+?)</div>', webpage, resource_id, default='/Mediasite/PlayerService/PlayerService.svc/json')) player_options = self._download_json( '%s/GetPlayerOptions' % service_path, resource_id, headers={ 'Content-type': 'application/json; charset=utf-8', 'X-Requested-With': 'XMLHttpRequest', }, data=json.dumps({ 'getPlayerOptionsRequest': { 'ResourceId': resource_id, 'QueryString': query, 'UrlReferrer': data.get('UrlReferrer', ''), 'UseScreenReader': False, } }).encode('utf-8'))['d'] presentation = player_options['Presentation'] title = presentation['Title'] if presentation is None: raise ExtractorError( 'Mediasite says: %s' % player_options['PlayerPresentationStatusMessage'], expected=True) thumbnails = [] formats = [] for snum, Stream in enumerate(presentation['Streams']): stream_type = Stream.get('StreamType') if stream_type is None: continue video_urls = Stream.get('VideoUrls') if not isinstance(video_urls, list): video_urls = [] stream_id = self._STREAM_TYPES.get( stream_type, 'type%u' % stream_type) stream_formats = [] for unum, VideoUrl in enumerate(video_urls): video_url = url_or_none(VideoUrl.get('Location')) if not video_url: continue # XXX: if Stream.get('CanChangeScheme', False), switch scheme to HTTP/HTTPS media_type = VideoUrl.get('MediaType') if media_type == 'SS': stream_formats.extend(self._extract_ism_formats( video_url, resource_id, ism_id='%s-%u.%u' % (stream_id, snum, unum), fatal=False)) elif media_type == 'Dash': stream_formats.extend(self._extract_mpd_formats( video_url, resource_id, mpd_id='%s-%u.%u' % (stream_id, snum, unum), fatal=False)) else: stream_formats.append({ 'format_id': '%s-%u.%u' % (stream_id, snum, unum), 'url': video_url, 'ext': mimetype2ext(VideoUrl.get('MimeType')), }) # TODO: if 
Stream['HasSlideContent']: # synthesise an MJPEG video stream '%s-%u.slides' % (stream_type, snum) # from Stream['Slides'] # this will require writing a custom downloader... # disprefer 'secondary' streams if stream_type != 0: for fmt in stream_formats: fmt['preference'] = -1 thumbnail_url = Stream.get('ThumbnailUrl') if thumbnail_url: thumbnails.append({ 'id': '%s-%u' % (stream_id, snum), 'url': urljoin(redirect_url, thumbnail_url), 'preference': -1 if stream_type != 0 else 0, }) formats.extend(stream_formats) self._sort_formats(formats) # XXX: Presentation['Presenters'] # XXX: Presentation['Transcript'] return { 'id': resource_id, 'title': title, 'description': presentation.get('Description'), 'duration': float_or_none(presentation.get('Duration'), 1000), 'timestamp': float_or_none(presentation.get('UnixTime'), 1000), 'formats': formats, 'thumbnails': thumbnails, } class MediasiteCatalogIE(InfoExtractor): _VALID_URL = r'''(?xi) (?P<url>https?://[^/]+/Mediasite) /Catalog/Full/ (?P<catalog_id>{0}) (?: /(?P<current_folder_id>{0}) /(?P<root_dynamic_folder_id>{0}) )? 
'''.format(_ID_RE) _TESTS = [{ 'url': 'http://events7.mediasite.com/Mediasite/Catalog/Full/631f9e48530d454381549f955d08c75e21', 'info_dict': { 'id': '631f9e48530d454381549f955d08c75e21', 'title': 'WCET Summit: Adaptive Learning in Higher Ed: Improving Outcomes Dynamically', }, 'playlist_count': 6, 'expected_warnings': ['is not a supported codec'], }, { # with CurrentFolderId and RootDynamicFolderId 'url': 'https://medaudio.medicine.iu.edu/Mediasite/Catalog/Full/9518c4a6c5cf4993b21cbd53e828a92521/97a9db45f7ab47428c77cd2ed74bb98f14/9518c4a6c5cf4993b21cbd53e828a92521', 'info_dict': { 'id': '9518c4a6c5cf4993b21cbd53e828a92521', 'title': 'IUSM Family and Friends Sessions', }, 'playlist_count': 2, }, { 'url': 'http://uipsyc.mediasite.com/mediasite/Catalog/Full/d5d79287c75243c58c50fef50174ec1b21', 'only_matching': True, }, { # no AntiForgeryToken 'url': 'https://live.libraries.psu.edu/Mediasite/Catalog/Full/8376d4b24dd1457ea3bfe4cf9163feda21', 'only_matching': True, }, { 'url': 'https://medaudio.medicine.iu.edu/Mediasite/Catalog/Full/9518c4a6c5cf4993b21cbd53e828a92521/97a9db45f7ab47428c77cd2ed74bb98f14/9518c4a6c5cf4993b21cbd53e828a92521', 'only_matching': True, }, { # dashed id 'url': 'http://events7.mediasite.com/Mediasite/Catalog/Full/631f9e48-530d-4543-8154-9f955d08c75e', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) mediasite_url = mobj.group('url') catalog_id = mobj.group('catalog_id') current_folder_id = mobj.group('current_folder_id') or catalog_id root_dynamic_folder_id = mobj.group('root_dynamic_folder_id') webpage = self._download_webpage(url, catalog_id) # AntiForgeryToken is optional (e.g. [1]) # 1. 
https://live.libraries.psu.edu/Mediasite/Catalog/Full/8376d4b24dd1457ea3bfe4cf9163feda21 anti_forgery_token = self._search_regex( r'AntiForgeryToken\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage, 'anti forgery token', default=None, group='value') if anti_forgery_token: anti_forgery_header = self._search_regex( r'AntiForgeryHeaderName\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage, 'anti forgery header name', default='X-SOFO-AntiForgeryHeader', group='value') data = { 'IsViewPage': True, 'IsNewFolder': True, 'AuthTicket': None, 'CatalogId': catalog_id, 'CurrentFolderId': current_folder_id, 'RootDynamicFolderId': root_dynamic_folder_id, 'ItemsPerPage': 1000, 'PageIndex': 0, 'PermissionMask': 'Execute', 'CatalogSearchType': 'SearchInFolder', 'SortBy': 'Date', 'SortDirection': 'Descending', 'StartDate': None, 'EndDate': None, 'StatusFilterList': None, 'PreviewKey': None, 'Tags': [], } headers = { 'Content-Type': 'application/json; charset=UTF-8', 'Referer': url, 'X-Requested-With': 'XMLHttpRequest', } if anti_forgery_token: headers[anti_forgery_header] = anti_forgery_token catalog = self._download_json( '%s/Catalog/Data/GetPresentationsForFolder' % mediasite_url, catalog_id, data=json.dumps(data).encode(), headers=headers) entries = [] for video in catalog['PresentationDetailsList']: if not isinstance(video, dict): continue video_id = str_or_none(video.get('Id')) if not video_id: continue entries.append(self.url_result( '%s/Play/%s' % (mediasite_url, video_id), ie=MediasiteIE.ie_key(), video_id=video_id)) title = try_get( catalog, lambda x: x['CurrentFolder']['Name'], compat_str) return self.playlist_result(entries, catalog_id, title,) class MediasiteNamedCatalogIE(InfoExtractor): _VALID_URL = r'(?xi)(?P<url>https?://[^/]+/Mediasite)/Catalog/catalogs/(?P<catalog_name>[^/?#&]+)' _TESTS = [{ 'url': 'https://msite.misis.ru/Mediasite/Catalog/catalogs/2016-industrial-management-skriabin-o-o', 'only_matching': True, }] def _real_extract(self, url): mobj = 
re.match(self._VALID_URL, url) mediasite_url = mobj.group('url') catalog_name = mobj.group('catalog_name') webpage = self._download_webpage(url, catalog_name) catalog_id = self._search_regex( r'CatalogId\s*:\s*["\'](%s)' % _ID_RE, webpage, 'catalog id') return self.url_result( '%s/Catalog/Full/%s' % (mediasite_url, catalog_id), ie=MediasiteCatalogIE.ie_key(), video_id=catalog_id)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/medici.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    unified_strdate,
    update_url_query,
    urlencode_postdata,
)


class MediciIE(InfoExtractor):
    """Extractor for medici.tv; fetches page metadata via a JSON POST
    endpoint (CSRF-protected) and the media itself as HDS (f4m)."""
    _VALID_URL = r'https?://(?:www\.)?medici\.tv/#!/(?P<id>[^?#&]+)'
    _TEST = {
        'url': 'http://www.medici.tv/#!/daniel-harding-frans-helmerson-verbier-festival-music-camp',
        'md5': '004c21bb0a57248085b6ff3fec72719d',
        'info_dict': {
            'id': '3059',
            'ext': 'flv',
            'title': 'Daniel Harding conducts the Verbier Festival Music Camp \u2013 With Frans Helmerson',
            'description': 'md5:322a1e952bafb725174fd8c1a8212f58',
            'thumbnail': r're:^https?://.*\.jpg$',
            'upload_date': '20170408',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Sets csrftoken cookie
        self._download_webpage(url, video_id)

        MEDICI_URL = 'http://www.medici.tv/'

        data = self._download_json(
            MEDICI_URL, video_id,
            data=urlencode_postdata({
                'json': 'true',
                'page': '/%s' % video_id,
                'timezone_offset': -420,
            }), headers={
                'X-CSRFToken': self._get_cookies(url)['csrftoken'].value,
                'X-Requested-With': 'XMLHttpRequest',
                'Referer': MEDICI_URL,
                'Content-Type': 'application/x-www-form-urlencoded',
            })

        video = data['video']['videos']['video1']

        title = video.get('nom') or data['title']

        # Prefer the site's numeric id over the URL slug when available.
        video_id = video.get('id') or video_id
        formats = self._extract_f4m_formats(
            update_url_query(video['url_akamai'], {
                'hdcore': '3.1.0',
                'plugin=aasp': '3.1.0.43.124',
            }), video_id, f4m_id='hds')

        description = data.get('meta_description')
        thumbnail = video.get('url_thumbnail') or data.get('main_image')
        upload_date = unified_strdate(data['video'].get('date'))

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'upload_date': upload_date,
            'formats': formats,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/megaphone.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import js_to_json class MegaphoneIE(InfoExtractor): IE_NAME = 'megaphone.fm' IE_DESC = 'megaphone.fm embedded players' _VALID_URL = r'https://player\.megaphone\.fm/(?P<id>[A-Z0-9]+)' _TEST = { 'url': 'https://player.megaphone.fm/GLT9749789991?"', 'md5': '4816a0de523eb3e972dc0dda2c191f96', 'info_dict': { 'id': 'GLT9749789991', 'ext': 'mp3', 'title': '#97 What Kind Of Idiot Gets Phished?', 'thumbnail': r're:^https://.*\.png.*$', 'duration': 1776.26375, 'author': 'Reply All', }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._og_search_property('audio:title', webpage) author = self._og_search_property('audio:artist', webpage) thumbnail = self._og_search_thumbnail(webpage) episode_json = self._search_regex(r'(?s)var\s+episode\s*=\s*(\{.+?\});', webpage, 'episode JSON') episode_data = self._parse_json(episode_json, video_id, js_to_json) video_url = self._proto_relative_url(episode_data['mediaUrl'], 'https:') formats = [{ 'url': video_url, }] return { 'id': video_id, 'thumbnail': thumbnail, 'title': title, 'author': author, 'duration': episode_data['duration'], 'formats': formats, } @classmethod def _extract_urls(cls, webpage): return [m[0] for m in re.findall( r'<iframe[^>]*?\ssrc=["\'](%s)' % cls._VALID_URL, webpage)]
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/meipai.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    parse_duration,
    unified_timestamp,
)


class MeipaiIE(InfoExtractor):
    """Extractor for meipai.com: handles both regular uploads (direct MP4)
    and recorded live streams (HLS)."""
    IE_DESC = '美拍'
    _VALID_URL = r'https?://(?:www\.)?meipai\.com/media/(?P<id>[0-9]+)'
    _TESTS = [{
        # regular uploaded video
        'url': 'http://www.meipai.com/media/531697625',
        'md5': 'e3e9600f9e55a302daecc90825854b4f',
        'info_dict': {
            'id': '531697625',
            'ext': 'mp4',
            'title': '#葉子##阿桑##余姿昀##超級女聲#',
            'description': '#葉子##阿桑##余姿昀##超級女聲#',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 152,
            'timestamp': 1465492420,
            'upload_date': '20160609',
            'view_count': 35511,
            'creator': '她她-TATA',
            'tags': ['葉子', '阿桑', '余姿昀', '超級女聲'],
        }
    }, {
        # record of live streaming
        'url': 'http://www.meipai.com/media/585526361',
        'md5': 'ff7d6afdbc6143342408223d4f5fb99a',
        'info_dict': {
            'id': '585526361',
            'ext': 'mp4',
            'title': '姿昀和善願 練歌練琴啦😁😁😁',
            'description': '姿昀和善願 練歌練琴啦😁😁😁',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 5975,
            'timestamp': 1474311799,
            'upload_date': '20160919',
            'view_count': 1215,
            'creator': '她她-TATA',
        }
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        title = self._og_search_title(
            webpage, default=None) or self._html_search_regex(
            r'<title[^>]*>([^<]+)</title>', webpage, 'title')

        formats = []

        # recorded playback of live streaming
        m3u8_url = self._html_search_regex(
            r'file:\s*encodeURIComponent\((["\'])(?P<url>(?:(?!\1).)+)\1\)',
            webpage, 'm3u8 url', group='url', default=None)
        if m3u8_url:
            formats.extend(self._extract_m3u8_formats(
                m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native',
                m3u8_id='hls', fatal=False))

        if not formats:
            # regular uploaded video
            video_url = self._search_regex(
                r'data-video=(["\'])(?P<url>(?:(?!\1).)+)\1', webpage, 'video url',
                group='url', default=None)
            if video_url:
                formats.append({
                    'url': video_url,
                    'format_id': 'http',
                })

        timestamp = unified_timestamp(self._og_search_property(
            'video:release_date', webpage, 'release date', fatal=False))

        tags = self._og_search_property(
            'video:tag', webpage, 'tags', default='').split(',')

        view_count = int_or_none(self._html_search_meta(
            'interactionCount', webpage, 'view count'))
        duration = parse_duration(self._html_search_meta(
            'duration', webpage, 'duration'))
        creator = self._og_search_property(
            'video:director', webpage, 'creator', fatal=False)

        return {
            'id': video_id,
            'title': title,
            'description': self._og_search_description(webpage),
            'thumbnail': self._og_search_thumbnail(webpage),
            'duration': duration,
            'timestamp': timestamp,
            'view_count': view_count,
            'creator': creator,
            'tags': tags,
            'formats': formats,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/melonvod.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    urljoin,
)


class MelonVODIE(InfoExtractor):
    """Extractor for vod.melon.com; player/streaming info come from two
    JSON endpoints, media is delivered as HLS."""
    _VALID_URL = r'https?://vod\.melon\.com/video/detail2\.html?\?.*?mvId=(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://vod.melon.com/video/detail2.htm?mvId=50158734',
        'info_dict': {
            'id': '50158734',
            'ext': 'mp4',
            'title': "Jessica 'Wonderland' MV Making Film",
            'thumbnail': r're:^https?://.*\.jpg$',
            'artist': 'Jessica (제시카)',
            'upload_date': '20161212',
            'duration': 203,
        },
        'params': {
            'skip_download': 'm3u8 download',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        play_info = self._download_json(
            'http://vod.melon.com/video/playerInfo.json', video_id,
            note='Downloading player info JSON', query={'mvId': video_id})

        title = play_info['mvInfo']['MVTITLE']

        info = self._download_json(
            'http://vod.melon.com/delivery/streamingInfo.json', video_id,
            note='Downloading streaming info JSON',
            query={
                'contsId': video_id,
                'contsType': 'VIDEO',
            })

        stream_info = info['streamingInfo']

        formats = self._extract_m3u8_formats(
            stream_info['encUrl'], video_id, 'mp4', m3u8_id='hls')
        self._sort_formats(formats)

        artist_list = play_info.get('artistList')
        artist = None
        if isinstance(artist_list, list):
            artist = ', '.join(
                [a['ARTISTNAMEWEBLIST']
                 for a in artist_list if a.get('ARTISTNAMEWEBLIST')])

        thumbnail = urljoin(info.get('staticDomain'), stream_info.get('imgPath'))

        duration = int_or_none(stream_info.get('playTime'))
        # Service open date is 'YYYYMMDD...'; keep only the date part.
        upload_date = stream_info.get('mvSvcOpenDt', '')[:8] or None

        return {
            'id': video_id,
            'title': title,
            'artist': artist,
            'thumbnail': thumbnail,
            'upload_date': upload_date,
            'duration': duration,
            'formats': formats
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/meta.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from .pladform import PladformIE
from ..utils import (
    unescapeHTML,
    int_or_none,
    ExtractorError,
)


class METAIE(InfoExtractor):
    """Extractor for video.meta.ua: decodes the obfuscated uppod player
    config, with fallbacks to YouTube and Pladform embeds."""
    _VALID_URL = r'https?://video\.meta\.ua/(?:iframe/)?(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'http://video.meta.ua/5502115.video',
        'md5': '71b6f3ee274bef16f1ab410f7f56b476',
        'info_dict': {
            'id': '5502115',
            'ext': 'mp4',
            'title': 'Sony Xperia Z camera test [HQ]',
            'description': 'Xperia Z shoots video in FullHD HDR.',
            'uploader_id': 'nomobile',
            'uploader': 'CHЁZA.TV',
            'upload_date': '20130211',
        },
        'add_ie': ['Youtube'],
    }, {
        'url': 'http://video.meta.ua/iframe/5502115',
        'only_matching': True,
    }, {
        # pladform embed
        'url': 'http://video.meta.ua/7121015.video',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        st_html5 = self._search_regex(
            r"st_html5\s*=\s*'#([^']+)'", webpage, 'uppod html5 st', default=None)

        if st_html5:
            # uppod st decryption algorithm is reverse engineered from function un(s) at uppod.js
            json_str = ''
            for i in range(0, len(st_html5), 3):
                json_str += '&#x0%s;' % st_html5[i:i + 3]
            uppod_data = self._parse_json(unescapeHTML(json_str), video_id)
            error = uppod_data.get('customnotfound')
            if error:
                raise ExtractorError('%s said: %s' % (self.IE_NAME, error), expected=True)

            video_url = uppod_data['file']
            info = {
                'id': video_id,
                'url': video_url,
                'title': uppod_data.get('comment') or self._og_search_title(webpage),
                'description': self._og_search_description(webpage, default=None),
                'thumbnail': uppod_data.get('poster') or self._og_search_thumbnail(webpage),
                'duration': int_or_none(self._og_search_property(
                    'video:duration', webpage, default=None)),
            }
            if 'youtube.com/' in video_url:
                info.update({
                    '_type': 'url_transparent',
                    'ie_key': 'Youtube',
                })
            return info

        pladform_url = PladformIE._extract_url(webpage)
        if pladform_url:
            return self.url_result(pladform_url)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/metacafe.py
from __future__ import unicode_literals import json import re from .common import InfoExtractor from ..compat import ( compat_parse_qs, compat_urllib_parse, compat_urllib_parse_unquote, ) from ..utils import ( determine_ext, ExtractorError, int_or_none, get_element_by_attribute, mimetype2ext, ) class MetacafeIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?metacafe\.com/watch/(?P<video_id>[^/]+)/(?P<display_id>[^/?#]+)' _DISCLAIMER = 'http://www.metacafe.com/family_filter/' _FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user' IE_NAME = 'metacafe' _TESTS = [ # Youtube video { 'add_ie': ['Youtube'], 'url': 'http://metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/', 'info_dict': { 'id': '_aUehQsCQtM', 'ext': 'mp4', 'upload_date': '20090102', 'title': 'The Electric Company | "Short I" | PBS KIDS GO!', 'description': 'md5:2439a8ef6d5a70e380c22f5ad323e5a8', 'uploader': 'PBS', 'uploader_id': 'PBS' } }, # Normal metacafe video { 'url': 'http://www.metacafe.com/watch/11121940/news_stuff_you_wont_do_with_your_playstation_4/', 'md5': '6e0bca200eaad2552e6915ed6fd4d9ad', 'info_dict': { 'id': '11121940', 'ext': 'mp4', 'title': 'News: Stuff You Won\'t Do with Your PlayStation 4', 'uploader': 'ign', 'description': 'Sony released a massive FAQ on the PlayStation Blog detailing the PS4\'s capabilities and limitations.', }, 'skip': 'Page is temporarily unavailable.', }, # metacafe video with family filter { 'url': 'http://www.metacafe.com/watch/2155630/adult_art_by_david_hart_156/', 'md5': 'b06082c5079bbdcde677a6291fbdf376', 'info_dict': { 'id': '2155630', 'ext': 'mp4', 'title': 'Adult Art By David Hart 156', 'uploader': '63346', 'description': 'md5:9afac8fc885252201ad14563694040fc', }, 'params': { 'skip_download': True, }, }, # AnyClip video { 'url': 'http://www.metacafe.com/watch/an-dVVXnuY7Jh77J/the_andromeda_strain_1971_stop_the_bomb_part_3/', 'info_dict': { 'id': 'an-dVVXnuY7Jh77J', 'ext': 'mp4', 'title': 'The 
Andromeda Strain (1971): Stop the Bomb Part 3', 'uploader': 'AnyClip', 'description': 'md5:cbef0460d31e3807f6feb4e7a5952e5b', }, }, # age-restricted video { 'url': 'http://www.metacafe.com/watch/5186653/bbc_internal_christmas_tape_79_uncensored_outtakes_etc/', 'md5': '98dde7c1a35d02178e8ab7560fe8bd09', 'info_dict': { 'id': '5186653', 'ext': 'mp4', 'title': 'BBC INTERNAL Christmas Tape \'79 - UNCENSORED Outtakes, Etc.', 'uploader': 'Dwayne Pipe', 'description': 'md5:950bf4c581e2c059911fa3ffbe377e4b', 'age_limit': 18, }, }, # cbs video { 'url': 'http://www.metacafe.com/watch/cb-8VD4r_Zws8VP/open_this_is_face_the_nation_february_9/', 'info_dict': { 'id': '8VD4r_Zws8VP', 'ext': 'flv', 'title': 'Open: This is Face the Nation, February 9', 'description': 'md5:8a9ceec26d1f7ed6eab610834cc1a476', 'duration': 96, 'uploader': 'CBSI-NEW', 'upload_date': '20140209', 'timestamp': 1391959800, }, 'params': { # rtmp download 'skip_download': True, }, }, # Movieclips.com video { 'url': 'http://www.metacafe.com/watch/mv-Wy7ZU/my_week_with_marilyn_do_you_love_me/', 'info_dict': { 'id': 'mv-Wy7ZU', 'ext': 'mp4', 'title': 'My Week with Marilyn - Do You Love Me?', 'description': 'From the movie My Week with Marilyn - Colin (Eddie Redmayne) professes his love to Marilyn (Michelle Williams) and gets her to promise to return to set and finish the movie.', 'uploader': 'movie_trailers', 'duration': 176, }, 'params': { 'skip_download': 'requires rtmpdump', } } ] def report_disclaimer(self): self.to_screen('Retrieving disclaimer') def _real_extract(self, url): # Extract id and simplified title from URL video_id, display_id = re.match(self._VALID_URL, url).groups() # the video may come from an external site m_external = re.match(r'^(\w{2})-(.*)$', video_id) if m_external is not None: prefix, ext_id = m_external.groups() # Check if video comes from YouTube if prefix == 'yt': return self.url_result('http://www.youtube.com/watch?v=%s' % ext_id, 'Youtube') # CBS videos use theplatform.com if prefix 
== 'cb': return self.url_result('theplatform:%s' % ext_id, 'ThePlatform') headers = { # Disable family filter 'Cookie': 'user=%s; ' % compat_urllib_parse.quote(json.dumps({'ffilter': False})) } # AnyClip videos require the flashversion cookie so that we get the link # to the mp4 file if video_id.startswith('an-'): headers['Cookie'] += 'flashVersion=0; ' # Retrieve video webpage to extract further information webpage = self._download_webpage(url, video_id, headers=headers) error = get_element_by_attribute( 'class', 'notfound-page-title', webpage) if error: raise ExtractorError(error, expected=True) video_title = self._html_search_meta( ['og:title', 'twitter:title'], webpage, 'title', default=None) or self._search_regex(r'<h1>(.*?)</h1>', webpage, 'title') # Extract URL, uploader and title from webpage self.report_extraction(video_id) video_url = None mobj = re.search(r'(?m)&(?:media|video)URL=([^&]+)', webpage) if mobj is not None: mediaURL = compat_urllib_parse_unquote(mobj.group(1)) video_ext = determine_ext(mediaURL) # Extract gdaKey if available mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage) if mobj is None: video_url = mediaURL else: gdaKey = mobj.group(1) video_url = '%s?__gda__=%s' % (mediaURL, gdaKey) if video_url is None: mobj = re.search(r'<video src="([^"]+)"', webpage) if mobj: video_url = mobj.group(1) video_ext = 'mp4' if video_url is None: flashvars = self._search_regex( r' name="flashvars" value="(.*?)"', webpage, 'flashvars', default=None) if flashvars: vardict = compat_parse_qs(flashvars) if 'mediaData' not in vardict: raise ExtractorError('Unable to extract media URL') mobj = re.search( r'"mediaURL":"(?P<mediaURL>http.*?)",(.*?)"key":"(?P<key>.*?)"', vardict['mediaData'][0]) if mobj is None: raise ExtractorError('Unable to extract media URL') mediaURL = mobj.group('mediaURL').replace('\\/', '/') video_url = '%s?__gda__=%s' % (mediaURL, mobj.group('key')) video_ext = determine_ext(video_url) if video_url is None: player_url = self._search_regex( 
r"swfobject\.embedSWF\('([^']+)'", webpage, 'config URL', default=None) if player_url: config_url = self._search_regex( r'config=(.+)$', player_url, 'config URL') config_doc = self._download_xml( config_url, video_id, note='Downloading video config') smil_url = config_doc.find('.//properties').attrib['smil_file'] smil_doc = self._download_xml( smil_url, video_id, note='Downloading SMIL document') base_url = smil_doc.find('./head/meta').attrib['base'] video_url = [] for vn in smil_doc.findall('.//video'): br = int(vn.attrib['system-bitrate']) play_path = vn.attrib['src'] video_url.append({ 'format_id': 'smil-%d' % br, 'url': base_url, 'play_path': play_path, 'page_url': url, 'player_url': player_url, 'ext': play_path.partition(':')[0], }) if video_url is None: flashvars = self._parse_json(self._search_regex( r'flashvars\s*=\s*({.*});', webpage, 'flashvars', default=None), video_id, fatal=False) if flashvars: video_url = [] for source in flashvars.get('sources'): source_url = source.get('src') if not source_url: continue ext = mimetype2ext(source.get('type')) or determine_ext(source_url) if ext == 'm3u8': video_url.extend(self._extract_m3u8_formats( source_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) else: video_url.append({ 'url': source_url, 'ext': ext, }) if video_url is None: raise ExtractorError('Unsupported video type') description = self._html_search_meta( ['og:description', 'twitter:description', 'description'], webpage, 'title', fatal=False) thumbnail = self._html_search_meta( ['og:image', 'twitter:image'], webpage, 'title', fatal=False) video_uploader = self._html_search_regex( r'submitter=(.*?);|googletag\.pubads\(\)\.setTargeting\("(?:channel|submiter)","([^"]+)"\);', webpage, 'uploader nickname', fatal=False) duration = int_or_none( self._html_search_meta('video:duration', webpage, default=None)) age_limit = ( 18 if re.search(r'(?:"contentRating":|"rating",)"restricted"', webpage) else 0) if isinstance(video_url, list): formats = 
video_url else: formats = [{ 'url': video_url, 'ext': video_ext, }] self._sort_formats(formats) return { 'id': video_id, 'display_id': display_id, 'description': description, 'uploader': video_uploader, 'title': video_title, 'thumbnail': thumbnail, 'age_limit': age_limit, 'formats': formats, 'duration': duration, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/metacritic.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( fix_xml_ampersands, ) class MetacriticIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?metacritic\.com/.+?/trailers/(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.metacritic.com/game/playstation-4/infamous-second-son/trailers/3698222', 'info_dict': { 'id': '3698222', 'ext': 'mp4', 'title': 'inFamous: Second Son - inSide Sucker Punch: Smoke & Mirrors', 'description': 'Take a peak behind-the-scenes to see how Sucker Punch brings smoke into the universe of inFAMOUS Second Son on the PS4.', 'duration': 221, }, 'skip': 'Not providing trailers anymore', }, { 'url': 'http://www.metacritic.com/game/playstation-4/tales-from-the-borderlands-a-telltale-game-series/trailers/5740315', 'info_dict': { 'id': '5740315', 'ext': 'mp4', 'title': 'Tales from the Borderlands - Finale: The Vault of the Traveler', 'description': 'In the final episode of the season, all hell breaks loose. Jack is now in control of Helios\' systems, and he\'s ready to reclaim his rightful place as king of Hyperion (with or without you).', 'duration': 114, }, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') webpage = self._download_webpage(url, video_id) # The xml is not well formatted, there are raw '&' info = self._download_xml('http://www.metacritic.com/video_data?video=' + video_id, video_id, 'Downloading info xml', transform_source=fix_xml_ampersands) clip = next(c for c in info.findall('playList/clip') if c.find('id').text == video_id) formats = [] for videoFile in clip.findall('httpURI/videoFile'): rate_str = videoFile.find('rate').text video_url = videoFile.find('filePath').text formats.append({ 'url': video_url, 'ext': 'mp4', 'format_id': rate_str, 'tbr': int(rate_str), }) self._sort_formats(formats) description = self._html_search_regex(r'<b>Description:</b>(.*?)</p>', webpage, 'description', flags=re.DOTALL) return { 'id': video_id, 'title': 
clip.find('title').text, 'formats': formats, 'description': description, 'duration': int(clip.find('duration').text), }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/mgoon.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( ExtractorError, qualities, unified_strdate, ) class MgoonIE(InfoExtractor): _VALID_URL = r'''(?x)https?://(?:www\.)? (?:(:?m\.)?mgoon\.com/(?:ch/(?:.+)/v|play/view)| video\.mgoon\.com)/(?P<id>[0-9]+)''' _API_URL = 'http://mpos.mgoon.com/player/video?id={0:}' _TESTS = [ { 'url': 'http://m.mgoon.com/ch/hi6618/v/5582148', 'md5': 'dd46bb66ab35cf6d51cc812fd82da79d', 'info_dict': { 'id': '5582148', 'uploader_id': 'hi6618', 'duration': 240.419, 'upload_date': '20131220', 'ext': 'mp4', 'title': 'md5:543aa4c27a4931d371c3f433e8cebebc', 'thumbnail': r're:^https?://.*\.jpg$', } }, { 'url': 'http://www.mgoon.com/play/view/5582148', 'only_matching': True, }, { 'url': 'http://video.mgoon.com/5582148', 'only_matching': True, }, ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') data = self._download_json(self._API_URL.format(video_id), video_id) if data.get('errorInfo', {}).get('code') != 'NONE': raise ExtractorError('%s encountered an error: %s' % ( self.IE_NAME, data['errorInfo']['message']), expected=True) v_info = data['videoInfo'] title = v_info.get('v_title') thumbnail = v_info.get('v_thumbnail') duration = v_info.get('v_duration') upload_date = unified_strdate(v_info.get('v_reg_date')) uploader_id = data.get('userInfo', {}).get('u_alias') if duration: duration /= 1000.0 age_limit = None if data.get('accessInfo', {}).get('code') == 'VIDEO_STATUS_ADULT': age_limit = 18 formats = [] get_quality = qualities(['360p', '480p', '720p', '1080p']) for fmt in data['videoFiles']: formats.append({ 'format_id': fmt['label'], 'quality': get_quality(fmt['label']), 'url': fmt['url'], 'ext': fmt['format'], }) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'formats': formats, 'thumbnail': thumbnail, 'duration': duration, 'upload_date': upload_date, 'uploader_id': uploader_id, 'age_limit': age_limit, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/mgtv.py
# coding: utf-8 from __future__ import unicode_literals import base64 import time import uuid from .common import InfoExtractor from ..compat import ( compat_HTTPError, compat_str, ) from ..utils import ( ExtractorError, int_or_none, ) class MGTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?mgtv\.com/(v|b)/(?:[^/]+/)*(?P<id>\d+)\.html' IE_DESC = '芒果TV' _GEO_COUNTRIES = ['CN'] _TESTS = [{ 'url': 'http://www.mgtv.com/v/1/290525/f/3116640.html', 'info_dict': { 'id': '3116640', 'ext': 'mp4', 'title': '我是歌手 第四季', 'description': '我是歌手第四季双年巅峰会', 'duration': 7461, 'thumbnail': r're:^https?://.*\.jpg$', }, }, { 'url': 'http://www.mgtv.com/b/301817/3826653.html', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) try: api_data = self._download_json( 'https://pcweb.api.mgtv.com/player/video', video_id, query={ 'tk2': base64.urlsafe_b64encode(b'did=%s|pno=1030|ver=0.3.0301|clit=%d' % (compat_str(uuid.uuid4()).encode(), time.time()))[::-1], 'video_id': video_id, }, headers=self.geo_verification_headers())['data'] except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401: error = self._parse_json(e.cause.read().decode(), None) if error.get('code') == 40005: self.raise_geo_restricted(countries=self._GEO_COUNTRIES) raise ExtractorError(error['msg'], expected=True) raise info = api_data['info'] title = info['title'].strip() stream_data = self._download_json( 'https://pcweb.api.mgtv.com/player/getSource', video_id, query={ 'pm2': api_data['atc']['pm2'], 'video_id': video_id, }, headers=self.geo_verification_headers())['data'] stream_domain = stream_data['stream_domain'][0] formats = [] for idx, stream in enumerate(stream_data['stream']): stream_path = stream.get('url') if not stream_path: continue format_data = self._download_json( stream_domain + stream_path, video_id, note='Download video info for format #%d' % idx) format_url = format_data.get('info') if not format_url: continue tbr = 
int_or_none(stream.get('filebitrate') or self._search_regex( r'_(\d+)_mp4/', format_url, 'tbr', default=None)) formats.append({ 'format_id': compat_str(tbr or idx), 'url': format_url, 'ext': 'mp4', 'tbr': tbr, 'protocol': 'm3u8_native', 'http_headers': { 'Referer': url, }, 'format_note': stream.get('name'), }) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'formats': formats, 'description': info.get('desc'), 'duration': int_or_none(info.get('duration')), 'thumbnail': info.get('thumb'), }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/miaopai.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class MiaoPaiIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?miaopai\.com/show/(?P<id>[-A-Za-z0-9~_]+)' _TEST = { 'url': 'http://www.miaopai.com/show/n~0hO7sfV1nBEw4Y29-Hqg__.htm', 'md5': '095ed3f1cd96b821add957bdc29f845b', 'info_dict': { 'id': 'n~0hO7sfV1nBEw4Y29-Hqg__', 'ext': 'mp4', 'title': '西游记音乐会的秒拍视频', 'thumbnail': 're:^https?://.*/n~0hO7sfV1nBEw4Y29-Hqg___m.jpg', } } _USER_AGENT_IPAD = 'Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1' def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage( url, video_id, headers={'User-Agent': self._USER_AGENT_IPAD}) title = self._html_search_regex( r'<title>([^<]+)</title>', webpage, 'title') thumbnail = self._html_search_regex( r'<div[^>]+class=(?P<q1>[\'"]).*\bvideo_img\b.*(?P=q1)[^>]+data-url=(?P<q2>[\'"])(?P<url>[^\'"]+)(?P=q2)', webpage, 'thumbnail', fatal=False, group='url') videos = self._parse_html5_media_entries(url, webpage, video_id) info = videos[0] info.update({ 'id': video_id, 'title': title, 'thumbnail': thumbnail, }) return info
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/microsoftvirtualacademy.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_xpath, ) from ..utils import ( int_or_none, parse_duration, smuggle_url, unsmuggle_url, xpath_text, ) class MicrosoftVirtualAcademyBaseIE(InfoExtractor): def _extract_base_url(self, course_id, display_id): return self._download_json( 'https://api-mlxprod.microsoft.com/services/products/anonymous/%s' % course_id, display_id, 'Downloading course base URL') def _extract_chapter_and_title(self, title): if not title: return None, None m = re.search(r'(?P<chapter>\d+)\s*\|\s*(?P<title>.+)', title) return (int(m.group('chapter')), m.group('title')) if m else (None, title) class MicrosoftVirtualAcademyIE(MicrosoftVirtualAcademyBaseIE): IE_NAME = 'mva' IE_DESC = 'Microsoft Virtual Academy videos' _VALID_URL = r'(?:%s:|https?://(?:mva\.microsoft|(?:www\.)?microsoftvirtualacademy)\.com/[^/]+/training-courses/[^/?#&]+-)(?P<course_id>\d+)(?::|\?l=)(?P<id>[\da-zA-Z]+_\d+)' % IE_NAME _TESTS = [{ 'url': 'https://mva.microsoft.com/en-US/training-courses/microsoft-azure-fundamentals-virtual-machines-11788?l=gfVXISmEB_6804984382', 'md5': '7826c44fc31678b12ad8db11f6b5abb9', 'info_dict': { 'id': 'gfVXISmEB_6804984382', 'ext': 'mp4', 'title': 'Course Introduction', 'formats': 'mincount:3', 'subtitles': { 'en': [{ 'ext': 'ttml', }], }, } }, { 'url': 'mva:11788:gfVXISmEB_6804984382', 'only_matching': True, }] def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) mobj = re.match(self._VALID_URL, url) course_id = mobj.group('course_id') video_id = mobj.group('id') base_url = smuggled_data.get('base_url') or self._extract_base_url(course_id, video_id) settings = self._download_xml( '%s/content/content_%s/videosettings.xml?v=1' % (base_url, video_id), video_id, 'Downloading video settings XML') _, title = self._extract_chapter_and_title(xpath_text( settings, './/Title', 'title', fatal=True)) formats = [] for sources in 
settings.findall(compat_xpath('.//MediaSources')): sources_type = sources.get('videoType') for source in sources.findall(compat_xpath('./MediaSource')): video_url = source.text if not video_url or not video_url.startswith('http'): continue if sources_type == 'smoothstreaming': formats.extend(self._extract_ism_formats( video_url, video_id, 'mss', fatal=False)) continue video_mode = source.get('videoMode') height = int_or_none(self._search_regex( r'^(\d+)[pP]$', video_mode or '', 'height', default=None)) codec = source.get('codec') acodec, vcodec = [None] * 2 if codec: codecs = codec.split(',') if len(codecs) == 2: acodec, vcodec = codecs elif len(codecs) == 1: vcodec = codecs[0] formats.append({ 'url': video_url, 'format_id': video_mode, 'height': height, 'acodec': acodec, 'vcodec': vcodec, }) self._sort_formats(formats) subtitles = {} for source in settings.findall(compat_xpath('.//MarkerResourceSource')): subtitle_url = source.text if not subtitle_url: continue subtitles.setdefault('en', []).append({ 'url': '%s/%s' % (base_url, subtitle_url), 'ext': source.get('type'), }) return { 'id': video_id, 'title': title, 'subtitles': subtitles, 'formats': formats } class MicrosoftVirtualAcademyCourseIE(MicrosoftVirtualAcademyBaseIE): IE_NAME = 'mva:course' IE_DESC = 'Microsoft Virtual Academy courses' _VALID_URL = r'(?:%s:|https?://(?:mva\.microsoft|(?:www\.)?microsoftvirtualacademy)\.com/[^/]+/training-courses/(?P<display_id>[^/?#&]+)-)(?P<id>\d+)' % IE_NAME _TESTS = [{ 'url': 'https://mva.microsoft.com/en-US/training-courses/microsoft-azure-fundamentals-virtual-machines-11788', 'info_dict': { 'id': '11788', 'title': 'Microsoft Azure Fundamentals: Virtual Machines', }, 'playlist_count': 36, }, { # with emphasized chapters 'url': 'https://mva.microsoft.com/en-US/training-courses/developing-windows-10-games-with-construct-2-16335', 'info_dict': { 'id': '16335', 'title': 'Developing Windows 10 Games with Construct 2', }, 'playlist_count': 10, }, { 'url': 
'https://www.microsoftvirtualacademy.com/en-US/training-courses/microsoft-azure-fundamentals-virtual-machines-11788', 'only_matching': True, }, { 'url': 'mva:course:11788', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if MicrosoftVirtualAcademyIE.suitable(url) else super( MicrosoftVirtualAcademyCourseIE, cls).suitable(url) def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) course_id = mobj.group('id') display_id = mobj.group('display_id') base_url = self._extract_base_url(course_id, display_id) manifest = self._download_json( '%s/imsmanifestlite.json' % base_url, display_id, 'Downloading course manifest JSON')['manifest'] organization = manifest['organizations']['organization'][0] entries = [] for chapter in organization['item']: chapter_number, chapter_title = self._extract_chapter_and_title(chapter.get('title')) chapter_id = chapter.get('@identifier') for item in chapter.get('item', []): item_id = item.get('@identifier') if not item_id: continue metadata = item.get('resource', {}).get('metadata') or {} if metadata.get('learningresourcetype') != 'Video': continue _, title = self._extract_chapter_and_title(item.get('title')) duration = parse_duration(metadata.get('duration')) description = metadata.get('description') entries.append({ '_type': 'url_transparent', 'url': smuggle_url( 'mva:%s:%s' % (course_id, item_id), {'base_url': base_url}), 'title': title, 'description': description, 'duration': duration, 'chapter': chapter_title, 'chapter_number': chapter_number, 'chapter_id': chapter_id, }) title = organization.get('title') or manifest.get('metadata', {}).get('title') return self.playlist_result(entries, course_id, title)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/minhateca.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( int_or_none, parse_duration, parse_filesize, sanitized_Request, urlencode_postdata, ) class MinhatecaIE(InfoExtractor): _VALID_URL = r'https?://minhateca\.com\.br/[^?#]+,(?P<id>[0-9]+)\.' _TEST = { 'url': 'http://minhateca.com.br/pereba/misc/youtube-dl+test+video,125848331.mp4(video)', 'info_dict': { 'id': '125848331', 'ext': 'mp4', 'title': 'youtube-dl test video', 'thumbnail': r're:^https?://.*\.jpg$', 'filesize_approx': 1530000, 'duration': 9, 'view_count': int, } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) token = self._html_search_regex( r'<input name="__RequestVerificationToken".*?value="([^"]+)"', webpage, 'request token') token_data = [ ('fileId', video_id), ('__RequestVerificationToken', token), ] req = sanitized_Request( 'http://minhateca.com.br/action/License/Download', data=urlencode_postdata(token_data)) req.add_header('Content-Type', 'application/x-www-form-urlencoded') data = self._download_json( req, video_id, note='Downloading metadata') video_url = data['redirectUrl'] title_str = self._html_search_regex( r'<h1.*?>(.*?)</h1>', webpage, 'title') title, _, ext = title_str.rpartition('.') filesize_approx = parse_filesize(self._html_search_regex( r'<p class="fileSize">(.*?)</p>', webpage, 'file size approximation', fatal=False)) duration = parse_duration(self._html_search_regex( r'(?s)<p class="fileLeng[ht][th]">.*?class="bold">(.*?)<', webpage, 'duration', fatal=False)) view_count = int_or_none(self._html_search_regex( r'<p class="downloadsCounter">([0-9]+)</p>', webpage, 'view count', fatal=False)) return { 'id': video_id, 'url': video_url, 'title': title, 'ext': ext, 'filesize_approx': filesize_approx, 'duration': duration, 'view_count': view_count, 'thumbnail': self._og_search_thumbnail(webpage), }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/ministrygrid.py
from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( ExtractorError, smuggle_url, ) class MinistryGridIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?ministrygrid\.com/([^/?#]*/)*(?P<id>[^/#?]+)/?(?:$|[?#])' _TEST = { 'url': 'http://www.ministrygrid.com/training-viewer/-/training/t4g-2014-conference/the-gospel-by-numbers-4/the-gospel-by-numbers', 'md5': '844be0d2a1340422759c2a9101bab017', 'info_dict': { 'id': '3453494717001', 'ext': 'mp4', 'title': 'The Gospel by Numbers', 'thumbnail': r're:^https?://.*\.jpg', 'upload_date': '20140410', 'description': 'Coming soon from T4G 2014!', 'uploader_id': '2034960640001', 'timestamp': 1397145591, }, 'params': { # m3u8 download 'skip_download': True, }, 'add_ie': ['TDSLifeway'], } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) portlets = self._parse_json(self._search_regex( r'Liferay\.Portlet\.list=(\[.+?\])', webpage, 'portlet list'), video_id) pl_id = self._search_regex( r'getPlid:function\(\){return"(\d+)"}', webpage, 'p_l_id') for i, portlet in enumerate(portlets): portlet_url = 'http://www.ministrygrid.com/c/portal/render_portlet?p_l_id=%s&p_p_id=%s' % (pl_id, portlet) portlet_code = self._download_webpage( portlet_url, video_id, note='Looking in portlet %s (%d/%d)' % (portlet, i + 1, len(portlets)), fatal=False) video_iframe_url = self._search_regex( r'<iframe.*?src="([^"]+)"', portlet_code, 'video iframe', default=None) if video_iframe_url: return self.url_result( smuggle_url(video_iframe_url, {'force_videoid': video_id}), video_id=video_id) raise ExtractorError('Could not find video iframe in any portlets')
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/minoto.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( int_or_none, parse_codecs, ) class MinotoIE(InfoExtractor): _VALID_URL = r'(?:minoto:|https?://(?:play|iframe|embed)\.minoto-video\.com/(?P<player_id>[0-9]+)/)(?P<id>[a-zA-Z0-9]+)' def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) player_id = mobj.group('player_id') or '1' video_id = mobj.group('id') video_data = self._download_json('http://play.minoto-video.com/%s/%s.js' % (player_id, video_id), video_id) video_metadata = video_data['video-metadata'] formats = [] for fmt in video_data['video-files']: fmt_url = fmt.get('url') if not fmt_url: continue container = fmt.get('container') if container == 'hls': formats.extend(fmt_url, video_id, 'mp4', m3u8_id='hls', fatal=False) else: fmt_profile = fmt.get('profile') or {} formats.append({ 'format_id': fmt_profile.get('name-short'), 'format_note': fmt_profile.get('name'), 'url': fmt_url, 'container': container, 'tbr': int_or_none(fmt.get('bitrate')), 'filesize': int_or_none(fmt.get('filesize')), 'width': int_or_none(fmt.get('width')), 'height': int_or_none(fmt.get('height')), 'codecs': parse_codecs(fmt.get('codecs')), }) self._sort_formats(formats) return { 'id': video_id, 'title': video_metadata['title'], 'description': video_metadata.get('description'), 'thumbnail': video_metadata.get('video-poster', {}).get('url'), 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/miomio.py
# coding: utf-8 from __future__ import unicode_literals import random from .common import InfoExtractor from ..compat import compat_urlparse from ..utils import ( xpath_text, int_or_none, ExtractorError, sanitized_Request, ) class MioMioIE(InfoExtractor): IE_NAME = 'miomio.tv' _VALID_URL = r'https?://(?:www\.)?miomio\.tv/watch/cc(?P<id>[0-9]+)' _TESTS = [{ # "type=video" in flashvars 'url': 'http://www.miomio.tv/watch/cc88912/', 'info_dict': { 'id': '88912', 'ext': 'flv', 'title': '【SKY】字幕 铠武昭和VS平成 假面骑士大战FEAT战队 魔星字幕组 字幕', 'duration': 5923, }, 'skip': 'Unable to load videos', }, { 'url': 'http://www.miomio.tv/watch/cc184024/', 'info_dict': { 'id': '43729', 'title': '《动漫同人插画绘制》', }, 'playlist_mincount': 86, 'skip': 'Unable to load videos', }, { 'url': 'http://www.miomio.tv/watch/cc173113/', 'info_dict': { 'id': '173113', 'title': 'The New Macbook 2015 上手试玩与简评' }, 'playlist_mincount': 2, 'skip': 'Unable to load videos', }, { # new 'h5' player 'url': 'http://www.miomio.tv/watch/cc273997/', 'md5': '0b27a4b4495055d826813f8c3a6b2070', 'info_dict': { 'id': '273997', 'ext': 'mp4', 'title': 'マツコの知らない世界【劇的進化SP!ビニール傘&冷凍食品2016】 1_2 - 16 05 31', }, 'skip': 'Unable to load videos', }] def _extract_mioplayer(self, webpage, video_id, title, http_headers): xml_config = self._search_regex( r'flashvars="type=(?:sina|video)&amp;(.+?)&amp;', webpage, 'xml config') # skipping the following page causes lags and eventually connection drop-outs self._request_webpage( 'http://www.miomio.tv/mioplayer/mioplayerconfigfiles/xml.php?id=%s&r=%s' % (id, random.randint(100, 999)), video_id) vid_config_request = sanitized_Request( 'http://www.miomio.tv/mioplayer/mioplayerconfigfiles/sina.php?{0}'.format(xml_config), headers=http_headers) # the following xml contains the actual configuration information on the video file(s) vid_config = self._download_xml(vid_config_request, video_id) if not int_or_none(xpath_text(vid_config, 'timelength')): raise ExtractorError('Unable to load videos!', 
expected=True) entries = [] for f in vid_config.findall('./durl'): segment_url = xpath_text(f, 'url', 'video url') if not segment_url: continue order = xpath_text(f, 'order', 'order') segment_id = video_id segment_title = title if order: segment_id += '-%s' % order segment_title += ' part %s' % order entries.append({ 'id': segment_id, 'url': segment_url, 'title': segment_title, 'duration': int_or_none(xpath_text(f, 'length', 'duration'), 1000), 'http_headers': http_headers, }) return entries def _download_chinese_webpage(self, *args, **kwargs): # Requests with English locales return garbage headers = { 'Accept-Language': 'zh-TW,en-US;q=0.7,en;q=0.3', } kwargs.setdefault('headers', {}).update(headers) return self._download_webpage(*args, **kwargs) def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_chinese_webpage( url, video_id) title = self._html_search_meta( 'description', webpage, 'title', fatal=True) mioplayer_path = self._search_regex( r'src="(/mioplayer(?:_h5)?/[^"]+)"', webpage, 'ref_path') if '_h5' in mioplayer_path: player_url = compat_urlparse.urljoin(url, mioplayer_path) player_webpage = self._download_chinese_webpage( player_url, video_id, note='Downloading player webpage', headers={'Referer': url}) entries = self._parse_html5_media_entries(player_url, player_webpage, video_id) http_headers = {'Referer': player_url} else: http_headers = {'Referer': 'http://www.miomio.tv%s' % mioplayer_path} entries = self._extract_mioplayer(webpage, video_id, title, http_headers) if len(entries) == 1: segment = entries[0] segment['id'] = video_id segment['title'] = title segment['http_headers'] = http_headers return segment return { '_type': 'multi_video', 'id': video_id, 'entries': entries, 'title': title, 'http_headers': http_headers, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/mit.py
from __future__ import unicode_literals import re import json from .common import InfoExtractor from .youtube import YoutubeIE from ..utils import ( clean_html, ExtractorError, get_element_by_id, ) class TechTVMITIE(InfoExtractor): IE_NAME = 'techtv.mit.edu' _VALID_URL = r'https?://techtv\.mit\.edu/(?:videos|embeds)/(?P<id>\d+)' _TEST = { 'url': 'http://techtv.mit.edu/videos/25418-mit-dna-learning-center-set', 'md5': '00a3a27ee20d44bcaa0933ccec4a2cf7', 'info_dict': { 'id': '25418', 'ext': 'mp4', 'title': 'MIT DNA and Protein Sets', 'description': 'md5:46f5c69ce434f0a97e7c628cc142802d', }, } def _real_extract(self, url): video_id = self._match_id(url) raw_page = self._download_webpage( 'http://techtv.mit.edu/videos/%s' % video_id, video_id) clean_page = re.compile(r'<!--.*?-->', re.S).sub('', raw_page) base_url = self._proto_relative_url(self._search_regex( r'ipadUrl: \'(.+?cloudfront.net/)', raw_page, 'base url'), 'http:') formats_json = self._search_regex( r'bitrates: (\[.+?\])', raw_page, 'video formats') formats_mit = json.loads(formats_json) formats = [ { 'format_id': f['label'], 'url': base_url + f['url'].partition(':')[2], 'ext': f['url'].partition(':')[0], 'format': f['label'], 'width': f['width'], 'vbr': f['bitrate'], } for f in formats_mit ] title = get_element_by_id('edit-title', clean_page) description = clean_html(get_element_by_id('edit-description', clean_page)) thumbnail = self._search_regex( r'playlist:.*?url: \'(.+?)\'', raw_page, 'thumbnail', flags=re.DOTALL) return { 'id': video_id, 'title': title, 'formats': formats, 'description': description, 'thumbnail': thumbnail, } class OCWMITIE(InfoExtractor): IE_NAME = 'ocw.mit.edu' _VALID_URL = r'^https?://ocw\.mit\.edu/courses/(?P<topic>[a-z0-9\-]+)' _BASE_URL = 'http://ocw.mit.edu/' _TESTS = [ { 'url': 
'http://ocw.mit.edu/courses/electrical-engineering-and-computer-science/6-041-probabilistic-systems-analysis-and-applied-probability-fall-2010/video-lectures/lecture-7-multiple-variables-expectations-independence/', 'info_dict': { 'id': 'EObHWIEKGjA', 'ext': 'webm', 'title': 'Lecture 7: Multiple Discrete Random Variables: Expectations, Conditioning, Independence', 'description': 'In this lecture, the professor discussed multiple random variables, expectations, and binomial distribution.', 'upload_date': '20121109', 'uploader_id': 'MIT', 'uploader': 'MIT OpenCourseWare', } }, { 'url': 'http://ocw.mit.edu/courses/mathematics/18-01sc-single-variable-calculus-fall-2010/1.-differentiation/part-a-definition-and-basic-rules/session-1-introduction-to-derivatives/', 'info_dict': { 'id': '7K1sB05pE0A', 'ext': 'mp4', 'title': 'Session 1: Introduction to Derivatives', 'upload_date': '20090818', 'uploader_id': 'MIT', 'uploader': 'MIT OpenCourseWare', 'description': 'This section contains lecture video excerpts, lecture notes, an interactive mathlet with supporting documents, and problem solving videos.', } } ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) topic = mobj.group('topic') webpage = self._download_webpage(url, topic) title = self._html_search_meta('WT.cg_s', webpage) description = self._html_search_meta('Description', webpage) # search for call to ocw_embed_chapter_media(container_id, media_url, provider, page_url, image_url, start, stop, captions_file) embed_chapter_media = re.search(r'ocw_embed_chapter_media\((.+?)\)', webpage) if embed_chapter_media: metadata = re.sub(r'[\'"]', '', embed_chapter_media.group(1)) metadata = re.split(r', ?', metadata) yt = metadata[1] else: # search for call to ocw_embed_chapter_media(container_id, media_url, provider, page_url, image_url, captions_file) embed_media = re.search(r'ocw_embed_media\((.+?)\)', webpage) if embed_media: metadata = re.sub(r'[\'"]', '', embed_media.group(1)) metadata = re.split(r', ?', 
metadata) yt = metadata[1] else: raise ExtractorError('Unable to find embedded YouTube video.') video_id = YoutubeIE.extract_id(yt) return { '_type': 'url_transparent', 'id': video_id, 'title': title, 'description': description, 'url': yt, 'ie_key': 'Youtube', }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/mitele.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( int_or_none, smuggle_url, parse_duration, ) class MiTeleIE(InfoExtractor): IE_DESC = 'mitele.es' _VALID_URL = r'https?://(?:www\.)?mitele\.es/(?:[^/]+/)+(?P<id>[^/]+)/player' _TESTS = [{ 'url': 'http://www.mitele.es/programas-tv/diario-de/57b0dfb9c715da65618b4afa/player', 'info_dict': { 'id': 'FhYW1iNTE6J6H7NkQRIEzfne6t2quqPg', 'ext': 'mp4', 'title': 'Tor, la web invisible', 'description': 'md5:3b6fce7eaa41b2d97358726378d9369f', 'series': 'Diario de', 'season': 'La redacción', 'season_number': 14, 'season_id': 'diario_de_t14_11981', 'episode': 'Programa 144', 'episode_number': 3, 'thumbnail': r're:(?i)^https?://.*\.jpg$', 'duration': 2913, }, 'add_ie': ['Ooyala'], }, { # no explicit title 'url': 'http://www.mitele.es/programas-tv/cuarto-milenio/57b0de3dc915da14058b4876/player', 'info_dict': { 'id': 'oyNG1iNTE6TAPP-JmCjbwfwJqqMMX3Vq', 'ext': 'mp4', 'title': 'Cuarto Milenio Temporada 6 Programa 226', 'description': 'md5:5ff132013f0cd968ffbf1f5f3538a65f', 'series': 'Cuarto Milenio', 'season': 'Temporada 6', 'season_number': 6, 'season_id': 'cuarto_milenio_t06_12715', 'episode': 'Programa 226', 'episode_number': 24, 'thumbnail': r're:(?i)^https?://.*\.jpg$', 'duration': 7313, }, 'params': { 'skip_download': True, }, 'add_ie': ['Ooyala'], }, { 'url': 'http://www.mitele.es/series-online/la-que-se-avecina/57aac5c1c915da951a8b45ed/player', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) paths = self._download_json( 'https://www.mitele.es/amd/agp/web/metadata/general_configuration', video_id, 'Downloading paths JSON') ooyala_s = paths['general_configuration']['api_configuration']['ooyala_search'] base_url = ooyala_s.get('base_url', 'cdn-search-mediaset.carbyne.ps.ooyala.com') full_path = ooyala_s.get('full_path', '/search/v1/full/providers/') source = self._download_json( '%s://%s%s%s/docs/%s' % ( ooyala_s.get('protocol', 
'https'), base_url, full_path, ooyala_s.get('provider_id', '104951'), video_id), video_id, 'Downloading data JSON', query={ 'include_titles': 'Series,Season', 'product_name': ooyala_s.get('product_name', 'test'), 'format': 'full', })['hits']['hits'][0]['_source'] embedCode = source['offers'][0]['embed_codes'][0] titles = source['localizable_titles'][0] title = titles.get('title_medium') or titles['title_long'] description = titles.get('summary_long') or titles.get('summary_medium') def get(key1, key2): value1 = source.get(key1) if not value1 or not isinstance(value1, list): return if not isinstance(value1[0], dict): return return value1[0].get(key2) series = get('localizable_titles_series', 'title_medium') season = get('localizable_titles_season', 'title_medium') season_number = int_or_none(source.get('season_number')) season_id = source.get('season_id') episode = titles.get('title_sort_name') episode_number = int_or_none(source.get('episode_number')) duration = parse_duration(get('videos', 'duration')) return { '_type': 'url_transparent', # for some reason only HLS is supported 'url': smuggle_url('ooyala:' + embedCode, {'supportedformats': 'm3u8,dash'}), 'id': video_id, 'title': title, 'description': description, 'series': series, 'season': season, 'season_number': season_number, 'season_id': season_id, 'episode': episode, 'episode_number': episode_number, 'duration': duration, 'thumbnail': get('images', 'url'), }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/mixcloud.py
from __future__ import unicode_literals import functools import itertools import re from .common import InfoExtractor from ..compat import ( compat_b64decode, compat_chr, compat_ord, compat_str, compat_urllib_parse_unquote, compat_urlparse, compat_zip ) from ..utils import ( clean_html, ExtractorError, int_or_none, OnDemandPagedList, str_to_int, try_get, urljoin, ) class MixcloudIE(InfoExtractor): _VALID_URL = r'https?://(?:(?:www|beta|m)\.)?mixcloud\.com/([^/]+)/(?!stream|uploads|favorites|listens|playlists)([^/]+)' IE_NAME = 'mixcloud' _TESTS = [{ 'url': 'http://www.mixcloud.com/dholbach/cryptkeeper/', 'info_dict': { 'id': 'dholbach-cryptkeeper', 'ext': 'm4a', 'title': 'Cryptkeeper', 'description': 'After quite a long silence from myself, finally another Drum\'n\'Bass mix with my favourite current dance floor bangers.', 'uploader': 'Daniel Holbach', 'uploader_id': 'dholbach', 'thumbnail': r're:https?://.*\.jpg', 'view_count': int, }, }, { 'url': 'http://www.mixcloud.com/gillespeterson/caribou-7-inch-vinyl-mix-chat/', 'info_dict': { 'id': 'gillespeterson-caribou-7-inch-vinyl-mix-chat', 'ext': 'mp3', 'title': 'Caribou 7 inch Vinyl Mix & Chat', 'description': 'md5:2b8aec6adce69f9d41724647c65875e8', 'uploader': 'Gilles Peterson Worldwide', 'uploader_id': 'gillespeterson', 'thumbnail': 're:https?://.*', 'view_count': int, }, }, { 'url': 'https://beta.mixcloud.com/RedLightRadio/nosedrip-15-red-light-radio-01-18-2016/', 'only_matching': True, }] @staticmethod def _decrypt_xor_cipher(key, ciphertext): """Encrypt/Decrypt XOR cipher. 
Both ways are possible because it's XOR.""" return ''.join([ compat_chr(compat_ord(ch) ^ compat_ord(k)) for ch, k in compat_zip(ciphertext, itertools.cycle(key))]) def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) uploader = mobj.group(1) cloudcast_name = mobj.group(2) track_id = compat_urllib_parse_unquote('-'.join((uploader, cloudcast_name))) webpage = self._download_webpage(url, track_id) # Legacy path encrypted_play_info = self._search_regex( r'm-play-info="([^"]+)"', webpage, 'play info', default=None) if encrypted_play_info is not None: # Decode encrypted_play_info = compat_b64decode(encrypted_play_info) else: # New path full_info_json = self._parse_json(self._html_search_regex( r'<script id="relay-data" type="text/x-mixcloud">([^<]+)</script>', webpage, 'play info'), 'play info') for item in full_info_json: item_data = try_get(item, [ lambda x: x['cloudcast']['data']['cloudcastLookup'], lambda x: x['cloudcastLookup']['data']['cloudcastLookup'], ], dict) if try_get(item_data, lambda x: x['streamInfo']['url']): info_json = item_data break else: raise ExtractorError('Failed to extract matching stream info') message = self._html_search_regex( r'(?s)<div[^>]+class="global-message cloudcast-disabled-notice-light"[^>]*>(.+?)<(?:a|/div)', webpage, 'error message', default=None) js_url = self._search_regex( r'<script[^>]+\bsrc=["\"](https://(?:www\.)?mixcloud\.com/media/(?:js2/www_js_4|js/www)\.[^>]+\.js)', webpage, 'js url') js = self._download_webpage(js_url, track_id, 'Downloading JS') # Known plaintext attack if encrypted_play_info: kps = ['{"stream_url":'] kpa_target = encrypted_play_info else: kps = ['https://', 'http://'] kpa_target = compat_b64decode(info_json['streamInfo']['url']) for kp in kps: partial_key = self._decrypt_xor_cipher(kpa_target, kp) for quote in ["'", '"']: key = self._search_regex( r'{0}({1}[^{0}]*){0}'.format(quote, re.escape(partial_key)), js, 'encryption key', default=None) if key is not None: break else: continue break 
else: raise ExtractorError('Failed to extract encryption key') if encrypted_play_info is not None: play_info = self._parse_json(self._decrypt_xor_cipher(key, encrypted_play_info), 'play info') if message and 'stream_url' not in play_info: raise ExtractorError('%s said: %s' % (self.IE_NAME, message), expected=True) song_url = play_info['stream_url'] formats = [{ 'format_id': 'normal', 'url': song_url }] title = self._html_search_regex(r'm-title="([^"]+)"', webpage, 'title') thumbnail = self._proto_relative_url(self._html_search_regex( r'm-thumbnail-url="([^"]+)"', webpage, 'thumbnail', fatal=False)) uploader = self._html_search_regex( r'm-owner-name="([^"]+)"', webpage, 'uploader', fatal=False) uploader_id = self._search_regex( r'\s+"profile": "([^"]+)",', webpage, 'uploader id', fatal=False) description = self._og_search_description(webpage) view_count = str_to_int(self._search_regex( [r'<meta itemprop="interactionCount" content="UserPlays:([0-9]+)"', r'/listeners/?">([0-9,.]+)</a>', r'(?:m|data)-tooltip=["\']([\d,.]+) plays'], webpage, 'play count', default=None)) else: title = info_json['name'] thumbnail = urljoin( 'https://thumbnailer.mixcloud.com/unsafe/600x600/', try_get(info_json, lambda x: x['picture']['urlRoot'], compat_str)) uploader = try_get(info_json, lambda x: x['owner']['displayName']) uploader_id = try_get(info_json, lambda x: x['owner']['username']) description = try_get(info_json, lambda x: x['description']) view_count = int_or_none(try_get(info_json, lambda x: x['plays'])) stream_info = info_json['streamInfo'] formats = [] def decrypt_url(f_url): for k in (key, 'IFYOUWANTTHEARTISTSTOGETPAIDDONOTDOWNLOADFROMMIXCLOUD'): decrypted_url = self._decrypt_xor_cipher(k, f_url) if re.search(r'^https?://[0-9A-Za-z.]+/[0-9A-Za-z/.?=&_-]+$', decrypted_url): return decrypted_url for url_key in ('url', 'hlsUrl', 'dashUrl'): format_url = stream_info.get(url_key) if not format_url: continue decrypted = decrypt_url(compat_b64decode(format_url)) if not decrypted: 
continue if url_key == 'hlsUrl': formats.extend(self._extract_m3u8_formats( decrypted, track_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) elif url_key == 'dashUrl': formats.extend(self._extract_mpd_formats( decrypted, track_id, mpd_id='dash', fatal=False)) else: formats.append({ 'format_id': 'http', 'url': decrypted, 'downloader_options': { # Mixcloud starts throttling at >~5M 'http_chunk_size': 5242880, }, }) self._sort_formats(formats) return { 'id': track_id, 'title': title, 'formats': formats, 'description': description, 'thumbnail': thumbnail, 'uploader': uploader, 'uploader_id': uploader_id, 'view_count': view_count, } class MixcloudPlaylistBaseIE(InfoExtractor): _PAGE_SIZE = 24 def _find_urls_in_page(self, page): for url in re.findall(r'm-play-button m-url="(?P<url>[^"]+)"', page): yield self.url_result( compat_urlparse.urljoin('https://www.mixcloud.com', clean_html(url)), MixcloudIE.ie_key()) def _fetch_tracks_page(self, path, video_id, page_name, current_page, real_page_number=None): real_page_number = real_page_number or current_page + 1 return self._download_webpage( 'https://www.mixcloud.com/%s/' % path, video_id, note='Download %s (page %d)' % (page_name, current_page + 1), errnote='Unable to download %s' % page_name, query={'page': real_page_number, 'list': 'main', '_ajax': '1'}, headers={'X-Requested-With': 'XMLHttpRequest'}) def _tracks_page_func(self, page, video_id, page_name, current_page): resp = self._fetch_tracks_page(page, video_id, page_name, current_page) for item in self._find_urls_in_page(resp): yield item def _get_user_description(self, page_content): return self._html_search_regex( r'<div[^>]+class="profile-bio"[^>]*>(.+?)</div>', page_content, 'user description', fatal=False) class MixcloudUserIE(MixcloudPlaylistBaseIE): _VALID_URL = r'https?://(?:www\.)?mixcloud\.com/(?P<user>[^/]+)/(?P<type>uploads|favorites|listens)?/?$' IE_NAME = 'mixcloud:user' _TESTS = [{ 'url': 'http://www.mixcloud.com/dholbach/', 
'info_dict': { 'id': 'dholbach_uploads', 'title': 'Daniel Holbach (uploads)', 'description': 'md5:def36060ac8747b3aabca54924897e47', }, 'playlist_mincount': 11, }, { 'url': 'http://www.mixcloud.com/dholbach/uploads/', 'info_dict': { 'id': 'dholbach_uploads', 'title': 'Daniel Holbach (uploads)', 'description': 'md5:def36060ac8747b3aabca54924897e47', }, 'playlist_mincount': 11, }, { 'url': 'http://www.mixcloud.com/dholbach/favorites/', 'info_dict': { 'id': 'dholbach_favorites', 'title': 'Daniel Holbach (favorites)', 'description': 'md5:def36060ac8747b3aabca54924897e47', }, 'params': { 'playlist_items': '1-100', }, 'playlist_mincount': 100, }, { 'url': 'http://www.mixcloud.com/dholbach/listens/', 'info_dict': { 'id': 'dholbach_listens', 'title': 'Daniel Holbach (listens)', 'description': 'md5:def36060ac8747b3aabca54924897e47', }, 'params': { 'playlist_items': '1-100', }, 'playlist_mincount': 100, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) user_id = mobj.group('user') list_type = mobj.group('type') # if only a profile URL was supplied, default to download all uploads if list_type is None: list_type = 'uploads' video_id = '%s_%s' % (user_id, list_type) profile = self._download_webpage( 'https://www.mixcloud.com/%s/' % user_id, video_id, note='Downloading user profile', errnote='Unable to download user profile') username = self._og_search_title(profile) description = self._get_user_description(profile) entries = OnDemandPagedList( functools.partial( self._tracks_page_func, '%s/%s' % (user_id, list_type), video_id, 'list of %s' % list_type), self._PAGE_SIZE) return self.playlist_result( entries, video_id, '%s (%s)' % (username, list_type), description) class MixcloudPlaylistIE(MixcloudPlaylistBaseIE): _VALID_URL = r'https?://(?:www\.)?mixcloud\.com/(?P<user>[^/]+)/playlists/(?P<playlist>[^/]+)/?$' IE_NAME = 'mixcloud:playlist' _TESTS = [{ 'url': 'https://www.mixcloud.com/RedBullThre3style/playlists/tokyo-finalists-2015/', 'info_dict': { 'id': 
'RedBullThre3style_tokyo-finalists-2015', 'title': 'National Champions 2015', 'description': 'md5:6ff5fb01ac76a31abc9b3939c16243a3', }, 'playlist_mincount': 16, }, { 'url': 'https://www.mixcloud.com/maxvibes/playlists/jazzcat-on-ness-radio/', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) user_id = mobj.group('user') playlist_id = mobj.group('playlist') video_id = '%s_%s' % (user_id, playlist_id) webpage = self._download_webpage( url, user_id, note='Downloading playlist page', errnote='Unable to download playlist page') title = self._html_search_regex( r'<a[^>]+class="parent active"[^>]*><b>\d+</b><span[^>]*>([^<]+)', webpage, 'playlist title', default=None) or self._og_search_title(webpage, fatal=False) description = self._get_user_description(webpage) entries = OnDemandPagedList( functools.partial( self._tracks_page_func, '%s/playlists/%s' % (user_id, playlist_id), video_id, 'tracklist'), self._PAGE_SIZE) return self.playlist_result(entries, video_id, title, description) class MixcloudStreamIE(MixcloudPlaylistBaseIE): _VALID_URL = r'https?://(?:www\.)?mixcloud\.com/(?P<id>[^/]+)/stream/?$' IE_NAME = 'mixcloud:stream' _TEST = { 'url': 'https://www.mixcloud.com/FirstEar/stream/', 'info_dict': { 'id': 'FirstEar', 'title': 'First Ear', 'description': 'Curators of good music\nfirstearmusic.com', }, 'playlist_mincount': 192, } def _real_extract(self, url): user_id = self._match_id(url) webpage = self._download_webpage(url, user_id) entries = [] prev_page_url = None def _handle_page(page): entries.extend(self._find_urls_in_page(page)) return self._search_regex( r'm-next-page-url="([^"]+)"', page, 'next page URL', default=None) next_page_url = _handle_page(webpage) for idx in itertools.count(0): if not next_page_url or prev_page_url == next_page_url: break prev_page_url = next_page_url current_page = int(self._search_regex( r'\?page=(\d+)', next_page_url, 'next page number')) next_page_url = 
_handle_page(self._fetch_tracks_page( '%s/stream' % user_id, user_id, 'stream', idx, real_page_number=current_page)) username = self._og_search_title(webpage) description = self._get_user_description(webpage) return self.playlist_result(entries, user_id, username, description)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/mlb.py
from __future__ import unicode_literals from .nhl import NHLBaseIE class MLBIE(NHLBaseIE): _VALID_URL = r'''(?x) https?:// (?:[\da-z_-]+\.)*(?P<site>mlb)\.com/ (?: (?: (?:[^/]+/)*c-| (?: shared/video/embed/(?:embed|m-internal-embed)\.html| (?:[^/]+/)+(?:play|index)\.jsp| )\?.*?\bcontent_id= ) (?P<id>\d+) ) ''' _CONTENT_DOMAIN = 'content.mlb.com' _TESTS = [ { 'url': 'https://www.mlb.com/mariners/video/ackleys-spectacular-catch/c-34698933', 'md5': '632358dacfceec06bad823b83d21df2d', 'info_dict': { 'id': '34698933', 'ext': 'mp4', 'title': "Ackley's spectacular catch", 'description': 'md5:7f5a981eb4f3cbc8daf2aeffa2215bf0', 'duration': 66, 'timestamp': 1405995000, 'upload_date': '20140722', 'thumbnail': r're:^https?://.*\.jpg$', }, }, { 'url': 'https://www.mlb.com/video/stanton-prepares-for-derby/c-34496663', 'md5': 'bf2619bf9cacc0a564fc35e6aeb9219f', 'info_dict': { 'id': '34496663', 'ext': 'mp4', 'title': 'Stanton prepares for Derby', 'description': 'md5:d00ce1e5fd9c9069e9c13ab4faedfa57', 'duration': 46, 'timestamp': 1405120200, 'upload_date': '20140711', 'thumbnail': r're:^https?://.*\.jpg$', }, }, { 'url': 'https://www.mlb.com/video/cespedes-repeats-as-derby-champ/c-34578115', 'md5': '99bb9176531adc600b90880fb8be9328', 'info_dict': { 'id': '34578115', 'ext': 'mp4', 'title': 'Cespedes repeats as Derby champ', 'description': 'md5:08df253ce265d4cf6fb09f581fafad07', 'duration': 488, 'timestamp': 1405414336, 'upload_date': '20140715', 'thumbnail': r're:^https?://.*\.jpg$', }, }, { 'url': 'https://www.mlb.com/video/bautista-on-home-run-derby/c-34577915', 'md5': 'da8b57a12b060e7663ee1eebd6f330ec', 'info_dict': { 'id': '34577915', 'ext': 'mp4', 'title': 'Bautista on Home Run Derby', 'description': 'md5:b80b34031143d0986dddc64a8839f0fb', 'duration': 52, 'timestamp': 1405405122, 'upload_date': '20140715', 'thumbnail': r're:^https?://.*\.jpg$', }, }, { 'url': 'https://www.mlb.com/news/blue-jays-kevin-pillar-goes-spidey-up-the-wall-to-rob-tim-beckham-of-a-homer/c-118550098', 
'md5': 'e09e37b552351fddbf4d9e699c924d68', 'info_dict': { 'id': '75609783', 'ext': 'mp4', 'title': 'Must C: Pillar climbs for catch', 'description': '4/15/15: Blue Jays outfielder Kevin Pillar continues his defensive dominance by climbing the wall in left to rob Tim Beckham of a home run', 'timestamp': 1429139220, 'upload_date': '20150415', } }, { 'url': 'https://www.mlb.com/video/hargrove-homers-off-caldwell/c-1352023483?tid=67793694', 'only_matching': True, }, { 'url': 'http://m.mlb.com/shared/video/embed/embed.html?content_id=35692085&topic_id=6479266&width=400&height=224&property=mlb', 'only_matching': True, }, { 'url': 'http://mlb.mlb.com/shared/video/embed/embed.html?content_id=36599553', 'only_matching': True, }, { 'url': 'http://mlb.mlb.com/es/video/play.jsp?content_id=36599553', 'only_matching': True, }, { 'url': 'https://www.mlb.com/cardinals/video/piscottys-great-sliding-catch/c-51175783', 'only_matching': True, }, { # From http://m.mlb.com/news/article/118550098/blue-jays-kevin-pillar-goes-spidey-up-the-wall-to-rob-tim-beckham-of-a-homer 'url': 'http://mlb.mlb.com/shared/video/embed/m-internal-embed.html?content_id=75609783&property=mlb&autoplay=true&hashmode=false&siteSection=mlb/multimedia/article_118550098/article_embed&club=mlb', 'only_matching': True, }, { 'url': 'https://www.mlb.com/cut4/carlos-gomez-borrowed-sunglasses-from-an-as-fan/c-278912842', 'only_matching': True, } ]
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/mnet.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( int_or_none, parse_duration, parse_iso8601, ) class MnetIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?mnet\.(?:com|interest\.me)/tv/vod/(?:.*?\bclip_id=)?(?P<id>[0-9]+)' _TESTS = [{ 'url': 'http://www.mnet.com/tv/vod/171008', 'info_dict': { 'id': '171008', 'title': 'SS_이해인@히든박스', 'description': 'md5:b9efa592c3918b615ba69fe9f8a05c55', 'duration': 88, 'upload_date': '20151231', 'timestamp': 1451564040, 'age_limit': 0, 'thumbnails': 'mincount:5', 'thumbnail': r're:^https?://.*\.jpg$', 'ext': 'flv', }, 'params': { # rtmp download 'skip_download': True, }, }, { 'url': 'http://mnet.interest.me/tv/vod/172790', 'only_matching': True, }, { 'url': 'http://www.mnet.com/tv/vod/vod_view.asp?clip_id=172790&tabMenu=', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) # TODO: extract rtmp formats # no stype -> rtmp url # stype=H -> m3u8 url # stype=M -> mpd url info = self._download_json( 'http://content.api.mnet.com/player/vodConfig', video_id, 'Downloading vod config JSON', query={ 'id': video_id, 'ctype': 'CLIP', 'stype': 'H', })['data']['info'] title = info['title'] cdn_data = self._download_json( info['cdn'], video_id, 'Downloading vod cdn JSON')['data'][0] m3u8_url = cdn_data['url'] token = cdn_data.get('token') if token and token != '-': m3u8_url += '?' 
+ token formats = self._extract_wowza_formats( m3u8_url, video_id, skip_protocols=['rtmp', 'rtsp', 'f4m']) self._sort_formats(formats) description = info.get('ment') duration = parse_duration(info.get('time')) timestamp = parse_iso8601(info.get('date'), delimiter=' ') age_limit = info.get('adult') if age_limit is not None: age_limit = 0 if age_limit == 'N' else 18 thumbnails = [{ 'id': thumb_format, 'url': thumb['url'], 'width': int_or_none(thumb.get('width')), 'height': int_or_none(thumb.get('height')), } for thumb_format, thumb in info.get('cover', {}).items() if thumb.get('url')] return { 'id': video_id, 'title': title, 'description': description, 'duration': duration, 'timestamp': timestamp, 'age_limit': age_limit, 'thumbnails': thumbnails, 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/moevideo.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( clean_html, int_or_none, ) class MoeVideoIE(InfoExtractor): IE_DESC = 'LetitBit video services: moevideo.net, playreplay.net and videochart.net' _VALID_URL = r'''(?x) https?://(?P<host>(?:www\.)? (?:(?:moevideo|playreplay|videochart)\.net|thesame\.tv))/ (?:video|framevideo|embed)/(?P<id>[0-9a-z]+\.[0-9A-Za-z]+)''' _API_URL = 'http://api.letitbit.net/' _API_KEY = 'tVL0gjqo5' _TESTS = [ { 'url': 'http://moevideo.net/video/00297.0036103fe3d513ef27915216fd29', 'md5': '129f5ae1f6585d0e9bb4f38e774ffb3a', 'info_dict': { 'id': '00297.0036103fe3d513ef27915216fd29', 'ext': 'flv', 'title': 'Sink cut out machine', 'description': 'md5:f29ff97b663aefa760bf7ca63c8ca8a8', 'thumbnail': r're:^https?://.*\.jpg$', 'width': 540, 'height': 360, 'duration': 179, 'filesize': 17822500, }, 'skip': 'Video has been removed', }, { 'url': 'http://playreplay.net/video/77107.7f325710a627383d40540d8e991a', 'md5': '74f0a014d5b661f0f0e2361300d1620e', 'info_dict': { 'id': '77107.7f325710a627383d40540d8e991a', 'ext': 'flv', 'title': 'Operacion Condor.', 'description': 'md5:7e68cb2fcda66833d5081c542491a9a3', 'thumbnail': r're:^https?://.*\.jpg$', 'width': 480, 'height': 296, 'duration': 6027, 'filesize': 588257923, }, 'skip': 'Video has been removed', }, ] def _real_extract(self, url): host, video_id = re.match(self._VALID_URL, url).groups() webpage = self._download_webpage( 'http://%s/video/%s' % (host, video_id), video_id, 'Downloading webpage') title = self._og_search_title(webpage) embed_webpage = self._download_webpage( 'http://%s/embed/%s' % (host, video_id), video_id, 'Downloading embed webpage') video = self._parse_json(self._search_regex( r'mvplayer\("#player"\s*,\s*({.+})', embed_webpage, 'mvplayer'), video_id)['video'] return { 'id': video_id, 'title': title, 'thumbnail': video.get('poster') or self._og_search_thumbnail(webpage), 'description': 
clean_html(self._og_search_description(webpage)), 'duration': int_or_none(self._og_search_property('video:duration', webpage)), 'url': video['ourUrl'], }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/mofosex.py
from __future__ import unicode_literals from ..utils import ( int_or_none, str_to_int, unified_strdate, ) from .keezmovies import KeezMoviesIE class MofosexIE(KeezMoviesIE): _VALID_URL = r'https?://(?:www\.)?mofosex\.com/videos/(?P<id>\d+)/(?P<display_id>[^/?#&.]+)\.html' _TESTS = [{ 'url': 'http://www.mofosex.com/videos/318131/amateur-teen-playing-and-masturbating-318131.html', 'md5': '558fcdafbb63a87c019218d6e49daf8a', 'info_dict': { 'id': '318131', 'display_id': 'amateur-teen-playing-and-masturbating-318131', 'ext': 'mp4', 'title': 'amateur teen playing and masturbating', 'thumbnail': r're:^https?://.*\.jpg$', 'upload_date': '20121114', 'view_count': int, 'like_count': int, 'dislike_count': int, 'age_limit': 18, } }, { # This video is no longer available 'url': 'http://www.mofosex.com/videos/5018/japanese-teen-music-video.html', 'only_matching': True, }] def _real_extract(self, url): webpage, info = self._extract_info(url) view_count = str_to_int(self._search_regex( r'VIEWS:</span>\s*([\d,.]+)', webpage, 'view count', fatal=False)) like_count = int_or_none(self._search_regex( r'id=["\']amountLikes["\'][^>]*>(\d+)', webpage, 'like count', fatal=False)) dislike_count = int_or_none(self._search_regex( r'id=["\']amountDislikes["\'][^>]*>(\d+)', webpage, 'like count', fatal=False)) upload_date = unified_strdate(self._html_search_regex( r'Added:</span>([^<]+)', webpage, 'upload date', fatal=False)) info.update({ 'view_count': view_count, 'like_count': like_count, 'dislike_count': dislike_count, 'upload_date': upload_date, 'thumbnail': self._og_search_thumbnail(webpage), }) return info
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/mojvideo.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( ExtractorError, parse_duration, ) class MojvideoIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?mojvideo\.com/video-(?P<display_id>[^/]+)/(?P<id>[a-f0-9]+)' _TEST = { 'url': 'http://www.mojvideo.com/video-v-avtu-pred-mano-rdecelaska-alfi-nipic/3d1ed4497707730b2906', 'md5': 'f7fd662cc8ce2be107b0d4f2c0483ae7', 'info_dict': { 'id': '3d1ed4497707730b2906', 'display_id': 'v-avtu-pred-mano-rdecelaska-alfi-nipic', 'ext': 'mp4', 'title': 'V avtu pred mano rdečelaska - Alfi Nipič', 'thumbnail': r're:^http://.*\.jpg$', 'duration': 242, } } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') display_id = mobj.group('display_id') # XML is malformed playerapi = self._download_webpage( 'http://www.mojvideo.com/playerapi.php?v=%s&t=1' % video_id, display_id) if '<error>true</error>' in playerapi: error_desc = self._html_search_regex( r'<errordesc>([^<]*)</errordesc>', playerapi, 'error description', fatal=False) raise ExtractorError('%s said: %s' % (self.IE_NAME, error_desc), expected=True) title = self._html_search_regex( r'<title>([^<]+)</title>', playerapi, 'title') video_url = self._html_search_regex( r'<file>([^<]+)</file>', playerapi, 'video URL') thumbnail = self._html_search_regex( r'<preview>([^<]+)</preview>', playerapi, 'thumbnail', fatal=False) duration = parse_duration(self._html_search_regex( r'<duration>([^<]+)</duration>', playerapi, 'duration', fatal=False)) return { 'id': video_id, 'display_id': display_id, 'url': video_url, 'title': title, 'thumbnail': thumbnail, 'duration': duration, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/morningstar.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor class MorningstarIE(InfoExtractor): IE_DESC = 'morningstar.com' _VALID_URL = r'https?://(?:(?:www|news)\.)morningstar\.com/[cC]over/video[cC]enter\.aspx\?id=(?P<id>[0-9]+)' _TESTS = [{ 'url': 'http://www.morningstar.com/cover/videocenter.aspx?id=615869', 'md5': '6c0acface7a787aadc8391e4bbf7b0f5', 'info_dict': { 'id': '615869', 'ext': 'mp4', 'title': 'Get Ahead of the Curve on 2013 Taxes', 'description': "Vanguard's Joel Dickson on managing higher tax rates for high-income earners and fund capital-gain distributions in 2013.", 'thumbnail': r're:^https?://.*m(?:orning)?star\.com/.+thumb\.jpg$' } }, { 'url': 'http://news.morningstar.com/cover/videocenter.aspx?id=825556', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') webpage = self._download_webpage(url, video_id) title = self._html_search_regex( r'<h1 id="titleLink">(.*?)</h1>', webpage, 'title') video_url = self._html_search_regex( r'<input type="hidden" id="hidVideoUrl" value="([^"]+)"', webpage, 'video URL') thumbnail = self._html_search_regex( r'<input type="hidden" id="hidSnapshot" value="([^"]+)"', webpage, 'thumbnail', fatal=False) description = self._html_search_regex( r'<div id="mstarDeck".*?>(.*?)</div>', webpage, 'description', fatal=False) return { 'id': video_id, 'title': title, 'url': video_url, 'thumbnail': thumbnail, 'description': description, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/motherless.py
from __future__ import unicode_literals import datetime import re from .common import InfoExtractor from ..compat import compat_urlparse from ..utils import ( ExtractorError, InAdvancePagedList, orderedSet, str_to_int, unified_strdate, ) class MotherlessIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?motherless\.com/(?:g/[a-z0-9_]+/)?(?P<id>[A-Z0-9]+)' _TESTS = [{ 'url': 'http://motherless.com/AC3FFE1', 'md5': '310f62e325a9fafe64f68c0bccb6e75f', 'info_dict': { 'id': 'AC3FFE1', 'ext': 'mp4', 'title': 'Fucked in the ass while playing PS3', 'categories': ['Gaming', 'anal', 'reluctant', 'rough', 'Wife'], 'upload_date': '20100913', 'uploader_id': 'famouslyfuckedup', 'thumbnail': r're:http://.*\.jpg', 'age_limit': 18, } }, { 'url': 'http://motherless.com/532291B', 'md5': 'bc59a6b47d1f958e61fbd38a4d31b131', 'info_dict': { 'id': '532291B', 'ext': 'mp4', 'title': 'Amazing girl playing the omegle game, PERFECT!', 'categories': ['Amateur', 'webcam', 'omegle', 'pink', 'young', 'masturbate', 'teen', 'game', 'hairy'], 'upload_date': '20140622', 'uploader_id': 'Sulivana7x', 'thumbnail': r're:http://.*\.jpg', 'age_limit': 18, }, 'skip': '404', }, { 'url': 'http://motherless.com/g/cosplay/633979F', 'md5': '0b2a43f447a49c3e649c93ad1fafa4a0', 'info_dict': { 'id': '633979F', 'ext': 'mp4', 'title': 'Turtlette', 'categories': ['superheroine heroine superher'], 'upload_date': '20140827', 'uploader_id': 'shade0230', 'thumbnail': r're:http://.*\.jpg', 'age_limit': 18, } }, { # no keywords 'url': 'http://motherless.com/8B4BBC1', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) if any(p in webpage for p in ( '<title>404 - MOTHERLESS.COM<', ">The page you're looking for cannot be found.<")): raise ExtractorError('Video %s does not exist' % video_id, expected=True) if '>The content you are trying to view is for friends only.' 
in webpage: raise ExtractorError('Video %s is for friends only' % video_id, expected=True) title = self._html_search_regex( r'id="view-upload-title">\s+([^<]+)<', webpage, 'title') video_url = (self._html_search_regex( (r'setup\(\{\s*["\']file["\']\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1', r'fileurl\s*=\s*(["\'])(?P<url>(?:(?!\1).)+)\1'), webpage, 'video URL', default=None, group='url') or 'http://cdn4.videos.motherlessmedia.com/videos/%s.mp4?fs=opencloud' % video_id) age_limit = self._rta_search(webpage) view_count = str_to_int(self._html_search_regex( r'<strong>Views</strong>\s+([^<]+)<', webpage, 'view count', fatal=False)) like_count = str_to_int(self._html_search_regex( r'<strong>Favorited</strong>\s+([^<]+)<', webpage, 'like count', fatal=False)) upload_date = self._html_search_regex( r'<strong>Uploaded</strong>\s+([^<]+)<', webpage, 'upload date') if 'Ago' in upload_date: days = int(re.search(r'([0-9]+)', upload_date).group(1)) upload_date = (datetime.datetime.now() - datetime.timedelta(days=days)).strftime('%Y%m%d') else: upload_date = unified_strdate(upload_date) comment_count = webpage.count('class="media-comment-contents"') uploader_id = self._html_search_regex( r'"thumb-member-username">\s+<a href="/m/([^"]+)"', webpage, 'uploader_id') categories = self._html_search_meta('keywords', webpage, default=None) if categories: categories = [cat.strip() for cat in categories.split(',')] return { 'id': video_id, 'title': title, 'upload_date': upload_date, 'uploader_id': uploader_id, 'thumbnail': self._og_search_thumbnail(webpage), 'categories': categories, 'view_count': view_count, 'like_count': like_count, 'comment_count': comment_count, 'age_limit': age_limit, 'url': video_url, } class MotherlessGroupIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?motherless\.com/gv?/(?P<id>[a-z0-9_]+)' _TESTS = [{ 'url': 'http://motherless.com/g/movie_scenes', 'info_dict': { 'id': 'movie_scenes', 'title': 'Movie Scenes', 'description': 'Hot and sexy scenes from "regular" 
movies... ' 'Beautiful actresses fully nude... A looot of ' 'skin! :)Enjoy!', }, 'playlist_mincount': 662, }, { 'url': 'http://motherless.com/gv/sex_must_be_funny', 'info_dict': { 'id': 'sex_must_be_funny', 'title': 'Sex must be funny', 'description': 'Sex can be funny. Wide smiles,laugh, games, fun of ' 'any kind!' }, 'playlist_mincount': 9, }] @classmethod def suitable(cls, url): return (False if MotherlessIE.suitable(url) else super(MotherlessGroupIE, cls).suitable(url)) def _extract_entries(self, webpage, base): entries = [] for mobj in re.finditer( r'href="(?P<href>/[^"]+)"[^>]*>(?:\s*<img[^>]+alt="[^-]+-\s(?P<title>[^"]+)")?', webpage): video_url = compat_urlparse.urljoin(base, mobj.group('href')) if not MotherlessIE.suitable(video_url): continue video_id = MotherlessIE._match_id(video_url) title = mobj.group('title') entries.append(self.url_result( video_url, ie=MotherlessIE.ie_key(), video_id=video_id, video_title=title)) # Alternative fallback if not entries: entries = [ self.url_result( compat_urlparse.urljoin(base, '/' + entry_id), ie=MotherlessIE.ie_key(), video_id=entry_id) for entry_id in orderedSet(re.findall( r'data-codename=["\']([A-Z0-9]+)', webpage))] return entries def _real_extract(self, url): group_id = self._match_id(url) page_url = compat_urlparse.urljoin(url, '/gv/%s' % group_id) webpage = self._download_webpage(page_url, group_id) title = self._search_regex( r'<title>([\w\s]+\w)\s+-', webpage, 'title', fatal=False) description = self._html_search_meta( 'description', webpage, fatal=False) page_count = self._int(self._search_regex( r'(\d+)</(?:a|span)><(?:a|span)[^>]+>\s*NEXT', webpage, 'page_count'), 'page_count') PAGE_SIZE = 80 def _get_page(idx): webpage = self._download_webpage( page_url, group_id, query={'page': idx + 1}, note='Downloading page %d/%d' % (idx + 1, page_count) ) for entry in self._extract_entries(webpage, url): yield entry playlist = InAdvancePagedList(_get_page, page_count, PAGE_SIZE) return { '_type': 'playlist', 'id': 
group_id, 'title': title, 'description': description, 'entries': playlist }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/motorsport.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( compat_urlparse, ) class MotorsportIE(InfoExtractor): IE_DESC = 'motorsport.com' _VALID_URL = r'https?://(?:www\.)?motorsport\.com/[^/?#]+/video/(?:[^/?#]+/)(?P<id>[^/]+)/?(?:$|[?#])' _TEST = { 'url': 'http://www.motorsport.com/f1/video/main-gallery/red-bull-racing-2014-rules-explained/', 'info_dict': { 'id': '2-T3WuR-KMM', 'ext': 'mp4', 'title': 'Red Bull Racing: 2014 Rules Explained', 'duration': 208, 'description': 'A new clip from Red Bull sees Daniel Ricciardo and Sebastian Vettel explain the 2014 Formula One regulations – which are arguably the most complex the sport has ever seen.', 'uploader': 'mcomstaff', 'uploader_id': 'UC334JIYKkVnyFoNCclfZtHQ', 'upload_date': '20140903', 'thumbnail': r're:^https?://.+\.jpg$' }, 'add_ie': ['Youtube'], 'params': { 'skip_download': True, }, } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) iframe_path = self._html_search_regex( r'<iframe id="player_iframe"[^>]+src="([^"]+)"', webpage, 'iframe path') iframe = self._download_webpage( compat_urlparse.urljoin(url, iframe_path), display_id, 'Downloading iframe') youtube_id = self._search_regex( r'www.youtube.com/embed/(.{11})', iframe, 'youtube id') return { '_type': 'url_transparent', 'display_id': display_id, 'url': 'https://youtube.com/watch?v=%s' % youtube_id, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/movieclips.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( smuggle_url, float_or_none, parse_iso8601, update_url_query, ) class MovieClipsIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?movieclips\.com/videos/.+-(?P<id>\d+)(?:\?|$)' _TEST = { 'url': 'http://www.movieclips.com/videos/warcraft-trailer-1-561180739597', 'md5': '42b5a0352d4933a7bd54f2104f481244', 'info_dict': { 'id': 'pKIGmG83AqD9', 'ext': 'mp4', 'title': 'Warcraft Trailer 1', 'description': 'Watch Trailer 1 from Warcraft (2016). Legendary’s WARCRAFT is a 3D epic adventure of world-colliding conflict based.', 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1446843055, 'upload_date': '20151106', 'uploader': 'Movieclips', }, 'add_ie': ['ThePlatform'], } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) video = next(v for v in self._parse_json(self._search_regex( r'var\s+__REACT_ENGINE__\s*=\s*({.+});', webpage, 'react engine'), video_id)['playlist']['videos'] if v['id'] == video_id) return { '_type': 'url_transparent', 'ie_key': 'ThePlatform', 'url': smuggle_url(update_url_query( video['contentUrl'], {'mbr': 'true'}), {'force_smil_url': True}), 'title': self._og_search_title(webpage), 'description': self._html_search_meta('description', webpage), 'duration': float_or_none(video.get('duration')), 'timestamp': parse_iso8601(video.get('dateCreated')), 'thumbnail': video.get('defaultImage'), 'uploader': video.get('provider'), }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/moviezine.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor class MoviezineIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?moviezine\.se/video/(?P<id>[^?#]+)' _TEST = { 'url': 'http://www.moviezine.se/video/205866', 'info_dict': { 'id': '205866', 'ext': 'mp4', 'title': 'Oculus - Trailer 1', 'description': 'md5:40cc6790fc81d931850ca9249b40e8a4', 'thumbnail': r're:http://.*\.jpg', }, } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') webpage = self._download_webpage(url, video_id) jsplayer = self._download_webpage('http://www.moviezine.se/api/player.js?video=%s' % video_id, video_id, 'Downloading js api player') formats = [{ 'format_id': 'sd', 'url': self._html_search_regex(r'file: "(.+?)",', jsplayer, 'file'), 'quality': 0, 'ext': 'mp4', }] self._sort_formats(formats) return { 'id': video_id, 'title': self._search_regex(r'title: "(.+?)",', jsplayer, 'title'), 'thumbnail': self._search_regex(r'image: "(.+?)",', jsplayer, 'image'), 'formats': formats, 'description': self._og_search_description(webpage), }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/movingimage.py
from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( unescapeHTML, parse_duration, ) class MovingImageIE(InfoExtractor): _VALID_URL = r'https?://movingimage\.nls\.uk/film/(?P<id>\d+)' _TEST = { 'url': 'http://movingimage.nls.uk/film/3561', 'md5': '4caa05c2b38453e6f862197571a7be2f', 'info_dict': { 'id': '3561', 'ext': 'mp4', 'title': 'SHETLAND WOOL', 'description': 'md5:c5afca6871ad59b4271e7704fe50ab04', 'duration': 900, 'thumbnail': r're:^https?://.*\.jpg$', }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) formats = self._extract_m3u8_formats( self._html_search_regex(r'file\s*:\s*"([^"]+)"', webpage, 'm3u8 manifest URL'), video_id, ext='mp4', entry_protocol='m3u8_native') def search_field(field_name, fatal=False): return self._search_regex( r'<span\s+class="field_title">%s:</span>\s*<span\s+class="field_content">([^<]+)</span>' % field_name, webpage, 'title', fatal=fatal) title = unescapeHTML(search_field('Title', fatal=True)).strip('()[]') description = unescapeHTML(search_field('Description')) duration = parse_duration(search_field('Running time')) thumbnail = self._search_regex( r"image\s*:\s*'([^']+)'", webpage, 'thumbnail', fatal=False) return { 'id': video_id, 'formats': formats, 'title': title, 'description': description, 'duration': duration, 'thumbnail': thumbnail, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/msn.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_str from ..utils import ( determine_ext, ExtractorError, int_or_none, unescapeHTML, ) class MSNIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?msn\.com/(?:[^/]+/)+(?P<display_id>[^/]+)/[a-z]{2}-(?P<id>[\da-zA-Z]+)' _TESTS = [{ 'url': 'http://www.msn.com/en-ae/foodanddrink/joinourtable/criminal-minds-shemar-moore-shares-a-touching-goodbye-message/vp-BBqQYNE', 'md5': '8442f66c116cbab1ff7098f986983458', 'info_dict': { 'id': 'BBqQYNE', 'display_id': 'criminal-minds-shemar-moore-shares-a-touching-goodbye-message', 'ext': 'mp4', 'title': 'Criminal Minds - Shemar Moore Shares A Touching Goodbye Message', 'description': 'md5:e8e89b897b222eb33a6b5067a8f1bc25', 'duration': 104, 'uploader': 'CBS Entertainment', 'uploader_id': 'IT0X5aoJ6bJgYerJXSDCgFmYPB1__54v', }, }, { 'url': 'http://www.msn.com/en-ae/news/offbeat/meet-the-nine-year-old-self-made-millionaire/ar-BBt6ZKf', 'only_matching': True, }, { 'url': 'http://www.msn.com/en-ae/video/watch/obama-a-lot-of-people-will-be-disappointed/vi-AAhxUMH', 'only_matching': True, }, { # geo restricted 'url': 'http://www.msn.com/en-ae/foodanddrink/joinourtable/the-first-fart-makes-you-laugh-the-last-fart-makes-you-cry/vp-AAhzIBU', 'only_matching': True, }, { 'url': 'http://www.msn.com/en-ae/entertainment/bollywood/watch-how-salman-khan-reacted-when-asked-if-he-would-apologize-for-his-‘raped-woman’-comment/vi-AAhvzW6', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id, display_id = mobj.group('id', 'display_id') webpage = self._download_webpage(url, display_id) video = self._parse_json( self._search_regex( r'data-metadata\s*=\s*(["\'])(?P<data>.+?)\1', webpage, 'video data', default='{}', group='data'), display_id, transform_source=unescapeHTML) if not video: error = unescapeHTML(self._search_regex( r'data-error=(["\'])(?P<error>.+?)\1', webpage, 'error', 
group='error')) raise ExtractorError('%s said: %s' % (self.IE_NAME, error), expected=True) title = video['title'] formats = [] for file_ in video.get('videoFiles', []): format_url = file_.get('url') if not format_url: continue if 'm3u8' in format_url: # m3u8_native should not be used here until # https://github.com/ytdl-org/youtube-dl/issues/9913 is fixed m3u8_formats = self._extract_m3u8_formats( format_url, display_id, 'mp4', m3u8_id='hls', fatal=False) formats.extend(m3u8_formats) elif determine_ext(format_url) == 'ism': formats.extend(self._extract_ism_formats( format_url + '/Manifest', display_id, 'mss', fatal=False)) else: formats.append({ 'url': format_url, 'ext': 'mp4', 'format_id': 'http', 'width': int_or_none(file_.get('width')), 'height': int_or_none(file_.get('height')), }) self._sort_formats(formats) subtitles = {} for file_ in video.get('files', []): format_url = file_.get('url') format_code = file_.get('formatCode') if not format_url or not format_code: continue if compat_str(format_code) == '3100': subtitles.setdefault(file_.get('culture', 'en'), []).append({ 'ext': determine_ext(format_url, 'ttml'), 'url': format_url, }) return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': video.get('description'), 'thumbnail': video.get('headlineImage', {}).get('url'), 'duration': int_or_none(video.get('durationSecs')), 'uploader': video.get('sourceFriendly'), 'uploader_id': video.get('providerId'), 'creator': video.get('creator'), 'subtitles': subtitles, 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/mtv.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_str, compat_xpath, ) from ..utils import ( ExtractorError, find_xpath_attr, fix_xml_ampersands, float_or_none, HEADRequest, RegexNotFoundError, sanitized_Request, strip_or_none, timeconvert, try_get, unescapeHTML, update_url_query, url_basename, xpath_text, ) def _media_xml_tag(tag): return '{http://search.yahoo.com/mrss/}%s' % tag class MTVServicesInfoExtractor(InfoExtractor): _MOBILE_TEMPLATE = None _LANG = None @staticmethod def _id_from_uri(uri): return uri.split(':')[-1] @staticmethod def _remove_template_parameter(url): # Remove the templates, like &device={device} return re.sub(r'&[^=]*?={.*?}(?=(&|$))', '', url) def _get_feed_url(self, uri): return self._FEED_URL def _get_thumbnail_url(self, uri, itemdoc): search_path = '%s/%s' % (_media_xml_tag('group'), _media_xml_tag('thumbnail')) thumb_node = itemdoc.find(search_path) if thumb_node is None: return None return thumb_node.get('url') or thumb_node.text or None def _extract_mobile_video_formats(self, mtvn_id): webpage_url = self._MOBILE_TEMPLATE % mtvn_id req = sanitized_Request(webpage_url) # Otherwise we get a webpage that would execute some javascript req.add_header('User-Agent', 'curl/7') webpage = self._download_webpage(req, mtvn_id, 'Downloading mobile page') metrics_url = unescapeHTML(self._search_regex(r'<a href="(http://metrics.+?)"', webpage, 'url')) req = HEADRequest(metrics_url) response = self._request_webpage(req, mtvn_id, 'Resolving url') url = response.geturl() # Transform the url to get the best quality: url = re.sub(r'.+pxE=mp4', 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=0+_pxK=18639+_pxE=mp4', url, 1) return [{'url': url, 'ext': 'mp4'}] def _extract_video_formats(self, mdoc, mtvn_id, video_id): if re.match(r'.*/(error_country_block\.swf|geoblock\.mp4|copyright_error\.flv(?:\?geo\b.+?)?)$', mdoc.find('.//src').text) is not None: if mtvn_id is not None and 
self._MOBILE_TEMPLATE is not None: self.to_screen('The normal version is not available from your ' 'country, trying with the mobile version') return self._extract_mobile_video_formats(mtvn_id) raise ExtractorError('This video is not available from your country.', expected=True) formats = [] for rendition in mdoc.findall('.//rendition'): if rendition.get('method') == 'hls': hls_url = rendition.find('./src').text formats.extend(self._extract_m3u8_formats( hls_url, video_id, ext='mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) else: # fms try: _, _, ext = rendition.attrib['type'].partition('/') rtmp_video_url = rendition.find('./src').text if 'error_not_available.swf' in rtmp_video_url: raise ExtractorError( '%s said: video is not available' % self.IE_NAME, expected=True) if rtmp_video_url.endswith('siteunavail.png'): continue formats.extend([{ 'ext': 'flv' if rtmp_video_url.startswith('rtmp') else ext, 'url': rtmp_video_url, 'format_id': '-'.join(filter(None, [ 'rtmp' if rtmp_video_url.startswith('rtmp') else None, rendition.get('bitrate')])), 'width': int(rendition.get('width')), 'height': int(rendition.get('height')), }]) except (KeyError, TypeError): raise ExtractorError('Invalid rendition field.') if formats: self._sort_formats(formats) return formats def _extract_subtitles(self, mdoc, mtvn_id): subtitles = {} for transcript in mdoc.findall('.//transcript'): if transcript.get('kind') != 'captions': continue lang = transcript.get('srclang') for typographic in transcript.findall('./typographic'): sub_src = typographic.get('src') if not sub_src: continue ext = typographic.get('format') if ext == 'cea-608': ext = 'scc' subtitles.setdefault(lang, []).append({ 'url': compat_str(sub_src), 'ext': ext }) return subtitles def _get_video_info(self, itemdoc, use_hls=True): uri = itemdoc.find('guid').text video_id = self._id_from_uri(uri) self.report_extraction(video_id) content_el = itemdoc.find('%s/%s' % (_media_xml_tag('group'), _media_xml_tag('content'))) 
mediagen_url = self._remove_template_parameter(content_el.attrib['url']) mediagen_url = mediagen_url.replace('device={device}', '') if 'acceptMethods' not in mediagen_url: mediagen_url += '&' if '?' in mediagen_url else '?' mediagen_url += 'acceptMethods=' mediagen_url += 'hls' if use_hls else 'fms' mediagen_doc = self._download_xml( mediagen_url, video_id, 'Downloading video urls', fatal=False) if mediagen_doc is False: return None item = mediagen_doc.find('./video/item') if item is not None and item.get('type') == 'text': message = '%s returned error: ' % self.IE_NAME if item.get('code') is not None: message += '%s - ' % item.get('code') message += item.text raise ExtractorError(message, expected=True) description = strip_or_none(xpath_text(itemdoc, 'description')) timestamp = timeconvert(xpath_text(itemdoc, 'pubDate')) title_el = None if title_el is None: title_el = find_xpath_attr( itemdoc, './/{http://search.yahoo.com/mrss/}category', 'scheme', 'urn:mtvn:video_title') if title_el is None: title_el = itemdoc.find(compat_xpath('.//{http://search.yahoo.com/mrss/}title')) if title_el is None: title_el = itemdoc.find(compat_xpath('.//title')) if title_el.text is None: title_el = None title = title_el.text if title is None: raise ExtractorError('Could not find video title') title = title.strip() # This a short id that's used in the webpage urls mtvn_id = None mtvn_id_node = find_xpath_attr(itemdoc, './/{http://search.yahoo.com/mrss/}category', 'scheme', 'urn:mtvn:id') if mtvn_id_node is not None: mtvn_id = mtvn_id_node.text formats = self._extract_video_formats(mediagen_doc, mtvn_id, video_id) # Some parts of complete video may be missing (e.g. 
missing Act 3 in # http://www.southpark.de/alle-episoden/s14e01-sexual-healing) if not formats: return None self._sort_formats(formats) return { 'title': title, 'formats': formats, 'subtitles': self._extract_subtitles(mediagen_doc, mtvn_id), 'id': video_id, 'thumbnail': self._get_thumbnail_url(uri, itemdoc), 'description': description, 'duration': float_or_none(content_el.attrib.get('duration')), 'timestamp': timestamp, } def _get_feed_query(self, uri): data = {'uri': uri} if self._LANG: data['lang'] = self._LANG return data def _get_videos_info(self, uri, use_hls=True): video_id = self._id_from_uri(uri) feed_url = self._get_feed_url(uri) info_url = update_url_query(feed_url, self._get_feed_query(uri)) return self._get_videos_info_from_url(info_url, video_id, use_hls) def _get_videos_info_from_url(self, url, video_id, use_hls=True): idoc = self._download_xml( url, video_id, 'Downloading info', transform_source=fix_xml_ampersands) title = xpath_text(idoc, './channel/title') description = xpath_text(idoc, './channel/description') entries = [] for item in idoc.findall('.//item'): info = self._get_video_info(item, use_hls) if info: entries.append(info) return self.playlist_result( entries, playlist_title=title, playlist_description=description) def _extract_triforce_mgid(self, webpage, data_zone=None, video_id=None): triforce_feed = self._parse_json(self._search_regex( r'triforceManifestFeed\s*=\s*({.+?})\s*;\s*\n', webpage, 'triforce feed', default='{}'), video_id, fatal=False) data_zone = self._search_regex( r'data-zone=(["\'])(?P<zone>.+?_lc_promo.*?)\1', webpage, 'data zone', default=data_zone, group='zone') feed_url = try_get( triforce_feed, lambda x: x['manifest']['zones'][data_zone]['feed'], compat_str) if not feed_url: return feed = self._download_json(feed_url, video_id, fatal=False) if not feed: return return try_get(feed, lambda x: x['result']['data']['id'], compat_str) def _extract_mgid(self, webpage): try: # the url can be 
http://media.mtvnservices.com/fb/{mgid}.swf # or http://media.mtvnservices.com/{mgid} og_url = self._og_search_video_url(webpage) mgid = url_basename(og_url) if mgid.endswith('.swf'): mgid = mgid[:-4] except RegexNotFoundError: mgid = None if mgid is None or ':' not in mgid: mgid = self._search_regex( [r'data-mgid="(.*?)"', r'swfobject\.embedSWF\(".*?(mgid:.*?)"'], webpage, 'mgid', default=None) if not mgid: sm4_embed = self._html_search_meta( 'sm4:video:embed', webpage, 'sm4 embed', default='') mgid = self._search_regex( r'embed/(mgid:.+?)["\'&?/]', sm4_embed, 'mgid', default=None) if not mgid: mgid = self._extract_triforce_mgid(webpage) return mgid def _real_extract(self, url): title = url_basename(url) webpage = self._download_webpage(url, title) mgid = self._extract_mgid(webpage) videos_info = self._get_videos_info(mgid) return videos_info class MTVServicesEmbeddedIE(MTVServicesInfoExtractor): IE_NAME = 'mtvservices:embedded' _VALID_URL = r'https?://media\.mtvnservices\.com/embed/(?P<mgid>.+?)(\?|/|$)' _TEST = { # From http://www.thewrap.com/peter-dinklage-sums-up-game-of-thrones-in-45-seconds-video/ 'url': 'http://media.mtvnservices.com/embed/mgid:uma:video:mtv.com:1043906/cp~vid%3D1043906%26uri%3Dmgid%3Auma%3Avideo%3Amtv.com%3A1043906', 'md5': 'cb349b21a7897164cede95bd7bf3fbb9', 'info_dict': { 'id': '1043906', 'ext': 'mp4', 'title': 'Peter Dinklage Sums Up \'Game Of Thrones\' In 45 Seconds', 'description': '"Sexy sexy sexy, stabby stabby stabby, beautiful language," says Peter Dinklage as he tries summarizing "Game of Thrones" in under a minute.', 'timestamp': 1400126400, 'upload_date': '20140515', }, } @staticmethod def _extract_url(webpage): mobj = re.search( r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//media.mtvnservices.com/embed/.+?)\1', webpage) if mobj: return mobj.group('url') def _get_feed_url(self, uri): video_id = self._id_from_uri(uri) config = self._download_json( 'http://media.mtvnservices.com/pmt/e1/access/index.html?uri=%s&configtype=edge' 
% uri, video_id) return self._remove_template_parameter(config['feedWithQueryParams']) def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) mgid = mobj.group('mgid') return self._get_videos_info(mgid) class MTVIE(MTVServicesInfoExtractor): IE_NAME = 'mtv' _VALID_URL = r'https?://(?:www\.)?mtv\.com/(?:video-clips|(?:full-)?episodes)/(?P<id>[^/?#.]+)' _FEED_URL = 'http://www.mtv.com/feeds/mrss/' _TESTS = [{ 'url': 'http://www.mtv.com/video-clips/vl8qof/unlocking-the-truth-trailer', 'md5': '1edbcdf1e7628e414a8c5dcebca3d32b', 'info_dict': { 'id': '5e14040d-18a4-47c4-a582-43ff602de88e', 'ext': 'mp4', 'title': 'Unlocking The Truth|July 18, 2016|1|101|Trailer', 'description': '"Unlocking the Truth" premieres August 17th at 11/10c.', 'timestamp': 1468846800, 'upload_date': '20160718', }, }, { 'url': 'http://www.mtv.com/full-episodes/94tujl/unlocking-the-truth-gates-of-hell-season-1-ep-101', 'only_matching': True, }, { 'url': 'http://www.mtv.com/episodes/g8xu7q/teen-mom-2-breaking-the-wall-season-7-ep-713', 'only_matching': True, }] class MTVJapanIE(MTVServicesInfoExtractor): IE_NAME = 'mtvjapan' _VALID_URL = r'https?://(?:www\.)?mtvjapan\.com/videos/(?P<id>[0-9a-z]+)' _TEST = { 'url': 'http://www.mtvjapan.com/videos/prayht/fresh-info-cadillac-escalade', 'info_dict': { 'id': 'bc01da03-6fe5-4284-8880-f291f4e368f5', 'ext': 'mp4', 'title': '【Fresh Info】Cadillac ESCALADE Sport Edition', }, 'params': { 'skip_download': True, }, } _GEO_COUNTRIES = ['JP'] _FEED_URL = 'http://feeds.mtvnservices.com/od/feed/intl-mrss-player-feed' def _get_feed_query(self, uri): return { 'arcEp': 'mtvjapan.com', 'mgid': uri, } class MTVVideoIE(MTVServicesInfoExtractor): IE_NAME = 'mtv:video' _VALID_URL = r'''(?x)^https?:// (?:(?:www\.)?mtv\.com/videos/.+?/(?P<videoid>[0-9]+)/[^/]+$| m\.mtv\.com/videos/video\.rbml\?.*?id=(?P<mgid>[^&]+))''' _FEED_URL = 'http://www.mtv.com/player/embed/AS3/rss/' _TESTS = [ { 'url': 'http://www.mtv.com/videos/misc/853555/ours-vh1-storytellers.jhtml', 
'md5': '850f3f143316b1e71fa56a4edfd6e0f8', 'info_dict': { 'id': '853555', 'ext': 'mp4', 'title': 'Taylor Swift - "Ours (VH1 Storytellers)"', 'description': 'Album: Taylor Swift performs "Ours" for VH1 Storytellers at Harvey Mudd College.', 'timestamp': 1352610000, 'upload_date': '20121111', }, }, ] def _get_thumbnail_url(self, uri, itemdoc): return 'http://mtv.mtvnimages.com/uri/' + uri def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('videoid') uri = mobj.groupdict().get('mgid') if uri is None: webpage = self._download_webpage(url, video_id) # Some videos come from Vevo.com m_vevo = re.search( r'(?s)isVevoVideo = true;.*?vevoVideoId = "(.*?)";', webpage) if m_vevo: vevo_id = m_vevo.group(1) self.to_screen('Vevo video detected: %s' % vevo_id) return self.url_result('vevo:%s' % vevo_id, ie='Vevo') uri = self._html_search_regex(r'/uri/(.*?)\?', webpage, 'uri') return self._get_videos_info(uri) class MTVDEIE(MTVServicesInfoExtractor): IE_NAME = 'mtv.de' _VALID_URL = r'https?://(?:www\.)?mtv\.de/(?:musik/videoclips|folgen|news)/(?P<id>[0-9a-z]+)' _TESTS = [{ 'url': 'http://www.mtv.de/musik/videoclips/2gpnv7/Traum', 'info_dict': { 'id': 'd5d472bc-f5b7-11e5-bffd-a4badb20dab5', 'ext': 'mp4', 'title': 'Traum', 'description': 'Traum', }, 'params': { # rtmp download 'skip_download': True, }, 'skip': 'Blocked at Travis CI', }, { # mediagen URL without query (e.g. 
http://videos.mtvnn.com/mediagen/e865da714c166d18d6f80893195fcb97) 'url': 'http://www.mtv.de/folgen/6b1ylu/teen-mom-2-enthuellungen-S5-F1', 'info_dict': { 'id': '1e5a878b-31c5-11e7-a442-0e40cf2fc285', 'ext': 'mp4', 'title': 'Teen Mom 2', 'description': 'md5:dc65e357ef7e1085ed53e9e9d83146a7', }, 'params': { # rtmp download 'skip_download': True, }, 'skip': 'Blocked at Travis CI', }, { 'url': 'http://www.mtv.de/news/glolix/77491-mtv-movies-spotlight--pixels--teil-3', 'info_dict': { 'id': 'local_playlist-4e760566473c4c8c5344', 'ext': 'mp4', 'title': 'Article_mtv-movies-spotlight-pixels-teil-3_short-clips_part1', 'description': 'MTV Movies Supercut', }, 'params': { # rtmp download 'skip_download': True, }, 'skip': 'Das Video kann zur Zeit nicht abgespielt werden.', }] _GEO_COUNTRIES = ['DE'] _FEED_URL = 'http://feeds.mtvnservices.com/od/feed/intl-mrss-player-feed' def _get_feed_query(self, uri): return { 'arcEp': 'mtv.de', 'mgid': uri, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/muenchentv.py
# coding: utf-8 from __future__ import unicode_literals import json from .common import InfoExtractor from ..utils import ( determine_ext, int_or_none, js_to_json, ) class MuenchenTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?muenchen\.tv/livestream' IE_DESC = 'münchen.tv' _TEST = { 'url': 'http://www.muenchen.tv/livestream/', 'info_dict': { 'id': '5334', 'display_id': 'live', 'ext': 'mp4', 'title': 're:^münchen.tv-Livestream [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'is_live': True, 'thumbnail': r're:^https?://.*\.jpg$' }, 'params': { 'skip_download': True, } } def _real_extract(self, url): display_id = 'live' webpage = self._download_webpage(url, display_id) title = self._live_title(self._og_search_title(webpage)) data_js = self._search_regex( r'(?s)\nplaylist:\s*(\[.*?}\]),', webpage, 'playlist configuration') data_json = js_to_json(data_js) data = json.loads(data_json)[0] video_id = data['mediaid'] thumbnail = data.get('image') formats = [] for format_num, s in enumerate(data['sources']): ext = determine_ext(s['file'], None) label_str = s.get('label') if label_str is None: label_str = '_%d' % format_num if ext is None: format_id = label_str else: format_id = '%s-%s' % (ext, label_str) formats.append({ 'url': s['file'], 'tbr': int_or_none(s.get('label')), 'ext': 'mp4', 'format_id': format_id, 'preference': -100 if '.smil' in s['file'] else 0, }) self._sort_formats(formats) return { 'id': video_id, 'display_id': display_id, 'title': title, 'formats': formats, 'is_live': True, 'thumbnail': thumbnail, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/musicplayon.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_urlparse from ..utils import ( int_or_none, js_to_json, mimetype2ext, ) class MusicPlayOnIE(InfoExtractor): _VALID_URL = r'https?://(?:.+?\.)?musicplayon\.com/play(?:-touch)?\?(?:v|pl=\d+&play)=(?P<id>\d+)' _TESTS = [{ 'url': 'http://en.musicplayon.com/play?v=433377', 'md5': '00cdcdea1726abdf500d1e7fd6dd59bb', 'info_dict': { 'id': '433377', 'ext': 'mp4', 'title': 'Rick Ross - Interview On Chelsea Lately (2014)', 'description': 'Rick Ross Interview On Chelsea Lately', 'duration': 342, 'uploader': 'ultrafish', }, }, { 'url': 'http://en.musicplayon.com/play?pl=102&play=442629', 'only_matching': True, }] _URL_TEMPLATE = 'http://en.musicplayon.com/play?v=%s' def _real_extract(self, url): video_id = self._match_id(url) url = self._URL_TEMPLATE % video_id page = self._download_webpage(url, video_id) title = self._og_search_title(page) description = self._og_search_description(page) thumbnail = self._og_search_thumbnail(page) duration = self._html_search_meta('video:duration', page, 'duration', fatal=False) view_count = self._og_search_property('count', page, fatal=False) uploader = self._html_search_regex( r'<div>by&nbsp;<a href="[^"]+" class="purple">([^<]+)</a></div>', page, 'uploader', fatal=False) sources = self._parse_json( self._search_regex(r'setup\[\'_sources\'\]\s*=\s*([^;]+);', page, 'video sources'), video_id, transform_source=js_to_json) formats = [{ 'url': compat_urlparse.urljoin(url, source['src']), 'ext': mimetype2ext(source.get('type')), 'format_note': source.get('data-res'), } for source in sources] return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'uploader': uploader, 'duration': int_or_none(duration), 'view_count': int_or_none(view_count), 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/mwave.py
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    int_or_none,
    parse_duration,
)


class MwaveIE(InfoExtractor):
    _VALID_URL = r'https?://mwave\.interest\.me/(?:[^/]+/)?mnettv/videodetail\.m\?searchVideoDetailVO\.clip_id=(?P<id>[0-9]+)'
    _URL_TEMPLATE = 'http://mwave.interest.me/mnettv/videodetail.m?searchVideoDetailVO.clip_id=%s'
    _TESTS = [{
        'url': 'http://mwave.interest.me/mnettv/videodetail.m?searchVideoDetailVO.clip_id=168859',
        # md5 is unstable
        'info_dict': {
            'id': '168859',
            'ext': 'flv',
            'title': '[M COUNTDOWN] SISTAR - SHAKE IT',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'M COUNTDOWN',
            'duration': 206,
            'view_count': int,
        }
    }, {
        'url': 'http://mwave.interest.me/en/mnettv/videodetail.m?searchVideoDetailVO.clip_id=176199',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Fetch the clip's VOD info JSON and resolve each CDN entry to f4m formats."""
        video_id = self._match_id(url)

        vod_info = self._download_json(
            'http://mwave.interest.me/onair/vod_info.m?vodtype=CL&sectorid=&endinfo=Y&id=%s' % video_id,
            video_id, 'Download vod JSON')

        formats = []
        for idx, cdn_info in enumerate(vod_info['cdn']):
            stream_url = cdn_info.get('url')
            if not stream_url:
                continue
            # Fall back to the CDN index when the entry carries no name.
            stream_name = cdn_info.get('name') or compat_str(idx)
            f4m_stream = self._download_json(
                stream_url, video_id,
                'Download %s stream JSON' % stream_name)
            f4m_url = f4m_stream.get('fileurl')
            if not f4m_url:
                continue
            formats.extend(self._extract_f4m_formats(
                f4m_url + '&hdcore=3.0.3', video_id, f4m_id=stream_name))
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': vod_info['title'],
            'thumbnail': vod_info.get('cover'),
            'uploader': vod_info.get('program_title'),
            'duration': parse_duration(vod_info.get('time')),
            'view_count': int_or_none(vod_info.get('hit')),
            'formats': formats,
        }


class MwaveMeetGreetIE(InfoExtractor):
    _VALID_URL = r'https?://mwave\.interest\.me/(?:[^/]+/)?meetgreet/view/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://mwave.interest.me/meetgreet/view/256',
        'info_dict': {
            'id': '173294',
            'ext': 'flv',
            'title': '[MEET&GREET] Park BoRam',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'Mwave',
            'duration': 3634,
            'view_count': int,
        }
    }, {
        'url': 'http://mwave.interest.me/en/meetgreet/view/256',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Meet&Greet pages embed a regular Mwave clip via an iframe; delegate to MwaveIE."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        clip_id = self._html_search_regex(
            r'<iframe[^>]+src="/mnettv/ifr_clip\.m\?searchVideoDetailVO\.clip_id=(\d+)',
            webpage, 'clip ID')
        return self.url_result(
            MwaveIE._URL_TEMPLATE % clip_id, 'Mwave', clip_id)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/mychannels.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor class MyChannelsIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?mychannels\.com/.*(?P<id_type>video|production)_id=(?P<id>[0-9]+)' _TEST = { 'url': 'https://mychannels.com/missholland/miss-holland?production_id=3416', 'md5': 'b8993daad4262dd68d89d651c0c52c45', 'info_dict': { 'id': 'wUUDZZep6vQD', 'ext': 'mp4', 'title': 'Miss Holland joins VOTE LEAVE', 'description': 'Miss Holland | #13 Not a potato', 'uploader': 'Miss Holland', } } def _real_extract(self, url): id_type, url_id = re.match(self._VALID_URL, url).groups() webpage = self._download_webpage(url, url_id) video_data = self._html_search_regex(r'<div([^>]+data-%s-id="%s"[^>]+)>' % (id_type, url_id), webpage, 'video data') def extract_data_val(attr, fatal=False): return self._html_search_regex(r'data-%s\s*=\s*"([^"]+)"' % attr, video_data, attr, fatal=fatal) minoto_id = extract_data_val('minoto-id') or self._search_regex(r'/id/([a-zA-Z0-9]+)', extract_data_val('video-src', True), 'minoto id') return { '_type': 'url_transparent', 'url': 'minoto:%s' % minoto_id, 'id': url_id, 'title': extract_data_val('title', True), 'description': extract_data_val('description'), 'thumbnail': extract_data_val('image'), 'uploader': extract_data_val('channel'), }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/myspace.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( ExtractorError, int_or_none, parse_iso8601, ) class MySpaceIE(InfoExtractor): _VALID_URL = r'''(?x) https?:// myspace\.com/[^/]+/ (?P<mediatype> video/[^/]+/(?P<video_id>\d+)| music/song/[^/?#&]+-(?P<song_id>\d+)-\d+(?:[/?#&]|$) ) ''' _TESTS = [{ 'url': 'https://myspace.com/fiveminutestothestage/video/little-big-town/109594919', 'md5': '9c1483c106f4a695c47d2911feed50a7', 'info_dict': { 'id': '109594919', 'ext': 'mp4', 'title': 'Little Big Town', 'description': 'This country quartet was all smiles while playing a sold out show at the Pacific Amphitheatre in Orange County, California.', 'uploader': 'Five Minutes to the Stage', 'uploader_id': 'fiveminutestothestage', 'timestamp': 1414108751, 'upload_date': '20141023', }, }, { # songs 'url': 'https://myspace.com/killsorrow/music/song/of-weakened-soul...-93388656-103880681', 'md5': '1d7ee4604a3da226dd69a123f748b262', 'info_dict': { 'id': '93388656', 'ext': 'm4a', 'title': 'Of weakened soul...', 'uploader': 'Killsorrow', 'uploader_id': 'killsorrow', }, }, { 'add_ie': ['Youtube'], 'url': 'https://myspace.com/threedaysgrace/music/song/animal-i-have-become-28400208-28218041', 'info_dict': { 'id': 'xqds0B_meys', 'ext': 'webm', 'title': 'Three Days Grace - Animal I Have Become', 'description': 'md5:8bd86b3693e72a077cf863a8530c54bb', 'uploader': 'ThreeDaysGraceVEVO', 'uploader_id': 'ThreeDaysGraceVEVO', 'upload_date': '20091002', }, }, { 'url': 'https://myspace.com/starset2/music/song/first-light-95799905-106964426', 'only_matching': True, }, { 'url': 'https://myspace.com/thelargemouthbassband/music/song/02-pure-eyes.mp3-94422330-105113388', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('video_id') or mobj.group('song_id') is_song = mobj.group('mediatype').startswith('music/song') webpage = self._download_webpage(url, video_id) player_url 
= self._search_regex( r'videoSwf":"([^"?]*)', webpage, 'player URL', fatal=False) def formats_from_stream_urls(stream_url, hls_stream_url, http_stream_url, width=None, height=None): formats = [] vcodec = 'none' if is_song else None if hls_stream_url: formats.append({ 'format_id': 'hls', 'url': hls_stream_url, 'protocol': 'm3u8_native', 'ext': 'm4a' if is_song else 'mp4', 'vcodec': vcodec, }) if stream_url and player_url: rtmp_url, play_path = stream_url.split(';', 1) formats.append({ 'format_id': 'rtmp', 'url': rtmp_url, 'play_path': play_path, 'player_url': player_url, 'protocol': 'rtmp', 'ext': 'flv', 'width': width, 'height': height, 'vcodec': vcodec, }) if http_stream_url: formats.append({ 'format_id': 'http', 'url': http_stream_url, 'width': width, 'height': height, 'vcodec': vcodec, }) return formats if is_song: # songs don't store any useful info in the 'context' variable song_data = self._search_regex( r'''<button.*data-song-id=(["\'])%s\1.*''' % video_id, webpage, 'song_data', default=None, group=0) if song_data is None: # some songs in an album are not playable self.report_warning( '%s: No downloadable song on this page' % video_id) return def search_data(name): return self._search_regex( r'''data-%s=([\'"])(?P<data>.*?)\1''' % name, song_data, name, default='', group='data') formats = formats_from_stream_urls( search_data('stream-url'), search_data('hls-stream-url'), search_data('http-stream-url')) if not formats: vevo_id = search_data('vevo-id') youtube_id = search_data('youtube-id') if vevo_id: self.to_screen('Vevo video detected: %s' % vevo_id) return self.url_result('vevo:%s' % vevo_id, ie='Vevo') elif youtube_id: self.to_screen('Youtube video detected: %s' % youtube_id) return self.url_result(youtube_id, ie='Youtube') else: raise ExtractorError( 'Found song but don\'t know how to download it') self._sort_formats(formats) return { 'id': video_id, 'title': self._og_search_title(webpage), 'uploader': search_data('artist-name'), 'uploader_id': 
search_data('artist-username'), 'thumbnail': self._og_search_thumbnail(webpage), 'duration': int_or_none(search_data('duration')), 'formats': formats, } else: video = self._parse_json(self._search_regex( r'context = ({.*?});', webpage, 'context'), video_id)['video'] formats = formats_from_stream_urls( video.get('streamUrl'), video.get('hlsStreamUrl'), video.get('mp4StreamUrl'), int_or_none(video.get('width')), int_or_none(video.get('height'))) self._sort_formats(formats) return { 'id': video_id, 'title': video['title'], 'description': video.get('description'), 'thumbnail': video.get('imageUrl'), 'uploader': video.get('artistName'), 'uploader_id': video.get('artistUsername'), 'duration': int_or_none(video.get('duration')), 'timestamp': parse_iso8601(video.get('dateAdded')), 'formats': formats, } class MySpaceAlbumIE(InfoExtractor): IE_NAME = 'MySpace:album' _VALID_URL = r'https?://myspace\.com/([^/]+)/music/album/(?P<title>.*-)(?P<id>\d+)' _TESTS = [{ 'url': 'https://myspace.com/starset2/music/album/transmissions-19455773', 'info_dict': { 'title': 'Transmissions', 'id': '19455773', }, 'playlist_count': 14, 'skip': 'this album is only available in some countries', }, { 'url': 'https://myspace.com/killsorrow/music/album/the-demo-18596029', 'info_dict': { 'title': 'The Demo', 'id': '18596029', }, 'playlist_count': 5, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) playlist_id = mobj.group('id') display_id = mobj.group('title') + playlist_id webpage = self._download_webpage(url, display_id) tracks_paths = re.findall(r'"music:song" content="(.*?)"', webpage) if not tracks_paths: raise ExtractorError( '%s: No songs found, try using proxy' % display_id, expected=True) entries = [ self.url_result(t_path, ie=MySpaceIE.ie_key()) for t_path in tracks_paths] return { '_type': 'playlist', 'id': playlist_id, 'display_id': display_id, 'title': self._og_search_title(webpage), 'entries': entries, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/myspass.py
from __future__ import unicode_literals import os.path from .common import InfoExtractor from ..compat import ( compat_urllib_parse_urlparse, ) from ..utils import ( ExtractorError, ) class MySpassIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?myspass\.de/.*' _TEST = { 'url': 'http://www.myspass.de/myspass/shows/tvshows/absolute-mehrheit/Absolute-Mehrheit-vom-17022013-Die-Highlights-Teil-2--/11741/', 'md5': '0b49f4844a068f8b33f4b7c88405862b', 'info_dict': { 'id': '11741', 'ext': 'mp4', 'description': 'Wer kann in die Fu\u00dfstapfen von Wolfgang Kubicki treten und die Mehrheit der Zuschauer hinter sich versammeln? Wird vielleicht sogar die Absolute Mehrheit geknackt und der Jackpot von 200.000 Euro mit nach Hause genommen?', 'title': 'Absolute Mehrheit vom 17.02.2013 - Die Highlights, Teil 2', }, } def _real_extract(self, url): META_DATA_URL_TEMPLATE = 'http://www.myspass.de/myspass/includes/apps/video/getvideometadataxml.php?id=%s' # video id is the last path element of the URL # usually there is a trailing slash, so also try the second but last url_path = compat_urllib_parse_urlparse(url).path url_parent_path, video_id = os.path.split(url_path) if not video_id: _, video_id = os.path.split(url_parent_path) # get metadata metadata_url = META_DATA_URL_TEMPLATE % video_id metadata = self._download_xml( metadata_url, video_id, transform_source=lambda s: s.strip()) # extract values from metadata url_flv_el = metadata.find('url_flv') if url_flv_el is None: raise ExtractorError('Unable to extract download url') video_url = url_flv_el.text title_el = metadata.find('title') if title_el is None: raise ExtractorError('Unable to extract title') title = title_el.text format_id_el = metadata.find('format_id') if format_id_el is None: format = 'mp4' else: format = format_id_el.text description_el = metadata.find('description') if description_el is not None: description = description_el.text else: description = None imagePreview_el = metadata.find('imagePreview') if 
imagePreview_el is not None: thumbnail = imagePreview_el.text else: thumbnail = None return { 'id': video_id, 'url': video_url, 'title': title, 'format': format, 'thumbnail': thumbnail, 'description': description, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/myvi.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from .vimple import SprutoBaseIE class MyviIE(SprutoBaseIE): _VALID_URL = r'''(?x) (?: https?:// (?:www\.)? myvi\. (?: (?:ru/player|tv)/ (?: (?: embed/html| flash| api/Video/Get )/| content/preloader\.swf\?.*\bid= )| ru/watch/ )| myvi: ) (?P<id>[\da-zA-Z_-]+) ''' _TESTS = [{ 'url': 'http://myvi.ru/player/embed/html/oOy4euHA6LVwNNAjhD9_Jq5Ha2Qf0rtVMVFMAZav8wObeRTZaCATzucDQIDph8hQU0', 'md5': '571bbdfba9f9ed229dc6d34cc0f335bf', 'info_dict': { 'id': 'f16b2bbd-cde8-481c-a981-7cd48605df43', 'ext': 'mp4', 'title': 'хозяин жизни', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 25, }, }, { 'url': 'http://myvi.ru/player/content/preloader.swf?id=oOy4euHA6LVwNNAjhD9_Jq5Ha2Qf0rtVMVFMAZav8wOYf1WFpPfc_bWTKGVf_Zafr0', 'only_matching': True, }, { 'url': 'http://myvi.ru/player/api/Video/Get/oOy4euHA6LVwNNAjhD9_Jq5Ha2Qf0rtVMVFMAZav8wObeRTZaCATzucDQIDph8hQU0', 'only_matching': True, }, { 'url': 'http://myvi.tv/embed/html/oTGTNWdyz4Zwy_u1nraolwZ1odenTd9WkTnRfIL9y8VOgHYqOHApE575x4_xxS9Vn0?ap=0', 'only_matching': True, }, { 'url': 'http://myvi.ru/player/flash/ocp2qZrHI-eZnHKQBK4cZV60hslH8LALnk0uBfKsB-Q4WnY26SeGoYPi8HWHxu0O30', 'only_matching': True, }, { 'url': 'https://www.myvi.ru/watch/YwbqszQynUaHPn_s82sx0Q2', 'only_matching': True, }, { 'url': 'myvi:YwbqszQynUaHPn_s82sx0Q2', 'only_matching': True, }] @classmethod def _extract_url(cls, webpage): mobj = re.search( r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//myvi\.(?:ru/player|tv)/(?:embed/html|flash)/[^"]+)\1', webpage) if mobj: return mobj.group('url') def _real_extract(self, url): video_id = self._match_id(url) spruto = self._download_json( 'http://myvi.ru/player/api/Video/Get/%s?sig' % video_id, video_id)['sprutoData'] return self._extract_spruto(spruto, video_id) class MyviEmbedIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?myvi\.tv/(?:[^?]+\?.*?\bv=|embed/)(?P<id>[\da-z]+)' _TESTS = [{ 'url': 
'https://www.myvi.tv/embed/ccdqic3wgkqwpb36x9sxg43t4r', 'info_dict': { 'id': 'b3ea0663-3234-469d-873e-7fecf36b31d1', 'ext': 'mp4', 'title': 'Твоя (original song).mp4', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 277, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.myvi.tv/idmi6o?v=ccdqic3wgkqwpb36x9sxg43t4r#watch', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if MyviIE.suitable(url) else super(MyviEmbedIE, cls).suitable(url) def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage( 'https://www.myvi.tv/embed/%s' % video_id, video_id) myvi_id = self._search_regex( r'CreatePlayer\s*\(\s*["\'].*?\bv=([\da-zA-Z_]+)', webpage, 'video id') return self.url_result('myvi:%s' % myvi_id, ie=MyviIE.ie_key())
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/myvidster.py
from __future__ import unicode_literals

from .common import InfoExtractor


class MyVidsterIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?myvidster\.com/video/(?P<id>\d+)/'
    _TEST = {
        'url': 'http://www.myvidster.com/video/32059805/Hot_chemistry_with_raw_love_making',
        'md5': '95296d0231c1363222c3441af62dc4ca',
        'info_dict': {
            'id': '3685814',
            'title': 'md5:7d8427d6d02c4fbcef50fe269980c749',
            'upload_date': '20141027',
            'uploader': 'utkualp',
            'ext': 'mp4',
            'age_limit': 18,
        },
        'add_ie': ['XHamster'],
    }

    def _real_extract(self, url):
        """MyVidster only bookmarks third-party videos; delegate to the host site."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The page links the original hosting site via rel="videolink".
        real_url = self._html_search_regex(
            r'rel="videolink" href="(?P<real_url>.*)">', webpage,
            'real video url')
        return self.url_result(real_url)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/nationalgeographic.py
from __future__ import unicode_literals

from .common import InfoExtractor
from .fox import FOXIE
from ..utils import (
    smuggle_url,
    url_basename,
)


class NationalGeographicVideoIE(InfoExtractor):
    IE_NAME = 'natgeo:video'
    _VALID_URL = r'https?://video\.nationalgeographic\.com/.*?'

    _TESTS = [
        {
            'url': 'http://video.nationalgeographic.com/video/news/150210-news-crab-mating-vin?source=featuredvideo',
            'md5': '730855d559abbad6b42c2be1fa584917',
            'info_dict': {
                'id': '0000014b-70a1-dd8c-af7f-f7b559330001',
                'ext': 'mp4',
                'title': 'Mating Crabs Busted by Sharks',
                'description': 'md5:16f25aeffdeba55aaa8ec37e093ad8b3',
                'timestamp': 1423523799,
                'upload_date': '20150209',
                'uploader': 'NAGS',
            },
            'add_ie': ['ThePlatform'],
        },
        {
            'url': 'http://video.nationalgeographic.com/wild/when-sharks-attack/the-real-jaws',
            'md5': '6a3105eb448c070503b3105fb9b320b5',
            'info_dict': {
                'id': 'ngc-I0IauNSWznb_UV008GxSbwY35BZvgi2e',
                'ext': 'mp4',
                'title': 'The Real Jaws',
                'description': 'md5:8d3e09d9d53a85cd397b4b21b2c77be6',
                'timestamp': 1433772632,
                'upload_date': '20150608',
                'uploader': 'NAGS',
            },
            'add_ie': ['ThePlatform'],
        },
    ]

    def _real_extract(self, url):
        """Map a natgeo video page to its ThePlatform media GUID."""
        display_id = url_basename(url)
        webpage = self._download_webpage(url, display_id)
        guid = self._search_regex(
            r'id="(?:videoPlayer|player-container)"[^>]+data-guid="([^"]+)"',
            webpage, 'guid')
        # Delegate the actual media extraction to ThePlatform.
        theplatform_url = smuggle_url(
            'http://link.theplatform.com/s/ngs/media/guid/2423130747/%s?mbr=true' % guid,
            {'force_smil_url': True})
        return {
            '_type': 'url_transparent',
            'ie_key': 'ThePlatform',
            'url': theplatform_url,
            'id': guid,
        }


class NationalGeographicTVIE(FOXIE):
    # Reuses the FOX extraction pipeline with natgeo's own endpoint/key.
    _VALID_URL = r'https?://(?:www\.)?nationalgeographic\.com/tv/watch/(?P<id>[\da-fA-F]+)'
    _TESTS = [{
        'url': 'https://www.nationalgeographic.com/tv/watch/6a875e6e734b479beda26438c9f21138/',
        'info_dict': {
            'id': '6a875e6e734b479beda26438c9f21138',
            'ext': 'mp4',
            'title': 'Why Nat Geo? Valley of the Boom',
            'description': 'The lives of prominent figures in the tech world, including their friendships, rivalries, victories and failures.',
            'timestamp': 1542662458,
            'upload_date': '20181119',
            'age_limit': 14,
        },
        'params': {
            'skip_download': True,
        },
    }]
    _HOME_PAGE_URL = 'https://www.nationalgeographic.com/tv/'
    _API_KEY = '238bb0a0c2aba67922c48709ce0c06fd'
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/naver.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    update_url_query,
)


class NaverIE(InfoExtractor):
    _VALID_URL = r'https?://(?:m\.)?tv(?:cast)?\.naver\.com/v/(?P<id>\d+)'

    _TESTS = [{
        'url': 'http://tv.naver.com/v/81652',
        'info_dict': {
            'id': '81652',
            'ext': 'mp4',
            'title': '[9월 모의고사 해설강의][수학_김상희] 수학 A형 16~20번',
            'description': '합격불변의 법칙 메가스터디 | 메가스터디 수학 김상희 선생님이 9월 모의고사 수학A형 16번에서 20번까지 해설강의를 공개합니다.',
            'upload_date': '20130903',
        },
    }, {
        'url': 'http://tv.naver.com/v/395837',
        'md5': '638ed4c12012c458fefcddfd01f173cd',
        'info_dict': {
            'id': '395837',
            'ext': 'mp4',
            'title': '9년이 지나도 아픈 기억, 전효성의 아버지',
            'description': 'md5:5bf200dcbf4b66eb1b350d1eb9c753f7',
            'upload_date': '20150519',
        },
        'skip': 'Georestricted',
    }, {
        'url': 'http://tvcast.naver.com/v/81652',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Extract a Naver TV video via the play.rmcnmv.naver.com VOD API."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # BUGFIX: this lookup previously passed `fatal=None` instead of
        # `default=None`; both return None on a miss, but `fatal=None` made
        # _search_regex emit a spurious "unable to extract" warning before
        # the real error-page handling below.  Now consistent with in_key.
        vid = self._search_regex(
            r'videoId["\']\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage,
            'video id', default=None, group='value')
        in_key = self._search_regex(
            r'inKey["\']\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage,
            'key', default=None, group='value')

        if not vid or not in_key:
            # Georestriction / takedown pages carry a human-readable reason.
            error = self._html_search_regex(
                r'(?s)<div class="(?:nation_error|nation_box|error_box)">\s*(?:<!--.*?-->)?\s*<p class="[^"]+">(?P<msg>.+?)</p>\s*</div>',
                webpage, 'error', default=None)
            if error:
                raise ExtractorError(error, expected=True)
            raise ExtractorError('couldn\'t extract vid and key')
        video_data = self._download_json(
            'http://play.rmcnmv.naver.com/vod/play/v2.0/' + vid,
            video_id, query={
                'key': in_key,
            })
        meta = video_data['meta']
        title = meta['subject']
        formats = []

        def extract_formats(streams, stream_type, query={}):
            # Append one progressive/HLS format per stream entry; `query`
            # carries per-stream-set auth parameters (read-only, not mutated).
            for stream in streams:
                stream_url = stream.get('source')
                if not stream_url:
                    continue
                stream_url = update_url_query(stream_url, query)
                encoding_option = stream.get('encodingOption', {})
                bitrate = stream.get('bitrate', {})
                formats.append({
                    'format_id': '%s_%s' % (stream.get('type') or stream_type, encoding_option.get('id') or encoding_option.get('name')),
                    'url': stream_url,
                    'width': int_or_none(encoding_option.get('width')),
                    'height': int_or_none(encoding_option.get('height')),
                    'vbr': int_or_none(bitrate.get('video')),
                    'abr': int_or_none(bitrate.get('audio')),
                    'filesize': int_or_none(stream.get('size')),
                    'protocol': 'm3u8_native' if stream_type == 'HLS' else None,
                })

        extract_formats(video_data.get('videos', {}).get('list', []), 'H264')
        for stream_set in video_data.get('streams', []):
            query = {}
            for param in stream_set.get('keys', []):
                query[param['name']] = param['value']
            stream_type = stream_set.get('type')
            videos = stream_set.get('videos')
            if videos:
                extract_formats(videos, stream_type, query)
            elif stream_type == 'HLS':
                stream_url = stream_set.get('source')
                if not stream_url:
                    continue
                formats.extend(self._extract_m3u8_formats(
                    update_url_query(stream_url, query), video_id,
                    'mp4', 'm3u8_native', m3u8_id=stream_type, fatal=False))
        self._sort_formats(formats)

        subtitles = {}
        for caption in video_data.get('captions', {}).get('list', []):
            caption_url = caption.get('source')
            if not caption_url:
                continue
            subtitles.setdefault(caption.get('language') or caption.get('locale'), []).append({
                'url': caption_url,
            })

        upload_date = self._search_regex(
            r'<span[^>]+class="date".*?(\d{4}\.\d{2}\.\d{2})', webpage,
            'upload date', fatal=False)
        if upload_date:
            upload_date = upload_date.replace('.', '')

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'subtitles': subtitles,
            'description': self._og_search_description(webpage),
            'thumbnail': meta.get('cover', {}).get('source') or self._og_search_thumbnail(webpage),
            'view_count': int_or_none(meta.get('count')),
            'upload_date': upload_date,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/nba.py
from __future__ import unicode_literals import functools import re from .turner import TurnerBaseIE from ..compat import ( compat_urllib_parse_urlencode, compat_urlparse, ) from ..utils import ( OnDemandPagedList, remove_start, ) class NBAIE(TurnerBaseIE): _VALID_URL = r'https?://(?:watch\.|www\.)?nba\.com/(?P<path>(?:[^/]+/)+(?P<id>[^?]*?))/?(?:/index\.html)?(?:\?.*)?$' _TESTS = [{ 'url': 'http://www.nba.com/video/games/nets/2012/12/04/0021200253-okc-bkn-recap.nba/index.html', 'md5': '9e7729d3010a9c71506fd1248f74e4f4', 'info_dict': { 'id': '0021200253-okc-bkn-recap', 'ext': 'mp4', 'title': 'Thunder vs. Nets', 'description': 'Kevin Durant scores 32 points and dishes out six assists as the Thunder beat the Nets in Brooklyn.', 'duration': 181, 'timestamp': 1354638466, 'upload_date': '20121204', }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://www.nba.com/video/games/hornets/2014/12/05/0021400276-nyk-cha-play5.nba/', 'only_matching': True, }, { 'url': 'http://watch.nba.com/video/channels/playoffs/2015/05/20/0041400301-cle-atl-recap.nba', 'md5': 'b2b39b81cf28615ae0c3360a3f9668c4', 'info_dict': { 'id': 'channels/playoffs/2015/05/20/0041400301-cle-atl-recap.nba', 'ext': 'mp4', 'title': 'Hawks vs. 
Cavaliers Game 1', 'description': 'md5:8094c3498d35a9bd6b1a8c396a071b4d', 'duration': 228, 'timestamp': 1432134543, 'upload_date': '20150520', }, 'expected_warnings': ['Unable to download f4m manifest'], }, { 'url': 'http://www.nba.com/clippers/news/doc-rivers-were-not-trading-blake', 'info_dict': { 'id': 'teams/clippers/2016/02/17/1455672027478-Doc_Feb16_720.mov-297324', 'ext': 'mp4', 'title': 'Practice: Doc Rivers - 2/16/16', 'description': 'Head Coach Doc Rivers addresses the media following practice.', 'upload_date': '20160216', 'timestamp': 1455672000, }, 'params': { # m3u8 download 'skip_download': True, }, 'expected_warnings': ['Unable to download f4m manifest'], }, { 'url': 'http://www.nba.com/timberwolves/wiggins-shootaround#', 'info_dict': { 'id': 'timberwolves', 'title': 'Shootaround Access - Dec. 12 | Andrew Wiggins', }, 'playlist_count': 30, 'params': { # Download the whole playlist takes too long time 'playlist_items': '1-30', }, }, { 'url': 'http://www.nba.com/timberwolves/wiggins-shootaround#', 'info_dict': { 'id': 'teams/timberwolves/2014/12/12/Wigginsmp4-3462601', 'ext': 'mp4', 'title': 'Shootaround Access - Dec. 12 | Andrew Wiggins', 'description': 'Wolves rookie Andrew Wiggins addresses the media after Friday\'s shootaround.', 'upload_date': '20141212', 'timestamp': 1418418600, }, 'params': { 'noplaylist': True, # m3u8 download 'skip_download': True, }, 'expected_warnings': ['Unable to download f4m manifest'], }] _PAGE_SIZE = 30 def _fetch_page(self, team, video_id, page): search_url = 'http://searchapp2.nba.com/nba-search/query.jsp?' 
+ compat_urllib_parse_urlencode({ 'type': 'teamvideo', 'start': page * self._PAGE_SIZE + 1, 'npp': (page + 1) * self._PAGE_SIZE + 1, 'sort': 'recent', 'output': 'json', 'site': team, }) results = self._download_json( search_url, video_id, note='Download page %d of playlist data' % page)['results'][0] for item in results: yield self.url_result(compat_urlparse.urljoin('http://www.nba.com/', item['url'])) def _extract_playlist(self, orig_path, video_id, webpage): team = orig_path.split('/')[0] if self._downloader.params.get('noplaylist'): self.to_screen('Downloading just video because of --no-playlist') video_path = self._search_regex( r'nbaVideoCore\.firstVideo\s*=\s*\'([^\']+)\';', webpage, 'video path') video_url = 'http://www.nba.com/%s/video/%s' % (team, video_path) return self.url_result(video_url) self.to_screen('Downloading playlist - add --no-playlist to just download video') playlist_title = self._og_search_title(webpage, fatal=False) entries = OnDemandPagedList( functools.partial(self._fetch_page, team, video_id), self._PAGE_SIZE) return self.playlist_result(entries, team, playlist_title) def _real_extract(self, url): path, video_id = re.match(self._VALID_URL, url).groups() orig_path = path if path.startswith('nba/'): path = path[3:] if 'video/' not in path: webpage = self._download_webpage(url, video_id) path = remove_start(self._search_regex(r'data-videoid="([^"]+)"', webpage, 'video id'), '/') if path == '{{id}}': return self._extract_playlist(orig_path, video_id, webpage) # See prepareContentId() of pkgCvp.js if path.startswith('video/teams'): path = 'video/channels/proxy/' + path[6:] return self._extract_cvp_info( 'http://www.nba.com/%s.xml' % path, video_id, { 'default': { 'media_src': 'http://nba.cdn.turner.com/nba/big', }, 'm3u8': { 'media_src': 'http://nbavod-f.akamaihd.net', }, })
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/nbc.py
from __future__ import unicode_literals import base64 import json import re from .common import InfoExtractor from .theplatform import ThePlatformIE from .adobepass import AdobePassIE from ..compat import compat_urllib_parse_unquote from ..utils import ( int_or_none, js_to_json, parse_duration, smuggle_url, try_get, unified_timestamp, update_url_query, ) class NBCIE(AdobePassIE): _VALID_URL = r'https?(?P<permalink>://(?:www\.)?nbc\.com/(?:classic-tv/)?[^/]+/video/[^/]+/(?P<id>n?\d+))' _TESTS = [ { 'url': 'http://www.nbc.com/the-tonight-show/video/jimmy-fallon-surprises-fans-at-ben-jerrys/2848237', 'info_dict': { 'id': '2848237', 'ext': 'mp4', 'title': 'Jimmy Fallon Surprises Fans at Ben & Jerry\'s', 'description': 'Jimmy gives out free scoops of his new "Tonight Dough" ice cream flavor by surprising customers at the Ben & Jerry\'s scoop shop.', 'timestamp': 1424246400, 'upload_date': '20150218', 'uploader': 'NBCU-COM', }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://www.nbc.com/saturday-night-live/video/star-wars-teaser/2832821', 'info_dict': { 'id': '2832821', 'ext': 'mp4', 'title': 'Star Wars Teaser', 'description': 'md5:0b40f9cbde5b671a7ff62fceccc4f442', 'timestamp': 1417852800, 'upload_date': '20141206', 'uploader': 'NBCU-COM', }, 'params': { # m3u8 download 'skip_download': True, }, 'skip': 'Only works from US', }, { # HLS streams requires the 'hdnea3' cookie 'url': 'http://www.nbc.com/Kings/video/goliath/n1806', 'info_dict': { 'id': '101528f5a9e8127b107e98c5e6ce4638', 'ext': 'mp4', 'title': 'Goliath', 'description': 'When an unknown soldier saves the life of the King\'s son in battle, he\'s thrust into the limelight and politics of the kingdom.', 'timestamp': 1237100400, 'upload_date': '20090315', 'uploader': 'NBCU-COM', }, 'params': { 'skip_download': True, }, 'skip': 'Only works from US', }, { 'url': 'https://www.nbc.com/classic-tv/charles-in-charge/video/charles-in-charge-pilot/n3310', 'only_matching': True, }, { # Percent 
escaped url 'url': 'https://www.nbc.com/up-all-night/video/day-after-valentine%27s-day/n2189', 'only_matching': True, } ] def _real_extract(self, url): permalink, video_id = re.match(self._VALID_URL, url).groups() permalink = 'http' + compat_urllib_parse_unquote(permalink) response = self._download_json( 'https://friendship.nbc.co/v2/graphql', video_id, query={ 'query': '''{ page(name: "%s", platform: web, type: VIDEO, userId: "0") { data { ... on VideoPageData { description episodeNumber keywords locked mpxAccountId mpxGuid rating seasonNumber secondaryTitle seriesShortTitle } } } }''' % permalink, }) video_data = response['data']['page']['data'] query = { 'mbr': 'true', 'manifest': 'm3u', } video_id = video_data['mpxGuid'] title = video_data['secondaryTitle'] if video_data.get('locked'): resource = self._get_mvpd_resource( 'nbcentertainment', title, video_id, video_data.get('rating')) query['auth'] = self._extract_mvpd_auth( url, video_id, 'nbcentertainment', resource) theplatform_url = smuggle_url(update_url_query( 'http://link.theplatform.com/s/NnzsPC/media/guid/%s/%s' % (video_data.get('mpxAccountId') or '2410887629', video_id), query), {'force_smil_url': True}) return { '_type': 'url_transparent', 'id': video_id, 'title': title, 'url': theplatform_url, 'description': video_data.get('description'), 'tags': video_data.get('keywords'), 'season_number': int_or_none(video_data.get('seasonNumber')), 'episode_number': int_or_none(video_data.get('episodeNumber')), 'episode': title, 'series': video_data.get('seriesShortTitle'), 'ie_key': 'ThePlatform', } class NBCSportsVPlayerIE(InfoExtractor): _VALID_URL = r'https?://vplayer\.nbcsports\.com/(?:[^/]+/)+(?P<id>[0-9a-zA-Z_]+)' _TESTS = [{ 'url': 'https://vplayer.nbcsports.com/p/BxmELC/nbcsports_embed/select/9CsDKds0kvHI', 'info_dict': { 'id': '9CsDKds0kvHI', 'ext': 'mp4', 'description': 'md5:df390f70a9ba7c95ff1daace988f0d8d', 'title': 'Tyler Kalinoski hits buzzer-beater to lift Davidson', 'timestamp': 1426270238, 
'upload_date': '20150313', 'uploader': 'NBCU-SPORTS', } }, { 'url': 'https://vplayer.nbcsports.com/p/BxmELC/nbcsports_embed/select/media/_hqLjQ95yx8Z', 'only_matching': True, }] @staticmethod def _extract_url(webpage): iframe_m = re.search( r'<iframe[^>]+src="(?P<url>https?://vplayer\.nbcsports\.com/[^"]+)"', webpage) if iframe_m: return iframe_m.group('url') def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) theplatform_url = self._og_search_video_url(webpage).replace( 'vplayer.nbcsports.com', 'player.theplatform.com') return self.url_result(theplatform_url, 'ThePlatform') class NBCSportsIE(InfoExtractor): # Does not include https because its certificate is invalid _VALID_URL = r'https?://(?:www\.)?nbcsports\.com//?(?:[^/]+/)+(?P<id>[0-9a-z-]+)' _TEST = { 'url': 'http://www.nbcsports.com//college-basketball/ncaab/tom-izzo-michigan-st-has-so-much-respect-duke', 'info_dict': { 'id': 'PHJSaFWbrTY9', 'ext': 'flv', 'title': 'Tom Izzo, Michigan St. 
has \'so much respect\' for Duke', 'description': 'md5:ecb459c9d59e0766ac9c7d5d0eda8113', 'uploader': 'NBCU-SPORTS', 'upload_date': '20150330', 'timestamp': 1427726529, } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) return self.url_result( NBCSportsVPlayerIE._extract_url(webpage), 'NBCSportsVPlayer') class NBCSportsStreamIE(AdobePassIE): _VALID_URL = r'https?://stream\.nbcsports\.com/.+?\bpid=(?P<id>\d+)' _TEST = { 'url': 'http://stream.nbcsports.com/nbcsn/generic?pid=206559', 'info_dict': { 'id': '206559', 'ext': 'mp4', 'title': 'Amgen Tour of California Women\'s Recap', 'description': 'md5:66520066b3b5281ada7698d0ea2aa894', }, 'params': { # m3u8 download 'skip_download': True, }, 'skip': 'Requires Adobe Pass Authentication', } def _real_extract(self, url): video_id = self._match_id(url) live_source = self._download_json( 'http://stream.nbcsports.com/data/live_sources_%s.json' % video_id, video_id) video_source = live_source['videoSources'][0] title = video_source['title'] source_url = None for k in ('source', 'msl4source', 'iossource', 'hlsv4'): sk = k + 'Url' source_url = video_source.get(sk) or video_source.get(sk + 'Alt') if source_url: break else: source_url = video_source['ottStreamUrl'] is_live = video_source.get('type') == 'live' or video_source.get('status') == 'Live' resource = self._get_mvpd_resource('nbcsports', title, video_id, '') token = self._extract_mvpd_auth(url, video_id, 'nbcsports', resource) tokenized_url = self._download_json( 'https://token.playmakerservices.com/cdn', video_id, data=json.dumps({ 'requestorId': 'nbcsports', 'pid': video_id, 'application': 'NBCSports', 'version': 'v1', 'platform': 'desktop', 'cdn': 'akamai', 'url': video_source['sourceUrl'], 'token': base64.b64encode(token.encode()).decode(), 'resourceId': base64.b64encode(resource.encode()).decode(), }).encode())['tokenizedUrl'] formats = self._extract_m3u8_formats(tokenized_url, video_id, 'mp4') 
self._sort_formats(formats) return { 'id': video_id, 'title': self._live_title(title) if is_live else title, 'description': live_source.get('description'), 'formats': formats, 'is_live': is_live, } class CSNNEIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?csnne\.com/video/(?P<id>[0-9a-z-]+)' _TEST = { 'url': 'http://www.csnne.com/video/snc-evening-update-wright-named-red-sox-no-5-starter', 'info_dict': { 'id': 'yvBLLUgQ8WU0', 'ext': 'mp4', 'title': 'SNC evening update: Wright named Red Sox\' No. 5 starter.', 'description': 'md5:1753cfee40d9352b19b4c9b3e589b9e3', 'timestamp': 1459369979, 'upload_date': '20160330', 'uploader': 'NBCU-SPORTS', } } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) return { '_type': 'url_transparent', 'ie_key': 'ThePlatform', 'url': self._html_search_meta('twitter:player:stream', webpage), 'display_id': display_id, } class NBCNewsIE(ThePlatformIE): _VALID_URL = r'(?x)https?://(?:www\.)?(?:nbcnews|today|msnbc)\.com/([^/]+/)*(?:.*-)?(?P<id>[^/?]+)' _TESTS = [ { 'url': 'http://www.nbcnews.com/watch/nbcnews-com/how-twitter-reacted-to-the-snowden-interview-269389891880', 'md5': 'cf4bc9e6ce0130f00f545d80ecedd4bf', 'info_dict': { 'id': '269389891880', 'ext': 'mp4', 'title': 'How Twitter Reacted To The Snowden Interview', 'description': 'md5:65a0bd5d76fe114f3c2727aa3a81fe64', 'timestamp': 1401363060, 'upload_date': '20140529', }, }, { 'url': 'http://www.nbcnews.com/feature/dateline-full-episodes/full-episode-family-business-n285156', 'md5': 'fdbf39ab73a72df5896b6234ff98518a', 'info_dict': { 'id': '529953347624', 'ext': 'mp4', 'title': 'FULL EPISODE: Family Business', 'description': 'md5:757988edbaae9d7be1d585eb5d55cc04', }, 'skip': 'This page is unavailable.', }, { 'url': 'http://www.nbcnews.com/nightly-news/video/nightly-news-with-brian-williams-full-broadcast-february-4-394064451844', 'md5': '8eb831eca25bfa7d25ddd83e85946548', 'info_dict': { 'id': '394064451844', 'ext': 
'mp4', 'title': 'Nightly News with Brian Williams Full Broadcast (February 4)', 'description': 'md5:1c10c1eccbe84a26e5debb4381e2d3c5', 'timestamp': 1423104900, 'upload_date': '20150205', }, }, { 'url': 'http://www.nbcnews.com/business/autos/volkswagen-11-million-vehicles-could-have-suspect-software-emissions-scandal-n431456', 'md5': '4a8c4cec9e1ded51060bdda36ff0a5c0', 'info_dict': { 'id': 'n431456', 'ext': 'mp4', 'title': "Volkswagen U.S. Chief: We 'Totally Screwed Up'", 'description': 'md5:d22d1281a24f22ea0880741bb4dd6301', 'upload_date': '20150922', 'timestamp': 1442917800, }, }, { 'url': 'http://www.today.com/video/see-the-aurora-borealis-from-space-in-stunning-new-nasa-video-669831235788', 'md5': '118d7ca3f0bea6534f119c68ef539f71', 'info_dict': { 'id': '669831235788', 'ext': 'mp4', 'title': 'See the aurora borealis from space in stunning new NASA video', 'description': 'md5:74752b7358afb99939c5f8bb2d1d04b1', 'upload_date': '20160420', 'timestamp': 1461152093, }, }, { 'url': 'http://www.msnbc.com/all-in-with-chris-hayes/watch/the-chaotic-gop-immigration-vote-314487875924', 'md5': '6d236bf4f3dddc226633ce6e2c3f814d', 'info_dict': { 'id': '314487875924', 'ext': 'mp4', 'title': 'The chaotic GOP immigration vote', 'description': 'The Republican House votes on a border bill that has no chance of getting through the Senate or signed by the President and is drawing criticism from all sides.', 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1406937606, 'upload_date': '20140802', }, }, { 'url': 'http://www.nbcnews.com/watch/dateline/full-episode--deadly-betrayal-386250819952', 'only_matching': True, }, { # From http://www.vulture.com/2016/06/letterman-couldnt-care-less-about-late-night.html 'url': 'http://www.nbcnews.com/widget/video-embed/701714499682', 'only_matching': True, }, ] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) data = self._parse_json(self._search_regex( r'window\.__data\s*=\s*({.+});', 
webpage, 'bootstrap json'), video_id, js_to_json) video_data = try_get(data, lambda x: x['video']['current'], dict) if not video_data: video_data = data['article']['content'][0]['primaryMedia']['video'] title = video_data['headline']['primary'] formats = [] for va in video_data.get('videoAssets', []): public_url = va.get('publicUrl') if not public_url: continue if '://link.theplatform.com/' in public_url: public_url = update_url_query(public_url, {'format': 'redirect'}) format_id = va.get('format') if format_id == 'M3U': formats.extend(self._extract_m3u8_formats( public_url, video_id, 'mp4', 'm3u8_native', m3u8_id=format_id, fatal=False)) continue tbr = int_or_none(va.get('bitrate'), 1000) if tbr: format_id += '-%d' % tbr formats.append({ 'format_id': format_id, 'url': public_url, 'width': int_or_none(va.get('width')), 'height': int_or_none(va.get('height')), 'tbr': tbr, 'ext': 'mp4', }) self._sort_formats(formats) subtitles = {} closed_captioning = video_data.get('closedCaptioning') if closed_captioning: for cc_url in closed_captioning.values(): if not cc_url: continue subtitles.setdefault('en', []).append({ 'url': cc_url, }) return { 'id': video_id, 'title': title, 'description': try_get(video_data, lambda x: x['description']['primary']), 'thumbnail': try_get(video_data, lambda x: x['primaryImage']['url']['primary']), 'duration': parse_duration(video_data.get('duration')), 'timestamp': unified_timestamp(video_data.get('datePublished')), 'formats': formats, 'subtitles': subtitles, } class NBCOlympicsIE(InfoExtractor): IE_NAME = 'nbcolympics' _VALID_URL = r'https?://www\.nbcolympics\.com/video/(?P<id>[a-z-]+)' _TEST = { # Geo-restricted to US 'url': 'http://www.nbcolympics.com/video/justin-roses-son-leo-was-tears-after-his-dad-won-gold', 'md5': '54fecf846d05429fbaa18af557ee523a', 'info_dict': { 'id': 'WjTBzDXx5AUq', 'display_id': 'justin-roses-son-leo-was-tears-after-his-dad-won-gold', 'ext': 'mp4', 'title': 'Rose\'s son Leo was in tears after his dad won gold', 
'description': 'Olympic gold medalist Justin Rose gets emotional talking to the impact his win in men\'s golf has already had on his children.', 'timestamp': 1471274964, 'upload_date': '20160815', 'uploader': 'NBCU-SPORTS', }, } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) drupal_settings = self._parse_json(self._search_regex( r'jQuery\.extend\(Drupal\.settings\s*,\s*({.+?})\);', webpage, 'drupal settings'), display_id) iframe_url = drupal_settings['vod']['iframe_url'] theplatform_url = iframe_url.replace( 'vplayer.nbcolympics.com', 'player.theplatform.com') return { '_type': 'url_transparent', 'url': theplatform_url, 'ie_key': ThePlatformIE.ie_key(), 'display_id': display_id, } class NBCOlympicsStreamIE(AdobePassIE): IE_NAME = 'nbcolympics:stream' _VALID_URL = r'https?://stream\.nbcolympics\.com/(?P<id>[0-9a-z-]+)' _TEST = { 'url': 'http://stream.nbcolympics.com/2018-winter-olympics-nbcsn-evening-feb-8', 'info_dict': { 'id': '203493', 'ext': 'mp4', 'title': 're:Curling, Alpine, Luge [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', }, 'params': { # m3u8 download 'skip_download': True, }, } _DATA_URL_TEMPLATE = 'http://stream.nbcolympics.com/data/%s_%s.json' def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) pid = self._search_regex(r'pid\s*=\s*(\d+);', webpage, 'pid') resource = self._search_regex( r"resource\s*=\s*'(.+)';", webpage, 'resource').replace("' + pid + '", pid) event_config = self._download_json( self._DATA_URL_TEMPLATE % ('event_config', pid), pid)['eventConfig'] title = self._live_title(event_config['eventTitle']) source_url = self._download_json( self._DATA_URL_TEMPLATE % ('live_sources', pid), pid)['videoSources'][0]['sourceUrl'] media_token = self._extract_mvpd_auth( url, pid, event_config.get('requestorId', 'NBCOlympics'), resource) formats = self._extract_m3u8_formats(self._download_webpage( 
'http://sp.auth.adobe.com/tvs/v1/sign', pid, query={ 'cdn': 'akamai', 'mediaToken': base64.b64encode(media_token.encode()), 'resource': base64.b64encode(resource.encode()), 'url': source_url, }), pid, 'mp4') self._sort_formats(formats) return { 'id': pid, 'display_id': display_id, 'title': title, 'formats': formats, 'is_live': True, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/ndr.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( determine_ext, int_or_none, parse_iso8601, qualities, ) class NDRBaseIE(InfoExtractor): def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) display_id = next(group for group in mobj.groups() if group) webpage = self._download_webpage(url, display_id) return self._extract_embed(webpage, display_id) class NDRIE(NDRBaseIE): IE_NAME = 'ndr' IE_DESC = 'NDR.de - Norddeutscher Rundfunk' _VALID_URL = r'https?://(?:www\.)?ndr\.de/(?:[^/]+/)*(?P<id>[^/?#]+),[\da-z]+\.html' _TESTS = [{ # httpVideo, same content id 'url': 'http://www.ndr.de/fernsehen/Party-Poette-und-Parade,hafengeburtstag988.html', 'md5': '6515bc255dc5c5f8c85bbc38e035a659', 'info_dict': { 'id': 'hafengeburtstag988', 'display_id': 'Party-Poette-und-Parade', 'ext': 'mp4', 'title': 'Party, Pötte und Parade', 'description': 'md5:ad14f9d2f91d3040b6930c697e5f6b4c', 'uploader': 'ndrtv', 'timestamp': 1431108900, 'upload_date': '20150510', 'duration': 3498, }, 'params': { 'skip_download': True, }, }, { # httpVideo, different content id 'url': 'http://www.ndr.de/sport/fussball/40-Osnabrueck-spielt-sich-in-einen-Rausch,osna270.html', 'md5': '1043ff203eab307f0c51702ec49e9a71', 'info_dict': { 'id': 'osna272', 'display_id': '40-Osnabrueck-spielt-sich-in-einen-Rausch', 'ext': 'mp4', 'title': 'Osnabrück - Wehen Wiesbaden: Die Highlights', 'description': 'md5:32e9b800b3d2d4008103752682d5dc01', 'uploader': 'ndrtv', 'timestamp': 1442059200, 'upload_date': '20150912', 'duration': 510, }, 'params': { 'skip_download': True, }, }, { # httpAudio, same content id 'url': 'http://www.ndr.de/info/La-Valette-entgeht-der-Hinrichtung,audio51535.html', 'md5': 'bb3cd38e24fbcc866d13b50ca59307b8', 'info_dict': { 'id': 'audio51535', 'display_id': 'La-Valette-entgeht-der-Hinrichtung', 'ext': 'mp3', 'title': 'La Valette entgeht der Hinrichtung', 'description': 'md5:22f9541913a40fe50091d5cdd7c9f536', 'uploader': 
'ndrinfo', 'timestamp': 1290626100, 'upload_date': '20140729', 'duration': 884, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.ndr.de/Fettes-Brot-Ferris-MC-und-Thees-Uhlmann-live-on-stage,festivalsommer116.html', 'only_matching': True, }] def _extract_embed(self, webpage, display_id): embed_url = self._html_search_meta( 'embedURL', webpage, 'embed URL', fatal=True) description = self._search_regex( r'<p[^>]+itemprop="description">([^<]+)</p>', webpage, 'description', default=None) or self._og_search_description(webpage) timestamp = parse_iso8601( self._search_regex( r'<span[^>]+itemprop="(?:datePublished|uploadDate)"[^>]+content="([^"]+)"', webpage, 'upload date', fatal=False)) return { '_type': 'url_transparent', 'url': embed_url, 'display_id': display_id, 'description': description, 'timestamp': timestamp, } class NJoyIE(NDRBaseIE): IE_NAME = 'njoy' IE_DESC = 'N-JOY' _VALID_URL = r'https?://(?:www\.)?n-joy\.de/(?:[^/]+/)*(?:(?P<display_id>[^/?#]+),)?(?P<id>[\da-z]+)\.html' _TESTS = [{ # httpVideo, same content id 'url': 'http://www.n-joy.de/entertainment/comedy/comedy_contest/Benaissa-beim-NDR-Comedy-Contest,comedycontest2480.html', 'md5': 'cb63be60cd6f9dd75218803146d8dc67', 'info_dict': { 'id': 'comedycontest2480', 'display_id': 'Benaissa-beim-NDR-Comedy-Contest', 'ext': 'mp4', 'title': 'Benaissa beim NDR Comedy Contest', 'description': 'md5:f057a6c4e1c728b10d33b5ffd36ddc39', 'uploader': 'ndrtv', 'upload_date': '20141129', 'duration': 654, }, 'params': { 'skip_download': True, }, }, { # httpVideo, different content id 'url': 'http://www.n-joy.de/musik/Das-frueheste-DJ-Set-des-Nordens-live-mit-Felix-Jaehn-,felixjaehn168.html', 'md5': '417660fffa90e6df2fda19f1b40a64d8', 'info_dict': { 'id': 'dockville882', 'display_id': 'Das-frueheste-DJ-Set-des-Nordens-live-mit-Felix-Jaehn-', 'ext': 'mp4', 'title': '"Ich hab noch nie" mit Felix Jaehn', 'description': 'md5:85dd312d53be1b99e1f998a16452a2f3', 'uploader': 'njoy', 'upload_date': '20150822', 
'duration': 211, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.n-joy.de/radio/webradio/morningshow209.html', 'only_matching': True, }] def _extract_embed(self, webpage, display_id): video_id = self._search_regex( r'<iframe[^>]+id="pp_([\da-z]+)"', webpage, 'embed id') description = self._search_regex( r'<div[^>]+class="subline"[^>]*>[^<]+</div>\s*<p>([^<]+)</p>', webpage, 'description', fatal=False) return { '_type': 'url_transparent', 'ie_key': 'NDREmbedBase', 'url': 'ndr:%s' % video_id, 'display_id': display_id, 'description': description, } class NDREmbedBaseIE(InfoExtractor): IE_NAME = 'ndr:embed:base' _VALID_URL = r'(?:ndr:(?P<id_s>[\da-z]+)|https?://www\.ndr\.de/(?P<id>[\da-z]+)-ppjson\.json)' _TESTS = [{ 'url': 'ndr:soundcheck3366', 'only_matching': True, }, { 'url': 'http://www.ndr.de/soundcheck3366-ppjson.json', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') or mobj.group('id_s') ppjson = self._download_json( 'http://www.ndr.de/%s-ppjson.json' % video_id, video_id) playlist = ppjson['playlist'] formats = [] quality_key = qualities(('xs', 's', 'm', 'l', 'xl')) for format_id, f in playlist.items(): src = f.get('src') if not src: continue ext = determine_ext(src, None) if ext == 'f4m': formats.extend(self._extract_f4m_formats( src + '?hdcore=3.7.0&plugin=aasp-3.7.0.39.44', video_id, f4m_id='hds', fatal=False)) elif ext == 'm3u8': formats.extend(self._extract_m3u8_formats( src, video_id, 'mp4', m3u8_id='hls', entry_protocol='m3u8_native', fatal=False)) else: quality = f.get('quality') ff = { 'url': src, 'format_id': quality or format_id, 'quality': quality_key(quality), } type_ = f.get('type') if type_ and type_.split('/')[0] == 'audio': ff['vcodec'] = 'none' ff['ext'] = ext or 'mp3' formats.append(ff) self._sort_formats(formats) config = playlist['config'] live = playlist.get('config', {}).get('streamType') in ['httpVideoLive', 'httpAudioLive'] title = 
config['title'] if live: title = self._live_title(title) uploader = ppjson.get('config', {}).get('branding') upload_date = ppjson.get('config', {}).get('publicationDate') duration = int_or_none(config.get('duration')) thumbnails = [{ 'id': thumbnail.get('quality') or thumbnail_id, 'url': thumbnail['src'], 'preference': quality_key(thumbnail.get('quality')), } for thumbnail_id, thumbnail in config.get('poster', {}).items() if thumbnail.get('src')] return { 'id': video_id, 'title': title, 'is_live': live, 'uploader': uploader if uploader != '-' else None, 'upload_date': upload_date[0:8] if upload_date else None, 'duration': duration, 'thumbnails': thumbnails, 'formats': formats, } class NDREmbedIE(NDREmbedBaseIE): IE_NAME = 'ndr:embed' _VALID_URL = r'https?://(?:www\.)?ndr\.de/(?:[^/]+/)*(?P<id>[\da-z]+)-(?:player|externalPlayer)\.html' _TESTS = [{ 'url': 'http://www.ndr.de/fernsehen/sendungen/ndr_aktuell/ndraktuell28488-player.html', 'md5': '8b9306142fe65bbdefb5ce24edb6b0a9', 'info_dict': { 'id': 'ndraktuell28488', 'ext': 'mp4', 'title': 'Norddeutschland begrüßt Flüchtlinge', 'is_live': False, 'uploader': 'ndrtv', 'upload_date': '20150907', 'duration': 132, }, }, { 'url': 'http://www.ndr.de/ndr2/events/soundcheck/soundcheck3366-player.html', 'md5': '002085c44bae38802d94ae5802a36e78', 'info_dict': { 'id': 'soundcheck3366', 'ext': 'mp4', 'title': 'Ella Henderson braucht Vergleiche nicht zu scheuen', 'is_live': False, 'uploader': 'ndr2', 'upload_date': '20150912', 'duration': 3554, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.ndr.de/info/audio51535-player.html', 'md5': 'bb3cd38e24fbcc866d13b50ca59307b8', 'info_dict': { 'id': 'audio51535', 'ext': 'mp3', 'title': 'La Valette entgeht der Hinrichtung', 'is_live': False, 'uploader': 'ndrinfo', 'upload_date': '20140729', 'duration': 884, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.ndr.de/fernsehen/sendungen/visite/visite11010-externalPlayer.html', 'md5': 
'ae57f80511c1e1f2fd0d0d3d31aeae7c', 'info_dict': { 'id': 'visite11010', 'ext': 'mp4', 'title': 'Visite - die ganze Sendung', 'is_live': False, 'uploader': 'ndrtv', 'upload_date': '20150902', 'duration': 3525, }, 'params': { 'skip_download': True, }, }, { # httpVideoLive 'url': 'http://www.ndr.de/fernsehen/livestream/livestream217-externalPlayer.html', 'info_dict': { 'id': 'livestream217', 'ext': 'flv', 'title': r're:^NDR Fernsehen Niedersachsen \d{4}-\d{2}-\d{2} \d{2}:\d{2}$', 'is_live': True, 'upload_date': '20150910', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.ndr.de/ndrkultur/audio255020-player.html', 'only_matching': True, }, { 'url': 'http://www.ndr.de/fernsehen/sendungen/nordtour/nordtour7124-player.html', 'only_matching': True, }, { 'url': 'http://www.ndr.de/kultur/film/videos/videoimport10424-player.html', 'only_matching': True, }, { 'url': 'http://www.ndr.de/fernsehen/sendungen/hamburg_journal/hamj43006-player.html', 'only_matching': True, }, { 'url': 'http://www.ndr.de/fernsehen/sendungen/weltbilder/weltbilder4518-player.html', 'only_matching': True, }, { 'url': 'http://www.ndr.de/fernsehen/doku952-player.html', 'only_matching': True, }] class NJoyEmbedIE(NDREmbedBaseIE): IE_NAME = 'njoy:embed' _VALID_URL = r'https?://(?:www\.)?n-joy\.de/(?:[^/]+/)*(?P<id>[\da-z]+)-(?:player|externalPlayer)_[^/]+\.html' _TESTS = [{ # httpVideo 'url': 'http://www.n-joy.de/events/reeperbahnfestival/doku948-player_image-bc168e87-5263-4d6d-bd27-bb643005a6de_theme-n-joy.html', 'md5': '8483cbfe2320bd4d28a349d62d88bd74', 'info_dict': { 'id': 'doku948', 'ext': 'mp4', 'title': 'Zehn Jahre Reeperbahn Festival - die Doku', 'is_live': False, 'upload_date': '20150807', 'duration': 1011, }, }, { # httpAudio 'url': 'http://www.n-joy.de/news_wissen/stefanrichter100-player_image-d5e938b1-f21a-4b9a-86b8-aaba8bca3a13_theme-n-joy.html', 'md5': 'd989f80f28ac954430f7b8a48197188a', 'info_dict': { 'id': 'stefanrichter100', 'ext': 'mp3', 'title': 'Interview mit einem 
Augenzeugen', 'is_live': False, 'uploader': 'njoy', 'upload_date': '20150909', 'duration': 140, }, 'params': { 'skip_download': True, }, }, { # httpAudioLive, no explicit ext 'url': 'http://www.n-joy.de/news_wissen/webradioweltweit100-player_image-3fec0484-2244-4565-8fb8-ed25fd28b173_theme-n-joy.html', 'info_dict': { 'id': 'webradioweltweit100', 'ext': 'mp3', 'title': r're:^N-JOY Weltweit \d{4}-\d{2}-\d{2} \d{2}:\d{2}$', 'is_live': True, 'uploader': 'njoy', 'upload_date': '20150810', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.n-joy.de/musik/dockville882-player_image-3905259e-0803-4764-ac72-8b7de077d80a_theme-n-joy.html', 'only_matching': True, }, { 'url': 'http://www.n-joy.de/radio/sendungen/morningshow/urlaubsfotos190-player_image-066a5df1-5c95-49ec-a323-941d848718db_theme-n-joy.html', 'only_matching': True, }, { 'url': 'http://www.n-joy.de/entertainment/comedy/krudetv290-player_image-ab261bfe-51bf-4bf3-87ba-c5122ee35b3d_theme-n-joy.html', 'only_matching': True, }]
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/ndtv.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import (
    compat_urllib_parse_unquote_plus
)
from ..utils import (
    parse_duration,
    remove_end,
    unified_strdate,
    urljoin
)


class NDTVIE(InfoExtractor):
    # Matches all NDTV sub-sites (khabar, movies, sports, gadgets, ...);
    # the numeric suffix of the slug is the video id.
    _VALID_URL = r'https?://(?:[^/]+\.)?ndtv\.com/(?:[^/]+/)*videos?/?(?:[^/]+/)*[^/?^&]+-(?P<id>\d+)'

    _TESTS = [
        {
            'url': 'https://khabar.ndtv.com/video/show/prime-time/prime-time-ill-system-and-poor-education-468818',
            'md5': '78efcf3880ef3fd9b83d405ca94a38eb',
            'info_dict': {
                'id': '468818',
                'ext': 'mp4',
                'title': "प्राइम टाइम: सिस्टम बीमार, स्कूल बदहाल",
                'description': 'md5:f410512f1b49672e5695dea16ef2731d',
                'upload_date': '20170928',
                'duration': 2218,
                'thumbnail': r're:https?://.*\.jpg',
            }
        },
        {
            # __filename is url
            'url': 'http://movies.ndtv.com/videos/cracker-free-diwali-wishes-from-karan-johar-kriti-sanon-other-stars-470304',
            'md5': 'f1d709352305b44443515ac56b45aa46',
            'info_dict': {
                'id': '470304',
                'ext': 'mp4',
                'title': "Cracker-Free Diwali Wishes From Karan Johar, Kriti Sanon & Other Stars",
                'description': 'md5:f115bba1adf2f6433fa7c1ade5feb465',
                'upload_date': '20171019',
                'duration': 137,
                'thumbnail': r're:https?://.*\.jpg',
            }
        },
        {
            'url': 'https://www.ndtv.com/video/news/news/delhi-s-air-quality-status-report-after-diwali-is-very-poor-470372',
            'only_matching': True
        },
        {
            'url': 'https://auto.ndtv.com/videos/the-cnb-daily-october-13-2017-469935',
            'only_matching': True
        },
        {
            'url': 'https://sports.ndtv.com/cricket/videos/2nd-t20i-rock-thrown-at-australia-cricket-team-bus-after-win-over-india-469764',
            'only_matching': True
        },
        {
            'url': 'http://gadgets.ndtv.com/videos/uncharted-the-lost-legacy-review-465568',
            'only_matching': True
        },
        {
            'url': 'http://profit.ndtv.com/videos/news/video-indian-economy-on-very-solid-track-international-monetary-fund-chief-470040',
            'only_matching': True
        },
        {
            'url': 'http://food.ndtv.com/video-basil-seeds-coconut-porridge-419083',
            'only_matching': True
        },
        {
            'url': 'https://doctor.ndtv.com/videos/top-health-stories-of-the-week-467396',
            'only_matching': True
        },
        {
            'url': 'https://swirlster.ndtv.com/video/how-to-make-friends-at-work-469324',
            'only_matching': True
        }
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # '__title' does not contain extra words such as sub-site name, "Video" etc.
        encoded_title = self._search_regex(
            r"__title\s*=\s*'([^']+)'", webpage, 'title',
            default=None) or self._og_search_title(webpage)
        title = compat_urllib_parse_unquote_plus(encoded_title)

        media_path = self._search_regex(
            r"(?:__)?filename\s*[:=]\s*'([^']+)'", webpage, 'video filename')
        # in "movies" sub-site pages, filename is URL; urljoin handles both the
        # relative-path and the absolute-URL case.
        video_url = urljoin(
            'https://ndtvod.bc-ssl.cdn.bitgravity.com/23372/ndtv/',
            media_path.lstrip('/'))

        # "doctor" sub-site has MM:SS format
        duration = parse_duration(self._search_regex(
            r"(?:__)?duration\s*[:=]\s*'([^']+)'", webpage, 'duration',
            fatal=False))

        # "sports", "doctor", "swirlster" sub-sites don't have 'publish-date'
        upload_date = unified_strdate(
            self._html_search_meta(
                'publish-date', webpage, 'upload date', default=None)
            or self._html_search_meta(
                'uploadDate', webpage, 'upload date', default=None)
            or self._search_regex(
                r'datePublished"\s*:\s*"([^"]+)"', webpage, 'upload date',
                fatal=False))

        description = remove_end(
            self._og_search_description(webpage), ' (Read more)')

        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'description': description,
            'thumbnail': self._og_search_thumbnail(webpage),
            'duration': duration,
            'upload_date': upload_date,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/nerdcubed.py
# coding: utf-8 from __future__ import unicode_literals import datetime from .common import InfoExtractor class NerdCubedFeedIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?nerdcubed\.co\.uk/feed\.json' _TEST = { 'url': 'http://www.nerdcubed.co.uk/feed.json', 'info_dict': { 'id': 'nerdcubed-feed', 'title': 'nerdcubed.co.uk feed', }, 'playlist_mincount': 1300, } def _real_extract(self, url): feed = self._download_json(url, url, 'Downloading NerdCubed JSON feed') entries = [{ '_type': 'url', 'title': feed_entry['title'], 'uploader': feed_entry['source']['name'] if feed_entry['source'] else None, 'upload_date': datetime.datetime.strptime(feed_entry['date'], '%Y-%m-%d').strftime('%Y%m%d'), 'url': 'http://www.youtube.com/watch?v=' + feed_entry['youtube_id'], } for feed_entry in feed] return { '_type': 'playlist', 'title': 'nerdcubed.co.uk feed', 'id': 'nerdcubed-feed', 'entries': entries, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/neteasemusic.py
# coding: utf-8 from __future__ import unicode_literals from hashlib import md5 from base64 import b64encode from datetime import datetime import re from .common import InfoExtractor from ..compat import ( compat_urllib_parse_urlencode, compat_str, compat_itertools_count, ) from ..utils import ( sanitized_Request, float_or_none, ) class NetEaseMusicBaseIE(InfoExtractor): _FORMATS = ['bMusic', 'mMusic', 'hMusic'] _NETEASE_SALT = '3go8&$8*3*3h0k(2)2' _API_BASE = 'http://music.163.com/api/' @classmethod def _encrypt(cls, dfsid): salt_bytes = bytearray(cls._NETEASE_SALT.encode('utf-8')) string_bytes = bytearray(compat_str(dfsid).encode('ascii')) salt_len = len(salt_bytes) for i in range(len(string_bytes)): string_bytes[i] = string_bytes[i] ^ salt_bytes[i % salt_len] m = md5() m.update(bytes(string_bytes)) result = b64encode(m.digest()).decode('ascii') return result.replace('/', '_').replace('+', '-') def extract_formats(self, info): formats = [] for song_format in self._FORMATS: details = info.get(song_format) if not details: continue song_file_path = '/%s/%s.%s' % ( self._encrypt(details['dfsId']), details['dfsId'], details['extension']) # 203.130.59.9, 124.40.233.182, 115.231.74.139, etc is a reverse proxy-like feature # from NetEase's CDN provider that can be used if m5.music.126.net does not # work, especially for users outside of Mainland China # via: https://github.com/JixunMoe/unblock-163/issues/3#issuecomment-163115880 for host in ('http://m5.music.126.net', 'http://115.231.74.139/m1.music.126.net', 'http://124.40.233.182/m1.music.126.net', 'http://203.130.59.9/m1.music.126.net'): song_url = host + song_file_path if self._is_valid_url(song_url, info['id'], 'song'): formats.append({ 'url': song_url, 'ext': details.get('extension'), 'abr': float_or_none(details.get('bitrate'), scale=1000), 'format_id': song_format, 'filesize': details.get('size'), 'asr': details.get('sr') }) break return formats @classmethod def convert_milliseconds(cls, ms): return int(round(ms 
/ 1000.0)) def query_api(self, endpoint, video_id, note): req = sanitized_Request('%s%s' % (self._API_BASE, endpoint)) req.add_header('Referer', self._API_BASE) return self._download_json(req, video_id, note) class NetEaseMusicIE(NetEaseMusicBaseIE): IE_NAME = 'netease:song' IE_DESC = '网易云音乐' _VALID_URL = r'https?://music\.163\.com/(#/)?song\?id=(?P<id>[0-9]+)' _TESTS = [{ 'url': 'http://music.163.com/#/song?id=32102397', 'md5': 'f2e97280e6345c74ba9d5677dd5dcb45', 'info_dict': { 'id': '32102397', 'ext': 'mp3', 'title': 'Bad Blood (feat. Kendrick Lamar)', 'creator': 'Taylor Swift / Kendrick Lamar', 'upload_date': '20150517', 'timestamp': 1431878400, 'description': 'md5:a10a54589c2860300d02e1de821eb2ef', }, 'skip': 'Blocked outside Mainland China', }, { 'note': 'No lyrics translation.', 'url': 'http://music.163.com/#/song?id=29822014', 'info_dict': { 'id': '29822014', 'ext': 'mp3', 'title': '听见下雨的声音', 'creator': '周杰伦', 'upload_date': '20141225', 'timestamp': 1419523200, 'description': 'md5:a4d8d89f44656af206b7b2555c0bce6c', }, 'skip': 'Blocked outside Mainland China', }, { 'note': 'No lyrics.', 'url': 'http://music.163.com/song?id=17241424', 'info_dict': { 'id': '17241424', 'ext': 'mp3', 'title': 'Opus 28', 'creator': 'Dustin O\'Halloran', 'upload_date': '20080211', 'timestamp': 1202745600, }, 'skip': 'Blocked outside Mainland China', }, { 'note': 'Has translated name.', 'url': 'http://music.163.com/#/song?id=22735043', 'info_dict': { 'id': '22735043', 'ext': 'mp3', 'title': '소원을 말해봐 (Genie)', 'creator': '少女时代', 'description': 'md5:79d99cc560e4ca97e0c4d86800ee4184', 'upload_date': '20100127', 'timestamp': 1264608000, 'alt_title': '说出愿望吧(Genie)', }, 'skip': 'Blocked outside Mainland China', }] def _process_lyrics(self, lyrics_info): original = lyrics_info.get('lrc', {}).get('lyric') translated = lyrics_info.get('tlyric', {}).get('lyric') if not translated: return original lyrics_expr = r'(\[[0-9]{2}:[0-9]{2}\.[0-9]{2,}\])([^\n]+)' original_ts_texts = 
re.findall(lyrics_expr, original) translation_ts_dict = dict( (time_stamp, text) for time_stamp, text in re.findall(lyrics_expr, translated) ) lyrics = '\n'.join([ '%s%s / %s' % (time_stamp, text, translation_ts_dict.get(time_stamp, '')) for time_stamp, text in original_ts_texts ]) return lyrics def _real_extract(self, url): song_id = self._match_id(url) params = { 'id': song_id, 'ids': '[%s]' % song_id } info = self.query_api( 'song/detail?' + compat_urllib_parse_urlencode(params), song_id, 'Downloading song info')['songs'][0] formats = self.extract_formats(info) self._sort_formats(formats) lyrics_info = self.query_api( 'song/lyric?id=%s&lv=-1&tv=-1' % song_id, song_id, 'Downloading lyrics data') lyrics = self._process_lyrics(lyrics_info) alt_title = None if info.get('transNames'): alt_title = '/'.join(info.get('transNames')) return { 'id': song_id, 'title': info['name'], 'alt_title': alt_title, 'creator': ' / '.join([artist['name'] for artist in info.get('artists', [])]), 'timestamp': self.convert_milliseconds(info.get('album', {}).get('publishTime')), 'thumbnail': info.get('album', {}).get('picUrl'), 'duration': self.convert_milliseconds(info.get('duration', 0)), 'description': lyrics, 'formats': formats, } class NetEaseMusicAlbumIE(NetEaseMusicBaseIE): IE_NAME = 'netease:album' IE_DESC = '网易云音乐 - 专辑' _VALID_URL = r'https?://music\.163\.com/(#/)?album\?id=(?P<id>[0-9]+)' _TEST = { 'url': 'http://music.163.com/#/album?id=220780', 'info_dict': { 'id': '220780', 'title': 'B\'day', }, 'playlist_count': 23, 'skip': 'Blocked outside Mainland China', } def _real_extract(self, url): album_id = self._match_id(url) info = self.query_api( 'album/%s?id=%s' % (album_id, album_id), album_id, 'Downloading album data')['album'] name = info['name'] desc = info.get('description') entries = [ self.url_result('http://music.163.com/#/song?id=%s' % song['id'], 'NetEaseMusic', song['id']) for song in info['songs'] ] return self.playlist_result(entries, album_id, name, desc) class 
NetEaseMusicSingerIE(NetEaseMusicBaseIE): IE_NAME = 'netease:singer' IE_DESC = '网易云音乐 - 歌手' _VALID_URL = r'https?://music\.163\.com/(#/)?artist\?id=(?P<id>[0-9]+)' _TESTS = [{ 'note': 'Singer has aliases.', 'url': 'http://music.163.com/#/artist?id=10559', 'info_dict': { 'id': '10559', 'title': '张惠妹 - aMEI;阿密特', }, 'playlist_count': 50, 'skip': 'Blocked outside Mainland China', }, { 'note': 'Singer has translated name.', 'url': 'http://music.163.com/#/artist?id=124098', 'info_dict': { 'id': '124098', 'title': '李昇基 - 이승기', }, 'playlist_count': 50, 'skip': 'Blocked outside Mainland China', }] def _real_extract(self, url): singer_id = self._match_id(url) info = self.query_api( 'artist/%s?id=%s' % (singer_id, singer_id), singer_id, 'Downloading singer data') name = info['artist']['name'] if info['artist']['trans']: name = '%s - %s' % (name, info['artist']['trans']) if info['artist']['alias']: name = '%s - %s' % (name, ';'.join(info['artist']['alias'])) entries = [ self.url_result('http://music.163.com/#/song?id=%s' % song['id'], 'NetEaseMusic', song['id']) for song in info['hotSongs'] ] return self.playlist_result(entries, singer_id, name) class NetEaseMusicListIE(NetEaseMusicBaseIE): IE_NAME = 'netease:playlist' IE_DESC = '网易云音乐 - 歌单' _VALID_URL = r'https?://music\.163\.com/(#/)?(playlist|discover/toplist)\?id=(?P<id>[0-9]+)' _TESTS = [{ 'url': 'http://music.163.com/#/playlist?id=79177352', 'info_dict': { 'id': '79177352', 'title': 'Billboard 2007 Top 100', 'description': 'md5:12fd0819cab2965b9583ace0f8b7b022' }, 'playlist_count': 99, 'skip': 'Blocked outside Mainland China', }, { 'note': 'Toplist/Charts sample', 'url': 'http://music.163.com/#/discover/toplist?id=3733003', 'info_dict': { 'id': '3733003', 'title': 're:韩国Melon排行榜周榜 [0-9]{4}-[0-9]{2}-[0-9]{2}', 'description': 'md5:73ec782a612711cadc7872d9c1e134fc', }, 'playlist_count': 50, 'skip': 'Blocked outside Mainland China', }] def _real_extract(self, url): list_id = self._match_id(url) info = self.query_api( 
'playlist/detail?id=%s&lv=-1&tv=-1' % list_id, list_id, 'Downloading playlist data')['result'] name = info['name'] desc = info.get('description') if info.get('specialType') == 10: # is a chart/toplist datestamp = datetime.fromtimestamp( self.convert_milliseconds(info['updateTime'])).strftime('%Y-%m-%d') name = '%s %s' % (name, datestamp) entries = [ self.url_result('http://music.163.com/#/song?id=%s' % song['id'], 'NetEaseMusic', song['id']) for song in info['tracks'] ] return self.playlist_result(entries, list_id, name, desc) class NetEaseMusicMvIE(NetEaseMusicBaseIE): IE_NAME = 'netease:mv' IE_DESC = '网易云音乐 - MV' _VALID_URL = r'https?://music\.163\.com/(#/)?mv\?id=(?P<id>[0-9]+)' _TEST = { 'url': 'http://music.163.com/#/mv?id=415350', 'info_dict': { 'id': '415350', 'ext': 'mp4', 'title': '이럴거면 그러지말지', 'description': '白雅言自作曲唱甜蜜爱情', 'creator': '白雅言', 'upload_date': '20150520', }, 'skip': 'Blocked outside Mainland China', } def _real_extract(self, url): mv_id = self._match_id(url) info = self.query_api( 'mv/detail?id=%s&type=mp4' % mv_id, mv_id, 'Downloading mv info')['data'] formats = [ {'url': mv_url, 'ext': 'mp4', 'format_id': '%sp' % brs, 'height': int(brs)} for brs, mv_url in info['brs'].items() ] self._sort_formats(formats) return { 'id': mv_id, 'title': info['name'], 'description': info.get('desc') or info.get('briefDesc'), 'creator': info['artistName'], 'upload_date': info['publishTime'].replace('-', ''), 'formats': formats, 'thumbnail': info.get('cover'), 'duration': self.convert_milliseconds(info.get('duration', 0)), } class NetEaseMusicProgramIE(NetEaseMusicBaseIE): IE_NAME = 'netease:program' IE_DESC = '网易云音乐 - 电台节目' _VALID_URL = r'https?://music\.163\.com/(#/?)program\?id=(?P<id>[0-9]+)' _TESTS = [{ 'url': 'http://music.163.com/#/program?id=10109055', 'info_dict': { 'id': '10109055', 'ext': 'mp3', 'title': '不丹足球背后的故事', 'description': '喜马拉雅人的足球梦 ...', 'creator': '大话西藏', 'timestamp': 1434179342, 'upload_date': '20150613', 'duration': 900, }, 'skip': 
'Blocked outside Mainland China', }, { 'note': 'This program has accompanying songs.', 'url': 'http://music.163.com/#/program?id=10141022', 'info_dict': { 'id': '10141022', 'title': '25岁,你是自在如风的少年<27°C>', 'description': 'md5:8d594db46cc3e6509107ede70a4aaa3b', }, 'playlist_count': 4, 'skip': 'Blocked outside Mainland China', }, { 'note': 'This program has accompanying songs.', 'url': 'http://music.163.com/#/program?id=10141022', 'info_dict': { 'id': '10141022', 'ext': 'mp3', 'title': '25岁,你是自在如风的少年<27°C>', 'description': 'md5:8d594db46cc3e6509107ede70a4aaa3b', 'timestamp': 1434450841, 'upload_date': '20150616', }, 'params': { 'noplaylist': True }, 'skip': 'Blocked outside Mainland China', }] def _real_extract(self, url): program_id = self._match_id(url) info = self.query_api( 'dj/program/detail?id=%s' % program_id, program_id, 'Downloading program info')['program'] name = info['name'] description = info['description'] if not info['songs'] or self._downloader.params.get('noplaylist'): if info['songs']: self.to_screen( 'Downloading just the main audio %s because of --no-playlist' % info['mainSong']['id']) formats = self.extract_formats(info['mainSong']) self._sort_formats(formats) return { 'id': program_id, 'title': name, 'description': description, 'creator': info['dj']['brand'], 'timestamp': self.convert_milliseconds(info['createTime']), 'thumbnail': info['coverUrl'], 'duration': self.convert_milliseconds(info.get('duration', 0)), 'formats': formats, } self.to_screen( 'Downloading playlist %s - add --no-playlist to just download the main audio %s' % (program_id, info['mainSong']['id'])) song_ids = [info['mainSong']['id']] song_ids.extend([song['id'] for song in info['songs']]) entries = [ self.url_result('http://music.163.com/#/song?id=%s' % song_id, 'NetEaseMusic', song_id) for song_id in song_ids ] return self.playlist_result(entries, program_id, name, description) class NetEaseMusicDjRadioIE(NetEaseMusicBaseIE): IE_NAME = 'netease:djradio' IE_DESC = '网易云音乐 - 电台' 
_VALID_URL = r'https?://music\.163\.com/(#/)?djradio\?id=(?P<id>[0-9]+)' _TEST = { 'url': 'http://music.163.com/#/djradio?id=42', 'info_dict': { 'id': '42', 'title': '声音蔓延', 'description': 'md5:766220985cbd16fdd552f64c578a6b15' }, 'playlist_mincount': 40, 'skip': 'Blocked outside Mainland China', } _PAGE_SIZE = 1000 def _real_extract(self, url): dj_id = self._match_id(url) name = None desc = None entries = [] for offset in compat_itertools_count(start=0, step=self._PAGE_SIZE): info = self.query_api( 'dj/program/byradio?asc=false&limit=%d&radioId=%s&offset=%d' % (self._PAGE_SIZE, dj_id, offset), dj_id, 'Downloading dj programs - %d' % offset) entries.extend([ self.url_result( 'http://music.163.com/#/program?id=%s' % program['id'], 'NetEaseMusicProgram', program['id']) for program in info['programs'] ]) if name is None: radio = info['programs'][0]['radio'] name = radio['name'] desc = radio['desc'] if not info['more']: break return self.playlist_result(entries, dj_id, name, desc)
[]
[]
[]