zip
stringlengths
19
109
filename
stringlengths
4
185
contents
stringlengths
0
30.1M
type_annotations
listlengths
0
1.97k
type_annotation_starts
listlengths
0
1.97k
type_annotation_ends
listlengths
0
1.97k
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/redbulltv.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (
    float_or_none,
    ExtractorError,
)


class RedBullTVIE(InfoExtractor):
    """Extractor for Red Bull TV videos and live streams (AP-… ids)."""
    _VALID_URL = r'https?://(?:www\.)?redbull(?:\.tv|\.com(?:/[^/]+)?(?:/tv)?)(?:/events/[^/]+)?/(?:videos?|live)/(?P<id>AP-\w+)'
    _TESTS = [{
        # film
        'url': 'https://www.redbull.tv/video/AP-1Q6XCDTAN1W11',
        'md5': 'fb0445b98aa4394e504b413d98031d1f',
        'info_dict': {
            'id': 'AP-1Q6XCDTAN1W11',
            'ext': 'mp4',
            'title': 'ABC of... WRC - ABC of... S1E6',
            'description': 'md5:5c7ed8f4015c8492ecf64b6ab31e7d31',
            'duration': 1582.04,
        },
    }, {
        # episode
        'url': 'https://www.redbull.tv/video/AP-1PMHKJFCW1W11',
        'info_dict': {
            'id': 'AP-1PMHKJFCW1W11',
            'ext': 'mp4',
            'title': 'Grime - Hashtags S2E4',
            'description': 'md5:b5f522b89b72e1e23216e5018810bb25',
            'duration': 904.6,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://www.redbull.com/int-en/tv/video/AP-1UWHCAR9S1W11/rob-meets-sam-gaze?playlist=playlists::3f81040a-2f31-4832-8e2e-545b1d39d173',
        'only_matching': True,
    }, {
        'url': 'https://www.redbull.com/us-en/videos/AP-1YM9QCYE52111',
        'only_matching': True,
    }, {
        'url': 'https://www.redbull.com/us-en/events/AP-1XV2K61Q51W11/live/AP-1XUJ86FDH1W11',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Obtain a session token, query the product API, and build the info dict."""
        video_id = self._match_id(url)

        # An anonymous session token is required both for the product API
        # call and inside the HLS playlist URL further down.
        session = self._download_json(
            'https://api.redbull.tv/v3/session', video_id,
            note='Downloading access token', query={
                'category': 'personal_computer',
                'os_family': 'http',
            })
        if session.get('code') == 'error':
            raise ExtractorError('%s said: %s' % (
                self.IE_NAME, session['message']))
        token = session['token']

        try:
            video = self._download_json(
                'https://api.redbull.tv/v3/products/' + video_id,
                video_id, note='Downloading video information',
                headers={'Authorization': token}
            )
        except ExtractorError as e:
            # On 404 surface the API's own error message instead of a
            # generic HTTP error.
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
                error_message = self._parse_json(
                    e.cause.read().decode(), video_id)['error']
                raise ExtractorError('%s said: %s' % (
                    self.IE_NAME, error_message), expected=True)
            raise

        title = video['title'].strip()

        formats = self._extract_m3u8_formats(
            'https://dms.redbull.tv/v3/%s/%s/playlist.m3u8' % (video_id, token),
            video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls')
        self._sort_formats(formats)

        subtitles = {}
        for resource in video.get('resources', []):
            # Caption resources look like 'closed_caption_<lang?>_<ext>';
            # the third underscore-separated component is used as the
            # subtitle file extension.
            if resource.startswith('closed_caption_'):
                splitted_resource = resource.split('_')
                if splitted_resource[2]:
                    subtitles.setdefault('en', []).append({
                        'url': 'https://resources.redbull.tv/%s/%s' % (video_id, resource),
                        'ext': splitted_resource[2],
                    })

        # Episodes carry a subheading (e.g. episode name) appended to the title.
        subheading = video.get('subheading')
        if subheading:
            title += ' - %s' % subheading

        return {
            'id': video_id,
            'title': title,
            'description': video.get('long_description') or video.get(
                'short_description'),
            # API reports duration in milliseconds.
            'duration': float_or_none(video.get('duration'), scale=1000),
            'formats': formats,
            'subtitles': subtitles,
        }


class RedBullTVRrnContentIE(InfoExtractor):
    """Resolver for rrn:content URLs; delegates to RedBullTVIE via og:url."""
    _VALID_URL = r'https?://(?:www\.)?redbull(?:\.tv|\.com(?:/[^/]+)?(?:/tv)?)/(?:video|live)/rrn:content:[^:]+:(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
    _TESTS = [{
        'url': 'https://www.redbull.com/int-en/tv/video/rrn:content:live-videos:e3e6feb4-e95f-50b7-962a-c70f8fd13c73/mens-dh-finals-fort-william',
        'only_matching': True,
    }, {
        'url': 'https://www.redbull.com/int-en/tv/video/rrn:content:videos:a36a0f36-ff1b-5db8-a69d-ee11a14bf48b/tn-ts-style?playlist=rrn:content:event-profiles:83f05926-5de8-5389-b5e4-9bb312d715e8:extras',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)

        # The page's og:url meta tag points at the canonical AP-… video URL.
        video_url = self._og_search_url(webpage)

        return self.url_result(
            video_url, ie=RedBullTVIE.ie_key(),
            video_id=RedBullTVIE._match_id(video_url))
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/reddit.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( ExtractorError, int_or_none, float_or_none, url_or_none, ) class RedditIE(InfoExtractor): _VALID_URL = r'https?://v\.redd\.it/(?P<id>[^/?#&]+)' _TEST = { # from https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/ 'url': 'https://v.redd.it/zv89llsvexdz', 'md5': '0a070c53eba7ec4534d95a5a1259e253', 'info_dict': { 'id': 'zv89llsvexdz', 'ext': 'mp4', 'title': 'zv89llsvexdz', }, 'params': { 'format': 'bestvideo', }, } def _real_extract(self, url): video_id = self._match_id(url) formats = self._extract_m3u8_formats( 'https://v.redd.it/%s/HLSPlaylist.m3u8' % video_id, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False) formats.extend(self._extract_mpd_formats( 'https://v.redd.it/%s/DASHPlaylist.mpd' % video_id, video_id, mpd_id='dash', fatal=False)) self._sort_formats(formats) return { 'id': video_id, 'title': video_id, 'formats': formats, } class RedditRIE(InfoExtractor): _VALID_URL = r'(?P<url>https?://(?:[^/]+\.)?reddit\.com/r/[^/]+/comments/(?P<id>[^/?#&]+))' _TESTS = [{ 'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/', 'info_dict': { 'id': 'zv89llsvexdz', 'ext': 'mp4', 'title': 'That small heart attack.', 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1501941939, 'upload_date': '20170805', 'uploader': 'Antw87', 'like_count': int, 'dislike_count': int, 'comment_count': int, 'age_limit': 0, }, 'params': { 'format': 'bestvideo', 'skip_download': True, }, }, { 'url': 'https://www.reddit.com/r/videos/comments/6rrwyj', 'only_matching': True, }, { # imgur 'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/', 'only_matching': True, }, { # imgur @ old reddit 'url': 'https://old.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/', 'only_matching': True, }, { # streamable 'url': 
'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/', 'only_matching': True, }, { # youtube 'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/', 'only_matching': True, }, { # reddit video @ nm reddit 'url': 'https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) url, video_id = mobj.group('url', 'id') video_id = self._match_id(url) data = self._download_json( url + '/.json', video_id)[0]['data']['children'][0]['data'] video_url = data['url'] # Avoid recursing into the same reddit URL if 'reddit.com/' in video_url and '/%s/' % video_id in video_url: raise ExtractorError('No media found', expected=True) over_18 = data.get('over_18') if over_18 is True: age_limit = 18 elif over_18 is False: age_limit = 0 else: age_limit = None return { '_type': 'url_transparent', 'url': video_url, 'title': data.get('title'), 'thumbnail': url_or_none(data.get('thumbnail')), 'timestamp': float_or_none(data.get('created_utc')), 'uploader': data.get('author'), 'like_count': int_or_none(data.get('ups')), 'dislike_count': int_or_none(data.get('downs')), 'comment_count': int_or_none(data.get('num_comments')), 'age_limit': age_limit, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/redtube.py
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    merge_dicts,
    str_to_int,
    unified_strdate,
    url_or_none,
)


class RedTubeIE(InfoExtractor):
    """Extractor for redtube.com pages and embed.redtube.com iframes."""
    _VALID_URL = r'https?://(?:(?:www\.)?redtube\.com/|embed\.redtube\.com/\?.*?\bid=)(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'http://www.redtube.com/66418',
        'md5': 'fc08071233725f26b8f014dba9590005',
        'info_dict': {
            'id': '66418',
            'ext': 'mp4',
            'title': 'Sucked on a toilet',
            'upload_date': '20110811',
            'duration': 596,
            'view_count': int,
            'age_limit': 18,
        }
    }, {
        'url': 'http://embed.redtube.com/?bgcolor=000000&id=1443286',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_urls(webpage):
        # Find redtube embed iframes on third-party pages.
        return re.findall(
            r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//embed\.redtube\.com/\?.*?\bid=\d+)',
            webpage)

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Always fetch the canonical www page, even for embed URLs.
        webpage = self._download_webpage(
            'http://www.redtube.com/%s' % video_id, video_id)

        if any(s in webpage for s in ['video-deleted-info', '>This video has been removed']):
            raise ExtractorError('Video %s has been removed' % video_id, expected=True)

        # Prefer structured JSON-LD metadata; fall back to HTML scraping below.
        info = self._search_json_ld(webpage, video_id, default={})

        if not info.get('title'):
            info['title'] = self._html_search_regex(
                (r'<h(\d)[^>]+class="(?:video_title_text|videoTitle)[^"]*">(?P<title>(?:(?!\1).)+)</h\1>',
                 r'(?:videoTitle|title)\s*:\s*(["\'])(?P<title>(?:(?!\1).)+)\1',),
                webpage, 'title', group='title',
                default=None) or self._og_search_title(webpage)

        formats = []
        # First source of formats: the inline 'sources' JS object
        # (keys look like heights, hence int_or_none(format_id)).
        sources = self._parse_json(
            self._search_regex(
                r'sources\s*:\s*({.+?})', webpage, 'source', default='{}'),
            video_id, fatal=False)
        if sources and isinstance(sources, dict):
            for format_id, format_url in sources.items():
                if format_url:
                    formats.append({
                        'url': format_url,
                        'format_id': format_id,
                        'height': int_or_none(format_id),
                    })
        # Second source: the 'mediaDefinition' JS array.  NOTE(review): the
        # default '{}' parses to a dict, which the isinstance(list) guard
        # below then rejects — effectively "no media definitions".
        medias = self._parse_json(
            self._search_regex(
                r'mediaDefinition\s*:\s*(\[.+?\])', webpage,
                'media definitions', default='{}'),
            video_id, fatal=False)
        if medias and isinstance(medias, list):
            for media in medias:
                format_url = url_or_none(media.get('videoUrl'))
                if not format_url:
                    continue
                format_id = media.get('quality')
                formats.append({
                    'url': format_url,
                    'format_id': format_id,
                    'height': int_or_none(format_id),
                })
        # Last resort: a plain <source> tag (fatal if absent).
        if not formats:
            video_url = self._html_search_regex(
                r'<source src="(.+?)" type="video/mp4">', webpage, 'video URL')
            formats.append({'url': video_url})
        self._sort_formats(formats)

        thumbnail = self._og_search_thumbnail(webpage)
        upload_date = unified_strdate(self._search_regex(
            r'<span[^>]+>(?:ADDED|Published on) ([^<]+)<',
            webpage, 'upload date', default=None))
        duration = int_or_none(self._og_search_property(
            'video:duration', webpage, default=None) or self._search_regex(
            r'videoDuration\s*:\s*(\d+)', webpage, 'duration', default=None))
        view_count = str_to_int(self._search_regex(
            (r'<div[^>]*>Views</div>\s*<div[^>]*>\s*([\d,.]+)',
             r'<span[^>]*>VIEWS</span>\s*</td>\s*<td>\s*([\d,.]+)',
             r'<span[^>]+\bclass=["\']video_view_count[^>]*>\s*([\d,.]+)'),
            webpage, 'view count', default=None))

        # No self-labeling, but they describe themselves as
        # "Home of Videos Porno"
        age_limit = 18

        # merge_dicts keeps JSON-LD values where present, filling gaps
        # with the scraped values above.
        return merge_dicts(info, {
            'id': video_id,
            'ext': 'mp4',
            'thumbnail': thumbnail,
            'upload_date': upload_date,
            'duration': duration,
            'view_count': view_count,
            'age_limit': age_limit,
            'formats': formats,
        })
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/regiotv.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( sanitized_Request, xpath_text, xpath_with_ns, ) class RegioTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?regio-tv\.de/video/(?P<id>[0-9]+)' _TESTS = [{ 'url': 'http://www.regio-tv.de/video/395808.html', 'info_dict': { 'id': '395808', 'ext': 'mp4', 'title': 'Wir in Ludwigsburg', 'description': 'Mit unseren zuckersüßen Adventskindern, außerdem besuchen wir die Abendsterne!', } }, { 'url': 'http://www.regio-tv.de/video/395808', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) key = self._search_regex( r'key\s*:\s*(["\'])(?P<key>.+?)\1', webpage, 'key', group='key') title = self._og_search_title(webpage) SOAP_TEMPLATE = '<?xml version="1.0" encoding="utf-8"?><soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"><soap:Body><{0} xmlns="http://v.telvi.de/"><key xsi:type="xsd:string">{1}</key></{0}></soap:Body></soap:Envelope>' request = sanitized_Request( 'http://v.telvi.de/', SOAP_TEMPLATE.format('GetHTML5VideoData', key).encode('utf-8')) video_data = self._download_xml(request, video_id, 'Downloading video XML') NS_MAP = { 'xsi': 'http://www.w3.org/2001/XMLSchema-instance', 'soap': 'http://schemas.xmlsoap.org/soap/envelope/', } video_url = xpath_text( video_data, xpath_with_ns('.//video', NS_MAP), 'video url', fatal=True) thumbnail = xpath_text( video_data, xpath_with_ns('.//image', NS_MAP), 'thumbnail') description = self._og_search_description( webpage) or self._html_search_meta('description', webpage) return { 'id': video_id, 'url': video_url, 'title': title, 'description': description, 'thumbnail': thumbnail, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/rentv.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    determine_ext,
    int_or_none,
    url_or_none,
)


class RENTVIE(InfoExtractor):
    """Extractor for ren.tv player/episode pages (also 'rentv:<id>' scheme)."""
    _VALID_URL = r'(?:rentv:|https?://(?:www\.)?ren\.tv/(?:player|video/epizod)/)(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://ren.tv/video/epizod/118577',
        'md5': 'd91851bf9af73c0ad9b2cdf76c127fbb',
        'info_dict': {
            'id': '118577',
            'ext': 'mp4',
            'title': 'Документальный спецпроект: "Промывка мозгов. Технологии XXI века"',
            'timestamp': 1472230800,
            'upload_date': '20160826',
        }
    }, {
        'url': 'http://ren.tv/player/118577',
        'only_matching': True,
    }, {
        'url': 'rentv:118577',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # All URL variants resolve through the same player page.
        webpage = self._download_webpage('http://ren.tv/player/' + video_id, video_id)
        # Player configuration is an inline JS object assignment.
        config = self._parse_json(self._search_regex(
            r'config\s*=\s*({.+})\s*;', webpage, 'config'), video_id)
        title = config['title']
        formats = []
        for video in config['src']:
            src = url_or_none(video.get('src'))
            if not src:
                continue
            ext = determine_ext(src)
            if ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    src, video_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id='hls', fatal=False))
            else:
                formats.append({
                    'url': src,
                })
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': title,
            'description': config.get('description'),
            'thumbnail': config.get('image'),
            'duration': int_or_none(config.get('duration')),
            'timestamp': int_or_none(config.get('date')),
            'formats': formats,
        }


class RENTVArticleIE(InfoExtractor):
    """Extractor for ren.tv news articles; yields a playlist of embedded players."""
    _VALID_URL = r'https?://(?:www\.)?ren\.tv/novosti/\d{4}-\d{2}-\d{2}/(?P<id>[^/?#]+)'
    _TESTS = [{
        'url': 'http://ren.tv/novosti/2016-10-26/video-mikroavtobus-popavshiy-v-dtp-s-gruzovikami-v-podmoskove-prevratilsya-v',
        'md5': 'ebd63c4680b167693745ab91343df1d6',
        'info_dict': {
            'id': '136472',
            'ext': 'mp4',
            'title': 'Видео: микроавтобус, попавший в ДТП с грузовиками в Подмосковье, превратился в груду металла',
            'description': 'Жертвами столкновения двух фур и микроавтобуса, по последним данным, стали семь человек.',
        }
    }, {
        # TODO: invalid m3u8
        'url': 'http://ren.tv/novosti/2015-09-25/sluchaynyy-prohozhiy-poymal-avtougonshchika-v-murmanske-video',
        'info_dict': {
            'id': 'playlist',
            'ext': 'mp4',
            'title': 'Случайный прохожий поймал автоугонщика в Мурманске. ВИДЕО | РЕН ТВ',
            'uploader': 'ren.tv',
        },
        'params': {
            # m3u8 downloads
            'skip_download': True,
        },
        'skip': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # Media ids live inside the Drupal settings JS blob.
        drupal_settings = self._parse_json(self._search_regex(
            r'jQuery\.extend\(Drupal\.settings\s*,\s*({.+?})\);',
            webpage, 'drupal settings'), display_id)
        entries = []
        for config_profile in drupal_settings.get('ren_jwplayer', {}).values():
            media_id = config_profile.get('mediaid')
            if not media_id:
                continue
            media_id = compat_str(media_id)
            # Delegate each embedded player to RENTVIE via the rentv: scheme.
            entries.append(self.url_result('rentv:' + media_id, 'RENTV', media_id))
        return self.playlist_result(entries, display_id)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/restudy.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class RestudyIE(InfoExtractor): _VALID_URL = r'https?://(?:(?:www|portal)\.)?restudy\.dk/video/[^/]+/id/(?P<id>[0-9]+)' _TESTS = [{ 'url': 'https://www.restudy.dk/video/play/id/1637', 'info_dict': { 'id': '1637', 'ext': 'flv', 'title': 'Leiden-frosteffekt', 'description': 'Denne video er et eksperiment med flydende kvælstof.', }, 'params': { # rtmp download 'skip_download': True, } }, { 'url': 'https://portal.restudy.dk/video/leiden-frosteffekt/id/1637', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._og_search_title(webpage).strip() description = self._og_search_description(webpage).strip() formats = self._extract_smil_formats( 'https://cdn.portal.restudy.dk/dynamic/themes/front/awsmedia/SmilDirectory/video_%s.xml' % video_id, video_id) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'description': description, 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/reuters.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( js_to_json, int_or_none, unescapeHTML, ) class ReutersIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?reuters\.com/.*?\?.*?videoId=(?P<id>[0-9]+)' _TEST = { 'url': 'http://www.reuters.com/video/2016/05/20/san-francisco-police-chief-resigns?videoId=368575562', 'md5': '8015113643a0b12838f160b0b81cc2ee', 'info_dict': { 'id': '368575562', 'ext': 'mp4', 'title': 'San Francisco police chief resigns', } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage( 'http://www.reuters.com/assets/iframe/yovideo?videoId=%s' % video_id, video_id) video_data = js_to_json(self._search_regex( r'(?s)Reuters\.yovideo\.drawPlayer\(({.*?})\);', webpage, 'video data')) def get_json_value(key, fatal=False): return self._search_regex(r'"%s"\s*:\s*"([^"]+)"' % key, video_data, key, fatal=fatal) title = unescapeHTML(get_json_value('title', fatal=True)) mmid, fid = re.search(r',/(\d+)\?f=(\d+)', get_json_value('flv', fatal=True)).groups() mas_data = self._download_json( 'http://mas-e.cds1.yospace.com/mas/%s/%s?trans=json' % (mmid, fid), video_id, transform_source=js_to_json) formats = [] for f in mas_data: f_url = f.get('url') if not f_url: continue method = f.get('method') if method == 'hls': formats.extend(self._extract_m3u8_formats( f_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) else: container = f.get('container') ext = '3gp' if method == 'mobile' else container formats.append({ 'format_id': ext, 'url': f_url, 'ext': ext, 'container': container if method != 'mobile' else None, }) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'thumbnail': get_json_value('thumb'), 'duration': int_or_none(get_json_value('seconds')), 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/reverbnation.py
from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( qualities, str_or_none, ) class ReverbNationIE(InfoExtractor): _VALID_URL = r'^https?://(?:www\.)?reverbnation\.com/.*?/song/(?P<id>\d+).*?$' _TESTS = [{ 'url': 'http://www.reverbnation.com/alkilados/song/16965047-mona-lisa', 'md5': 'c0aaf339bcee189495fdf5a8c8ba8645', 'info_dict': { 'id': '16965047', 'ext': 'mp3', 'title': 'MONA LISA', 'uploader': 'ALKILADOS', 'uploader_id': '216429', 'thumbnail': r're:^https?://.*\.jpg', }, }] def _real_extract(self, url): song_id = self._match_id(url) api_res = self._download_json( 'https://api.reverbnation.com/song/%s' % song_id, song_id, note='Downloading information of song %s' % song_id ) THUMBNAILS = ('thumbnail', 'image') quality = qualities(THUMBNAILS) thumbnails = [] for thumb_key in THUMBNAILS: if api_res.get(thumb_key): thumbnails.append({ 'url': api_res[thumb_key], 'preference': quality(thumb_key) }) return { 'id': song_id, 'title': api_res['name'], 'url': api_res['url'], 'uploader': api_res.get('artist', {}).get('name'), 'uploader_id': str_or_none(api_res.get('artist', {}).get('id')), 'thumbnails': thumbnails, 'ext': 'mp3', 'vcodec': 'none', }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/revision3.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    int_or_none,
    parse_iso8601,
    unescapeHTML,
    qualities,
)


class Revision3EmbedIE(InfoExtractor):
    """Extractor for Revision3/Seeker embedded players ('revision3:' scheme too)."""
    IE_NAME = 'revision3:embed'
    _VALID_URL = r'(?:revision3:(?:(?P<playlist_type>[^:]+):)?|https?://(?:(?:(?:www|embed)\.)?(?:revision3|animalist)|(?:(?:api|embed)\.)?seekernetwork)\.com/player/embed\?videoId=)(?P<playlist_id>\d+)'
    _TEST = {
        'url': 'http://api.seekernetwork.com/player/embed?videoId=67558',
        'md5': '83bcd157cab89ad7318dd7b8c9cf1306',
        'info_dict': {
            'id': '67558',
            'ext': 'mp4',
            'title': 'The Pros & Cons Of Zoos',
            'description': 'Zoos are often depicted as a terrible place for animals to live, but is there any truth to this?',
            'uploader_id': 'dnews',
            'uploader': 'DNews',
        }
    }
    _API_KEY = 'ba9c741bce1b9d8e3defcc22193f3651b8867e62'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        playlist_id = mobj.group('playlist_id')
        # The playlist_type group (from the 'revision3:' scheme) selects the
        # API query parameter name; plain embeds default to 'video_id'.
        playlist_type = mobj.group('playlist_type') or 'video_id'
        video_data = self._download_json(
            'http://revision3.com/api/getPlaylist.json', playlist_id, query={
                'api_key': self._API_KEY,
                'codecs': 'h264,vp8,theora',
                playlist_type: playlist_id,
            })['items'][0]

        formats = []
        # media is keyed by codec, then by quality id ('hls' is a manifest).
        for vcodec, media in video_data['media'].items():
            for quality_id, quality in media.items():
                if quality_id == 'hls':
                    formats.extend(self._extract_m3u8_formats(
                        quality['url'], playlist_id, 'mp4',
                        'm3u8_native', m3u8_id='hls', fatal=False))
                else:
                    formats.append({
                        'url': quality['url'],
                        'format_id': '%s-%s' % (vcodec, quality_id),
                        'tbr': int_or_none(quality.get('bitrate')),
                        'vcodec': vcodec,
                    })
        self._sort_formats(formats)

        return {
            'id': playlist_id,
            'title': unescapeHTML(video_data['title']),
            'description': unescapeHTML(video_data.get('summary')),
            'uploader': video_data.get('show', {}).get('name'),
            'uploader_id': video_data.get('show', {}).get('slug'),
            'duration': int_or_none(video_data.get('duration')),
            'formats': formats,
        }


class Revision3IE(InfoExtractor):
    """Extractor for revision3.com/animalist.com pages: episodes, shows and tags."""
    IE_NAME = 'revision'
    _VALID_URL = r'https?://(?:www\.)?(?P<domain>(?:revision3|animalist)\.com)/(?P<id>[^/]+(?:/[^/?#]+)?)'
    _TESTS = [{
        'url': 'http://www.revision3.com/technobuffalo/5-google-predictions-for-2016',
        'md5': 'd94a72d85d0a829766de4deb8daaf7df',
        'info_dict': {
            'id': '71089',
            'display_id': 'technobuffalo/5-google-predictions-for-2016',
            'ext': 'webm',
            'title': '5 Google Predictions for 2016',
            'description': 'Google had a great 2015, but it\'s already time to look ahead. Here are our five predictions for 2016.',
            'upload_date': '20151228',
            'timestamp': 1451325600,
            'duration': 187,
            'uploader': 'TechnoBuffalo',
            'uploader_id': 'technobuffalo',
        }
    }, {
        # Show
        'url': 'http://revision3.com/variant',
        'only_matching': True,
    }, {
        # Tag
        'url': 'http://revision3.com/vr',
        'only_matching': True,
    }]
    _PAGE_DATA_TEMPLATE = 'http://www.%s/apiProxy/ddn/%s?domain=%s'

    def _real_extract(self, url):
        domain, display_id = re.match(self._VALID_URL, url).groups()
        site = domain.split('.')[0]
        page_info = self._download_json(
            self._PAGE_DATA_TEMPLATE % (domain, display_id, domain), display_id)

        page_data = page_info['data']
        page_type = page_data['type']
        if page_type in ('episode', 'embed'):
            # Single-video page.
            show_data = page_data['show']['data']
            page_id = compat_str(page_data['id'])
            video_id = compat_str(page_data['video']['data']['id'])

            preference = qualities(['mini', 'small', 'medium', 'large'])
            thumbnails = [{
                'url': image_url,
                'id': image_id,
                'preference': preference(image_id)
            } for image_id, image_url in page_data.get('images', {}).items()]

            info = {
                'id': page_id,
                'display_id': display_id,
                'title': unescapeHTML(page_data['name']),
                'description': unescapeHTML(page_data.get('summary')),
                'timestamp': parse_iso8601(page_data.get('publishTime'), ' '),
                'author': page_data.get('author'),
                'uploader': show_data.get('name'),
                'uploader_id': show_data.get('slug'),
                'thumbnails': thumbnails,
                'extractor_key': site,
            }

            # Embeds reference the player URL directly; episodes are routed
            # through Revision3EmbedIE via the 'revision3:' scheme.
            if page_type == 'embed':
                info.update({
                    '_type': 'url_transparent',
                    'url': page_data['video']['data']['embed'],
                })
                return info

            info.update({
                '_type': 'url_transparent',
                'url': 'revision3:%s' % video_id,
            })
            return info
        else:
            # Show/tag page: paginate through episodes until the reported
            # total has been collected.
            list_data = page_info[page_type]['data']
            episodes_data = page_info['episodes']['data']
            num_episodes = page_info['meta']['totalEpisodes']
            processed_episodes = 0
            entries = []
            page_num = 1
            while True:
                entries.extend([{
                    '_type': 'url',
                    'url': 'http://%s%s' % (domain, episode['path']),
                    'id': compat_str(episode['id']),
                    'ie_key': 'Revision3',
                    'extractor_key': site,
                } for episode in episodes_data])
                processed_episodes += len(episodes_data)
                if processed_episodes == num_episodes:
                    break
                page_num += 1
                # Next page is addressed as '<display_id>/<page_num>'.
                episodes_data = self._download_json(self._PAGE_DATA_TEMPLATE % (
                    domain, display_id + '/' + compat_str(page_num), domain),
                    display_id)['episodes']['data']

            return self.playlist_result(
                entries, compat_str(list_data['id']),
                list_data.get('name'), list_data.get('summary'))
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/rice.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_parse_qs
from ..utils import (
    xpath_text,
    xpath_element,
    int_or_none,
    parse_iso8601,
    ExtractorError,
)


class RICEIE(InfoExtractor):
    """Extractor for Rice University's mediahub (Ensemble Video portal)."""
    _VALID_URL = r'https?://mediahub\.rice\.edu/app/[Pp]ortal/video\.aspx\?(?P<query>.+)'
    _TEST = {
        'url': 'https://mediahub.rice.edu/app/Portal/video.aspx?PortalID=25ffd62c-3d01-4b29-8c70-7c94270efb3e&DestinationID=66bc9434-03bd-4725-b47e-c659d8d809db&ContentID=YEWIvbhb40aqdjMD1ALSqw',
        'md5': '9b83b4a2eead4912dc3b7fac7c449b6a',
        'info_dict': {
            'id': 'YEWIvbhb40aqdjMD1ALSqw',
            'ext': 'mp4',
            'title': 'Active Learning in Archeology',
            'upload_date': '20140616',
            'timestamp': 1402926346,
        }
    }
    # XML namespace used by the player config endpoint.
    _NS = 'http://schemas.datacontract.org/2004/07/ensembleVideo.Data.Service.Contracts.Models.Player.Config'

    def _real_extract(self, url):
        qs = compat_parse_qs(re.match(self._VALID_URL, url).group('query'))
        # All three query parameters are required to address a video.
        if not qs.get('PortalID') or not qs.get('DestinationID') or not qs.get('ContentID'):
            raise ExtractorError('Invalid URL', expected=True)

        portal_id = qs['PortalID'][0]
        playlist_id = qs['DestinationID'][0]
        content_id = qs['ContentID'][0]

        content_data = self._download_xml('https://mediahub.rice.edu/api/portal/GetContentTitle', content_id, query={
            'portalId': portal_id,
            'playlistId': playlist_id,
            'contentId': content_id
        })
        metadata = xpath_element(content_data, './/metaData', fatal=True)
        title = xpath_text(metadata, 'primaryTitle', fatal=True)
        encodings = xpath_element(content_data, './/encodings', fatal=True)
        # A temporary link id from the content response authorizes the
        # player-config request below.
        player_data = self._download_xml('https://mediahub.rice.edu/api/player/GetPlayerConfig', content_id, query={
            'temporaryLinkId': xpath_text(encodings, 'temporaryLinkId', fatal=True),
            'contentId': content_id,
        })

        # Width/height shared by all formats, parsed from 'WxH'.
        common_fmt = {}
        dimensions = xpath_text(encodings, 'dimensions')
        if dimensions:
            wh = dimensions.split('x')
            if len(wh) == 2:
                common_fmt.update({
                    'width': int_or_none(wh[0]),
                    'height': int_or_none(wh[1]),
                })

        formats = []
        rtsp_path = xpath_text(player_data, self._xpath_ns('RtspPath', self._NS))
        if rtsp_path:
            fmt = {
                'url': rtsp_path,
                'format_id': 'rtsp',
            }
            fmt.update(common_fmt)
            formats.append(fmt)
        for source in player_data.findall(self._xpath_ns('.//Source', self._NS)):
            video_url = xpath_text(source, self._xpath_ns('File', self._NS))
            if not video_url:
                continue
            if '.m3u8' in video_url:
                formats.extend(self._extract_m3u8_formats(video_url, content_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False))
            else:
                # Scheme prefix (e.g. 'rtmp') doubles as the format id.
                fmt = {
                    'url': video_url,
                    'format_id': video_url.split(':')[0],
                }
                fmt.update(common_fmt)
                # rtmp URLs are split into base URL, app and play path.
                rtmp = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>.+))/(?P<playpath>mp4:.+)$', video_url)
                if rtmp:
                    fmt.update({
                        'url': rtmp.group('url'),
                        'play_path': rtmp.group('playpath'),
                        'app': rtmp.group('app'),
                        'ext': 'flv',
                    })
                formats.append(fmt)
        self._sort_formats(formats)

        thumbnails = []
        for content_asset in content_data.findall('.//contentAssets'):
            asset_type = xpath_text(content_asset, 'type')
            if asset_type == 'image':
                image_url = xpath_text(content_asset, 'httpPath')
                if not image_url:
                    continue
                thumbnails.append({
                    'id': xpath_text(content_asset, 'ID'),
                    'url': image_url,
                })

        return {
            'id': content_id,
            'title': title,
            'description': xpath_text(metadata, 'abstract'),
            'duration': int_or_none(xpath_text(metadata, 'duration')),
            'timestamp': parse_iso8601(xpath_text(metadata, 'dateUpdated')),
            'thumbnails': thumbnails,
            'formats': formats,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/rmcdecouverte.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from .brightcove import BrightcoveLegacyIE from ..compat import ( compat_parse_qs, compat_urlparse, ) from ..utils import smuggle_url class RMCDecouverteIE(InfoExtractor): _VALID_URL = r'https?://rmcdecouverte\.bfmtv\.com/(?:(?:[^/]+/)*program_(?P<id>\d+)|(?P<live_id>mediaplayer-direct))' _TESTS = [{ 'url': 'https://rmcdecouverte.bfmtv.com/wheeler-dealers-occasions-a-saisir/program_2566/', 'info_dict': { 'id': '5983675500001', 'ext': 'mp4', 'title': 'CORVETTE', 'description': 'md5:c1e8295521e45ffebf635d6a7658f506', 'uploader_id': '1969646226001', 'upload_date': '20181226', 'timestamp': 1545861635, }, 'params': { 'skip_download': True, }, 'skip': 'only available for a week', }, { # live, geo restricted, bypassable 'url': 'https://rmcdecouverte.bfmtv.com/mediaplayer-direct/', 'only_matching': True, }] BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1969646226001/default_default/index.html?videoId=%s' def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) display_id = mobj.group('id') or mobj.group('live_id') webpage = self._download_webpage(url, display_id) brightcove_legacy_url = BrightcoveLegacyIE._extract_brightcove_url(webpage) if brightcove_legacy_url: brightcove_id = compat_parse_qs(compat_urlparse.urlparse( brightcove_legacy_url).query)['@videoPlayer'][0] else: brightcove_id = self._search_regex( r'data-video-id=["\'](\d+)', webpage, 'brightcove id') return self.url_result( smuggle_url( self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, {'geo_countries': ['FR']}), 'BrightcoveNew', brightcove_id)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/ro220.py
from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_urllib_parse_unquote class Ro220IE(InfoExtractor): IE_NAME = '220.ro' _VALID_URL = r'(?x)(?:https?://)?(?:www\.)?220\.ro/(?P<category>[^/]+)/(?P<shorttitle>[^/]+)/(?P<id>[^/]+)' _TEST = { 'url': 'http://www.220.ro/sport/Luati-Le-Banii-Sez-4-Ep-1/LYV6doKo7f/', 'md5': '03af18b73a07b4088753930db7a34add', 'info_dict': { 'id': 'LYV6doKo7f', 'ext': 'mp4', 'title': 'Luati-le Banii sez 4 ep 1', 'description': r're:^Iata-ne reveniti dupa o binemeritata vacanta\. +Va astept si pe Facebook cu pareri si comentarii.$', } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) url = compat_urllib_parse_unquote(self._search_regex( r'(?s)clip\s*:\s*{.*?url\s*:\s*\'([^\']+)\'', webpage, 'url')) title = self._og_search_title(webpage) description = self._og_search_description(webpage) thumbnail = self._og_search_thumbnail(webpage) formats = [{ 'format_id': 'sd', 'url': url, 'ext': 'mp4', }] return { 'id': video_id, 'formats': formats, 'title': title, 'description': description, 'thumbnail': thumbnail, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/rockstargames.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( int_or_none, parse_iso8601, ) class RockstarGamesIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?rockstargames\.com/videos(?:/video/|#?/?\?.*\bvideo=)(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.rockstargames.com/videos/video/11544/', 'md5': '03b5caa6e357a4bd50e3143fc03e5733', 'info_dict': { 'id': '11544', 'ext': 'mp4', 'title': 'Further Adventures in Finance and Felony Trailer', 'description': 'md5:6d31f55f30cb101b5476c4a379e324a3', 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1464876000, 'upload_date': '20160602', } }, { 'url': 'http://www.rockstargames.com/videos#/?video=48', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) video = self._download_json( 'https://www.rockstargames.com/videoplayer/videos/get-video.json', video_id, query={ 'id': video_id, 'locale': 'en_us', })['video'] title = video['title'] formats = [] for video in video['files_processed']['video/mp4']: if not video.get('src'): continue resolution = video.get('resolution') height = int_or_none(self._search_regex( r'^(\d+)[pP]$', resolution or '', 'height', default=None)) formats.append({ 'url': self._proto_relative_url(video['src']), 'format_id': resolution, 'height': height, }) if not formats: youtube_id = video.get('youtube_id') if youtube_id: return self.url_result(youtube_id, 'Youtube') self._sort_formats(formats) return { 'id': video_id, 'title': title, 'description': video.get('description'), 'thumbnail': self._proto_relative_url(video.get('screencap')), 'timestamp': parse_iso8601(video.get('created')), 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/roosterteeth.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_HTTPError, compat_str, ) from ..utils import ( ExtractorError, int_or_none, str_or_none, urlencode_postdata, ) class RoosterTeethIE(InfoExtractor): _VALID_URL = r'https?://(?:.+?\.)?roosterteeth\.com/(?:episode|watch)/(?P<id>[^/?#&]+)' _LOGIN_URL = 'https://roosterteeth.com/login' _NETRC_MACHINE = 'roosterteeth' _TESTS = [{ 'url': 'http://roosterteeth.com/episode/million-dollars-but-season-2-million-dollars-but-the-game-announcement', 'md5': 'e2bd7764732d785ef797700a2489f212', 'info_dict': { 'id': '9156', 'display_id': 'million-dollars-but-season-2-million-dollars-but-the-game-announcement', 'ext': 'mp4', 'title': 'Million Dollars, But... The Game Announcement', 'description': 'md5:168a54b40e228e79f4ddb141e89fe4f5', 'thumbnail': r're:^https?://.*\.png$', 'series': 'Million Dollars, But...', 'episode': 'Million Dollars, But... The Game Announcement', }, }, { 'url': 'http://achievementhunter.roosterteeth.com/episode/off-topic-the-achievement-hunter-podcast-2016-i-didn-t-think-it-would-pass-31', 'only_matching': True, }, { 'url': 'http://funhaus.roosterteeth.com/episode/funhaus-shorts-2016-austin-sucks-funhaus-shorts', 'only_matching': True, }, { 'url': 'http://screwattack.roosterteeth.com/episode/death-battle-season-3-mewtwo-vs-shadow', 'only_matching': True, }, { 'url': 'http://theknow.roosterteeth.com/episode/the-know-game-news-season-1-boring-steam-sales-are-better', 'only_matching': True, }, { # only available for FIRST members 'url': 'http://roosterteeth.com/episode/rt-docs-the-world-s-greatest-head-massage-the-world-s-greatest-head-massage-an-asmr-journey-part-one', 'only_matching': True, }, { 'url': 'https://roosterteeth.com/watch/million-dollars-but-season-2-million-dollars-but-the-game-announcement', 'only_matching': True, }] def _login(self): username, password = self._get_login_info() if username is None: return login_page = 
self._download_webpage( self._LOGIN_URL, None, note='Downloading login page', errnote='Unable to download login page') login_form = self._hidden_inputs(login_page) login_form.update({ 'username': username, 'password': password, }) login_request = self._download_webpage( self._LOGIN_URL, None, note='Logging in', data=urlencode_postdata(login_form), headers={ 'Referer': self._LOGIN_URL, }) if not any(re.search(p, login_request) for p in ( r'href=["\']https?://(?:www\.)?roosterteeth\.com/logout"', r'>Sign Out<')): error = self._html_search_regex( r'(?s)<div[^>]+class=(["\']).*?\balert-danger\b.*?\1[^>]*>(?:\s*<button[^>]*>.*?</button>)?(?P<error>.+?)</div>', login_request, 'alert', default=None, group='error') if error: raise ExtractorError('Unable to login: %s' % error, expected=True) raise ExtractorError('Unable to log in') def _real_initialize(self): self._login() def _real_extract(self, url): display_id = self._match_id(url) api_episode_url = 'https://svod-be.roosterteeth.com/api/v1/episodes/%s' % display_id try: m3u8_url = self._download_json( api_episode_url + '/videos', display_id, 'Downloading video JSON metadata')['data'][0]['attributes']['url'] except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403: if self._parse_json(e.cause.read().decode(), display_id).get('access') is False: self.raise_login_required( '%s is only available for FIRST members' % display_id) raise formats = self._extract_m3u8_formats( m3u8_url, display_id, 'mp4', 'm3u8_native', m3u8_id='hls') self._sort_formats(formats) episode = self._download_json( api_episode_url, display_id, 'Downloading episode JSON metadata')['data'][0] attributes = episode['attributes'] title = attributes.get('title') or attributes['display_title'] video_id = compat_str(episode['id']) thumbnails = [] for image in episode.get('included', {}).get('images', []): if image.get('type') == 'episode_image': img_attributes = image.get('attributes') or {} for k in ('thumb', 'small', 
'medium', 'large'): img_url = img_attributes.get(k) if img_url: thumbnails.append({ 'id': k, 'url': img_url, }) return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': attributes.get('description') or attributes.get('caption'), 'thumbnails': thumbnails, 'series': attributes.get('show_title'), 'season_number': int_or_none(attributes.get('season_number')), 'season_id': attributes.get('season_id'), 'episode': title, 'episode_number': int_or_none(attributes.get('number')), 'episode_id': str_or_none(episode.get('uuid')), 'formats': formats, 'channel_id': attributes.get('channel_id'), 'duration': int_or_none(attributes.get('length')), }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/rottentomatoes.py
from __future__ import unicode_literals from .common import InfoExtractor from .internetvideoarchive import InternetVideoArchiveIE class RottenTomatoesIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?rottentomatoes\.com/m/[^/]+/trailers/(?P<id>\d+)' _TEST = { 'url': 'http://www.rottentomatoes.com/m/toy_story_3/trailers/11028566/', 'info_dict': { 'id': '11028566', 'ext': 'mp4', 'title': 'Toy Story 3', 'description': 'From the creators of the beloved TOY STORY films, comes a story that will reunite the gang in a whole new way.', 'thumbnail': r're:^https?://.*\.jpg$', }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) iva_id = self._search_regex(r'publishedid=(\d+)', webpage, 'internet video archive id') return { '_type': 'url_transparent', 'url': 'http://video.internetvideoarchive.net/player/6/configuration.ashx?domain=www.videodetective.com&customerid=69249&playerid=641&publishedid=' + iva_id, 'ie_key': InternetVideoArchiveIE.ie_key(), 'id': video_id, 'title': self._og_search_title(webpage), }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/roxwel.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import unified_strdate, determine_ext class RoxwelIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?roxwel\.com/player/(?P<filename>.+?)(\.|\?|$)' _TEST = { 'url': 'http://www.roxwel.com/player/passionpittakeawalklive.html', 'info_dict': { 'id': 'passionpittakeawalklive', 'ext': 'flv', 'title': 'Take A Walk (live)', 'uploader': 'Passion Pit', 'uploader_id': 'passionpit', 'upload_date': '20120928', 'description': 'Passion Pit performs "Take A Walk\" live at The Backyard in Austin, Texas. ', }, 'params': { # rtmp download 'skip_download': True, } } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) filename = mobj.group('filename') info_url = 'http://www.roxwel.com/api/videos/%s' % filename info = self._download_json(info_url, filename) rtmp_rates = sorted([int(r.replace('flv_', '')) for r in info['media_rates'] if r.startswith('flv_')]) best_rate = rtmp_rates[-1] url_page_url = 'http://roxwel.com/pl_one_time.php?filename=%s&quality=%s' % (filename, best_rate) rtmp_url = self._download_webpage(url_page_url, filename, 'Downloading video url') ext = determine_ext(rtmp_url) if ext == 'f4v': rtmp_url = rtmp_url.replace(filename, 'mp4:%s' % filename) return { 'id': filename, 'title': info['title'], 'url': rtmp_url, 'ext': 'flv', 'description': info['description'], 'thumbnail': info.get('player_image_url') or info.get('image_url_large'), 'uploader': info['artist'], 'uploader_id': info['artistname'], 'upload_date': unified_strdate(info['dbdate']), }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/rozhlas.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( int_or_none, remove_start, ) class RozhlasIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?prehravac\.rozhlas\.cz/audio/(?P<id>[0-9]+)' _TESTS = [{ 'url': 'http://prehravac.rozhlas.cz/audio/3421320', 'md5': '504c902dbc9e9a1fd50326eccf02a7e2', 'info_dict': { 'id': '3421320', 'ext': 'mp3', 'title': 'Echo Pavla Klusáka (30.06.2015 21:00)', 'description': 'Osmdesátiny Terryho Rileyho jsou skvělou příležitostí proletět se elektronickými i akustickými díly zakladatatele minimalismu, který je aktivní už přes padesát let' } }, { 'url': 'http://prehravac.rozhlas.cz/audio/3421320/embed', 'only_matching': True, }] def _real_extract(self, url): audio_id = self._match_id(url) webpage = self._download_webpage( 'http://prehravac.rozhlas.cz/audio/%s' % audio_id, audio_id) title = self._html_search_regex( r'<h3>(.+?)</h3>\s*<p[^>]*>.*?</p>\s*<div[^>]+id=["\']player-track', webpage, 'title', default=None) or remove_start( self._og_search_title(webpage), 'Radio Wave - ') description = self._html_search_regex( r'<p[^>]+title=(["\'])(?P<url>(?:(?!\1).)+)\1[^>]*>.*?</p>\s*<div[^>]+id=["\']player-track', webpage, 'description', fatal=False, group='url') duration = int_or_none(self._search_regex( r'data-duration=["\'](\d+)', webpage, 'duration', default=None)) return { 'id': audio_id, 'url': 'http://media.rozhlas.cz/_audio/%s.mp3' % audio_id, 'title': title, 'description': description, 'duration': duration, 'vcodec': 'none', }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/rtbf.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( ExtractorError, float_or_none, int_or_none, strip_or_none, ) class RTBFIE(InfoExtractor): _VALID_URL = r'''(?x) https?://(?:www\.)?rtbf\.be/ (?: video/[^?]+\?.*\bid=| ouftivi/(?:[^/]+/)*[^?]+\?.*\bvideoId=| auvio/[^/]+\?.*\b(?P<live>l)?id= )(?P<id>\d+)''' _TESTS = [{ 'url': 'https://www.rtbf.be/video/detail_les-diables-au-coeur-episode-2?id=1921274', 'md5': '8c876a1cceeb6cf31b476461ade72384', 'info_dict': { 'id': '1921274', 'ext': 'mp4', 'title': 'Les Diables au coeur (épisode 2)', 'description': '(du 25/04/2014)', 'duration': 3099.54, 'upload_date': '20140425', 'timestamp': 1398456300, } }, { # geo restricted 'url': 'http://www.rtbf.be/ouftivi/heros/detail_scooby-doo-mysteres-associes?id=1097&videoId=2057442', 'only_matching': True, }, { 'url': 'http://www.rtbf.be/ouftivi/niouzz?videoId=2055858', 'only_matching': True, }, { 'url': 'http://www.rtbf.be/auvio/detail_jeudi-en-prime-siegfried-bracke?id=2102996', 'only_matching': True, }, { # Live 'url': 'https://www.rtbf.be/auvio/direct_pure-fm?lid=134775', 'only_matching': True, }, { # Audio 'url': 'https://www.rtbf.be/auvio/detail_cinq-heures-cinema?id=2360811', 'only_matching': True, }, { # With Subtitle 'url': 'https://www.rtbf.be/auvio/detail_les-carnets-du-bourlingueur?id=2361588', 'only_matching': True, }] _IMAGE_HOST = 'http://ds1.ds.static.rtbf.be' _PROVIDERS = { 'YOUTUBE': 'Youtube', 'DAILYMOTION': 'Dailymotion', 'VIMEO': 'Vimeo', } _QUALITIES = [ ('mobile', 'SD'), ('web', 'MD'), ('high', 'HD'), ] def _real_extract(self, url): live, media_id = re.match(self._VALID_URL, url).groups() embed_page = self._download_webpage( 'https://www.rtbf.be/auvio/embed/' + ('direct' if live else 'media'), media_id, query={'id': media_id}) data = self._parse_json(self._html_search_regex( r'data-media="([^"]+)"', embed_page, 'media data'), media_id) error = data.get('error') if error: raise ExtractorError('%s 
said: %s' % (self.IE_NAME, error), expected=True) provider = data.get('provider') if provider in self._PROVIDERS: return self.url_result(data['url'], self._PROVIDERS[provider]) title = data['title'] is_live = data.get('isLive') if is_live: title = self._live_title(title) height_re = r'-(\d+)p\.' formats = [] m3u8_url = data.get('urlHlsAes128') or data.get('urlHls') if m3u8_url: formats.extend(self._extract_m3u8_formats( m3u8_url, media_id, 'mp4', m3u8_id='hls', fatal=False)) fix_url = lambda x: x.replace('//rtbf-vod.', '//rtbf.') if '/geo/drm/' in x else x http_url = data.get('url') if formats and http_url and re.search(height_re, http_url): http_url = fix_url(http_url) for m3u8_f in formats[:]: height = m3u8_f.get('height') if not height: continue f = m3u8_f.copy() del f['protocol'] f.update({ 'format_id': m3u8_f['format_id'].replace('hls-', 'http-'), 'url': re.sub(height_re, '-%dp.' % height, http_url), }) formats.append(f) else: sources = data.get('sources') or {} for key, format_id in self._QUALITIES: format_url = sources.get(key) if not format_url: continue height = int_or_none(self._search_regex( height_re, format_url, 'height', default=None)) formats.append({ 'format_id': format_id, 'url': fix_url(format_url), 'height': height, }) mpd_url = data.get('urlDash') if not data.get('drm') and mpd_url: formats.extend(self._extract_mpd_formats( mpd_url, media_id, mpd_id='dash', fatal=False)) audio_url = data.get('urlAudio') if audio_url: formats.append({ 'format_id': 'audio', 'url': audio_url, 'vcodec': 'none', }) self._sort_formats(formats) subtitles = {} for track in (data.get('tracks') or {}).values(): sub_url = track.get('url') if not sub_url: continue subtitles.setdefault(track.get('lang') or 'fr', []).append({ 'url': sub_url, }) return { 'id': media_id, 'formats': formats, 'title': title, 'description': strip_or_none(data.get('description')), 'thumbnail': data.get('thumbnail'), 'duration': float_or_none(data.get('realDuration')), 'timestamp': 
int_or_none(data.get('liveFrom')), 'series': data.get('programLabel'), 'subtitles': subtitles, 'is_live': is_live, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/rte.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_HTTPError from ..utils import ( float_or_none, parse_iso8601, str_or_none, try_get, unescapeHTML, url_or_none, ExtractorError, ) class RteBaseIE(InfoExtractor): def _real_extract(self, url): item_id = self._match_id(url) info_dict = {} formats = [] ENDPOINTS = ( 'https://feeds.rasset.ie/rteavgen/player/playlist?type=iptv&format=json&showId=', 'http://www.rte.ie/rteavgen/getplaylist/?type=web&format=json&id=', ) for num, ep_url in enumerate(ENDPOINTS, start=1): try: data = self._download_json(ep_url + item_id, item_id) except ExtractorError as ee: if num < len(ENDPOINTS) or formats: continue if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404: error_info = self._parse_json(ee.cause.read().decode(), item_id, fatal=False) if error_info: raise ExtractorError( '%s said: %s' % (self.IE_NAME, error_info['message']), expected=True) raise # NB the string values in the JSON are stored using XML escaping(!) 
show = try_get(data, lambda x: x['shows'][0], dict) if not show: continue if not info_dict: title = unescapeHTML(show['title']) description = unescapeHTML(show.get('description')) thumbnail = show.get('thumbnail') duration = float_or_none(show.get('duration'), 1000) timestamp = parse_iso8601(show.get('published')) info_dict = { 'id': item_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'timestamp': timestamp, 'duration': duration, } mg = try_get(show, lambda x: x['media:group'][0], dict) if not mg: continue if mg.get('url'): m = re.match(r'(?P<url>rtmpe?://[^/]+)/(?P<app>.+)/(?P<playpath>mp4:.*)', mg['url']) if m: m = m.groupdict() formats.append({ 'url': m['url'] + '/' + m['app'], 'app': m['app'], 'play_path': m['playpath'], 'player_url': url, 'ext': 'flv', 'format_id': 'rtmp', }) if mg.get('hls_server') and mg.get('hls_url'): formats.extend(self._extract_m3u8_formats( mg['hls_server'] + mg['hls_url'], item_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) if mg.get('hds_server') and mg.get('hds_url'): formats.extend(self._extract_f4m_formats( mg['hds_server'] + mg['hds_url'], item_id, f4m_id='hds', fatal=False)) mg_rte_server = str_or_none(mg.get('rte:server')) mg_url = str_or_none(mg.get('url')) if mg_rte_server and mg_url: hds_url = url_or_none(mg_rte_server + mg_url) if hds_url: formats.extend(self._extract_f4m_formats( hds_url, item_id, f4m_id='hds', fatal=False)) self._sort_formats(formats) info_dict['formats'] = formats return info_dict class RteIE(RteBaseIE): IE_NAME = 'rte' IE_DESC = 'Raidió Teilifís Éireann TV' _VALID_URL = r'https?://(?:www\.)?rte\.ie/player/[^/]{2,3}/show/[^/]+/(?P<id>[0-9]+)' _TEST = { 'url': 'http://www.rte.ie/player/ie/show/iwitness-862/10478715/', 'md5': '4a76eb3396d98f697e6e8110563d2604', 'info_dict': { 'id': '10478715', 'ext': 'mp4', 'title': 'iWitness', 'thumbnail': r're:^https?://.*\.jpg$', 'description': 'The spirit of Ireland, one voice and one minute at a time.', 'duration': 
60.046, 'upload_date': '20151012', 'timestamp': 1444694160, }, } class RteRadioIE(RteBaseIE): IE_NAME = 'rte:radio' IE_DESC = 'Raidió Teilifís Éireann radio' # Radioplayer URLs have two distinct specifier formats, # the old format #!rii=<channel_id>:<id>:<playable_item_id>:<date>: # the new format #!rii=b<channel_id>_<id>_<playable_item_id>_<date>_ # where the IDs are int/empty, the date is DD-MM-YYYY, and the specifier may be truncated. # An <id> uniquely defines an individual recording, and is the only part we require. _VALID_URL = r'https?://(?:www\.)?rte\.ie/radio/utils/radioplayer/rteradioweb\.html#!rii=(?:b?[0-9]*)(?:%3A|:|%5F|_)(?P<id>[0-9]+)' _TESTS = [{ # Old-style player URL; HLS and RTMPE formats 'url': 'http://www.rte.ie/radio/utils/radioplayer/rteradioweb.html#!rii=16:10507902:2414:27-12-2015:', 'md5': 'c79ccb2c195998440065456b69760411', 'info_dict': { 'id': '10507902', 'ext': 'mp4', 'title': 'Gloria', 'thumbnail': r're:^https?://.*\.jpg$', 'description': 'md5:9ce124a7fb41559ec68f06387cabddf0', 'timestamp': 1451203200, 'upload_date': '20151227', 'duration': 7230.0, }, }, { # New-style player URL; RTMPE formats only 'url': 'http://rte.ie/radio/utils/radioplayer/rteradioweb.html#!rii=b16_3250678_8861_06-04-2012_', 'info_dict': { 'id': '3250678', 'ext': 'flv', 'title': 'The Lyric Concert with Paul Herriott', 'thumbnail': r're:^https?://.*\.jpg$', 'description': '', 'timestamp': 1333742400, 'upload_date': '20120406', 'duration': 7199.016, }, 'params': { # rtmp download 'skip_download': True, }, }]
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/rtl2.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..aes import aes_cbc_decrypt from ..compat import ( compat_b64decode, compat_ord, compat_str, ) from ..utils import ( bytes_to_intlist, ExtractorError, intlist_to_bytes, int_or_none, strip_or_none, ) class RTL2IE(InfoExtractor): IE_NAME = 'rtl2' _VALID_URL = r'https?://(?:www\.)?rtl2\.de/sendung/[^/]+/(?:video/(?P<vico_id>\d+)[^/]+/(?P<vivi_id>\d+)-|folge/)(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'http://www.rtl2.de/sendung/grip-das-motormagazin/folge/folge-203-0', 'info_dict': { 'id': 'folge-203-0', 'ext': 'f4v', 'title': 'GRIP sucht den Sommerkönig', 'description': 'md5:e3adbb940fd3c6e76fa341b8748b562f' }, 'params': { # rtmp download 'skip_download': True, }, 'expected_warnings': ['Unable to download f4m manifest', 'Failed to download m3u8 information'], }, { 'url': 'http://www.rtl2.de/sendung/koeln-50667/video/5512-anna/21040-anna-erwischt-alex/', 'info_dict': { 'id': 'anna-erwischt-alex', 'ext': 'mp4', 'title': 'Anna erwischt Alex!', 'description': 'Anna nimmt ihrem Vater nicht ab, dass er nicht spielt. Und tatsächlich erwischt sie ihn auf frischer Tat.' 
}, 'params': { # rtmp download 'skip_download': True, }, 'expected_warnings': ['Unable to download f4m manifest', 'Failed to download m3u8 information'], }] def _real_extract(self, url): vico_id, vivi_id, display_id = re.match(self._VALID_URL, url).groups() if not vico_id: webpage = self._download_webpage(url, display_id) mobj = re.search( r'data-collection="(?P<vico_id>\d+)"[^>]+data-video="(?P<vivi_id>\d+)"', webpage) if mobj: vico_id = mobj.group('vico_id') vivi_id = mobj.group('vivi_id') else: vico_id = self._html_search_regex( r'vico_id\s*:\s*([0-9]+)', webpage, 'vico_id') vivi_id = self._html_search_regex( r'vivi_id\s*:\s*([0-9]+)', webpage, 'vivi_id') info = self._download_json( 'https://service.rtl2.de/api-player-vipo/video.php', display_id, query={ 'vico_id': vico_id, 'vivi_id': vivi_id, }) video_info = info['video'] title = video_info['titel'] formats = [] rtmp_url = video_info.get('streamurl') if rtmp_url: rtmp_url = rtmp_url.replace('\\', '') stream_url = 'mp4:' + self._html_search_regex(r'/ondemand/(.+)', rtmp_url, 'stream URL') rtmp_conn = ['S:connect', 'O:1', 'NS:pageUrl:' + url, 'NB:fpad:0', 'NN:videoFunction:1', 'O:0'] formats.append({ 'format_id': 'rtmp', 'url': rtmp_url, 'play_path': stream_url, 'player_url': 'https://www.rtl2.de/sites/default/modules/rtl2/jwplayer/jwplayer-7.6.0/jwplayer.flash.swf', 'page_url': url, 'flash_version': 'LNX 11,2,202,429', 'rtmp_conn': rtmp_conn, 'no_resume': True, 'preference': 1, }) m3u8_url = video_info.get('streamurl_hls') if m3u8_url: formats.extend(self._extract_akamai_formats(m3u8_url, display_id)) self._sort_formats(formats) return { 'id': display_id, 'title': title, 'thumbnail': video_info.get('image'), 'description': video_info.get('beschreibung'), 'duration': int_or_none(video_info.get('duration')), 'formats': formats, } class RTL2YouBaseIE(InfoExtractor): _BACKWERK_BASE_URL = 'https://p-you-backwerk.rtl2apps.de/' class RTL2YouIE(RTL2YouBaseIE): IE_NAME = 'rtl2:you' _VALID_URL = 
r'http?://you\.rtl2\.de/(?:video/\d+/|youplayer/index\.html\?.*?\bvid=)(?P<id>\d+)' _TESTS = [{ 'url': 'http://you.rtl2.de/video/3002/15740/MJUNIK%20%E2%80%93%20Home%20of%20YOU/307-hirn-wo-bist-du', 'info_dict': { 'id': '15740', 'ext': 'mp4', 'title': 'MJUNIK – Home of YOU - #307 Hirn, wo bist du?!', 'description': 'md5:ddaa95c61b372b12b66e115b2772fe01', 'age_limit': 12, }, }, { 'url': 'http://you.rtl2.de/youplayer/index.html?vid=15712', 'only_matching': True, }] _AES_KEY = b'\xe9W\xe4.<*\xb8\x1a\xd2\xb6\x92\xf3C\xd3\xefL\x1b\x03*\xbbbH\xc0\x03\xffo\xc2\xf2(\xaa\xaa!' _GEO_COUNTRIES = ['DE'] def _real_extract(self, url): video_id = self._match_id(url) stream_data = self._download_json( self._BACKWERK_BASE_URL + 'stream/video/' + video_id, video_id) data, iv = compat_b64decode(stream_data['streamUrl']).decode().split(':') stream_url = intlist_to_bytes(aes_cbc_decrypt( bytes_to_intlist(compat_b64decode(data)), bytes_to_intlist(self._AES_KEY), bytes_to_intlist(compat_b64decode(iv)) )) if b'rtl2_you_video_not_found' in stream_url: raise ExtractorError('video not found', expected=True) formats = self._extract_m3u8_formats( stream_url[:-compat_ord(stream_url[-1])].decode(), video_id, 'mp4', 'm3u8_native') self._sort_formats(formats) video_data = self._download_json( self._BACKWERK_BASE_URL + 'video/' + video_id, video_id) series = video_data.get('formatTitle') title = episode = video_data.get('title') or series if series and series != title: title = '%s - %s' % (series, title) return { 'id': video_id, 'title': title, 'formats': formats, 'description': strip_or_none(video_data.get('description')), 'thumbnail': video_data.get('image'), 'duration': int_or_none(stream_data.get('duration') or video_data.get('duration'), 1000), 'series': series, 'episode': episode, 'age_limit': int_or_none(video_data.get('minimumAge')), } class RTL2YouSeriesIE(RTL2YouBaseIE): IE_NAME = 'rtl2:you:series' _VALID_URL = r'http?://you\.rtl2\.de/videos/(?P<id>\d+)' _TEST = { 'url': 
'http://you.rtl2.de/videos/115/dragon-ball', 'info_dict': { 'id': '115', }, 'playlist_mincount': 5, } def _real_extract(self, url): series_id = self._match_id(url) stream_data = self._download_json( self._BACKWERK_BASE_URL + 'videos', series_id, query={ 'formatId': series_id, 'limit': 1000000000, }) entries = [] for video in stream_data.get('videos', []): video_id = compat_str(video['videoId']) if not video_id: continue entries.append(self.url_result( 'http://you.rtl2.de/video/%s/%s' % (series_id, video_id), 'RTL2You', video_id)) return self.playlist_result(entries, series_id)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/rtlnl.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( int_or_none, parse_duration, ) class RtlNlIE(InfoExtractor): IE_NAME = 'rtl.nl' IE_DESC = 'rtl.nl and rtlxl.nl' _VALID_URL = r'''(?x) https?://(?:(?:www|static)\.)? (?: rtlxl\.nl/[^\#]*\#!/[^/]+/| rtl\.nl/(?:(?:system/videoplayer/(?:[^/]+/)+(?:video_)?embed\.html|embed)\b.+?\buuid=|video/) ) (?P<id>[0-9a-f-]+)''' _TESTS = [{ 'url': 'http://www.rtlxl.nl/#!/rtl-nieuws-132237/82b1aad1-4a14-3d7b-b554-b0aed1b2c416', 'md5': '473d1946c1fdd050b2c0161a4b13c373', 'info_dict': { 'id': '82b1aad1-4a14-3d7b-b554-b0aed1b2c416', 'ext': 'mp4', 'title': 'RTL Nieuws', 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e', 'timestamp': 1461951000, 'upload_date': '20160429', 'duration': 1167.96, }, }, { # best format available a3t 'url': 'http://www.rtl.nl/system/videoplayer/derden/rtlnieuws/video_embed.html#uuid=84ae5571-ac25-4225-ae0c-ef8d9efb2aed/autoplay=false', 'md5': 'dea7474214af1271d91ef332fb8be7ea', 'info_dict': { 'id': '84ae5571-ac25-4225-ae0c-ef8d9efb2aed', 'ext': 'mp4', 'timestamp': 1424039400, 'title': 'RTL Nieuws - Nieuwe beelden Kopenhagen: chaos direct na aanslag', 'thumbnail': r're:^https?://screenshots\.rtl\.nl/(?:[^/]+/)*sz=[0-9]+x[0-9]+/uuid=84ae5571-ac25-4225-ae0c-ef8d9efb2aed$', 'upload_date': '20150215', 'description': 'Er zijn nieuwe beelden vrijgegeven die vlak na de aanslag in Kopenhagen zijn gemaakt. 
Op de video is goed te zien hoe omstanders zich bekommeren om één van de slachtoffers, terwijl de eerste agenten ter plaatse komen.', } }, { # empty synopsis and missing episodes (see https://github.com/ytdl-org/youtube-dl/issues/6275) # best format available nettv 'url': 'http://www.rtl.nl/system/videoplayer/derden/rtlnieuws/video_embed.html#uuid=f536aac0-1dc3-4314-920e-3bd1c5b3811a/autoplay=false', 'info_dict': { 'id': 'f536aac0-1dc3-4314-920e-3bd1c5b3811a', 'ext': 'mp4', 'title': 'RTL Nieuws - Meer beelden van overval juwelier', 'thumbnail': r're:^https?://screenshots\.rtl\.nl/(?:[^/]+/)*sz=[0-9]+x[0-9]+/uuid=f536aac0-1dc3-4314-920e-3bd1c5b3811a$', 'timestamp': 1437233400, 'upload_date': '20150718', 'duration': 30.474, }, 'params': { 'skip_download': True, }, }, { # encrypted m3u8 streams, georestricted 'url': 'http://www.rtlxl.nl/#!/afl-2-257632/52a74543-c504-4cde-8aa8-ec66fe8d68a7', 'only_matching': True, }, { 'url': 'http://www.rtl.nl/system/videoplayer/derden/embed.html#!/uuid=bb0353b0-d6a4-1dad-90e9-18fe75b8d1f0', 'only_matching': True, }, { 'url': 'http://rtlxl.nl/?_ga=1.204735956.572365465.1466978370#!/rtl-nieuws-132237/3c487912-023b-49ac-903e-2c5d79f8410f', 'only_matching': True, }, { 'url': 'https://www.rtl.nl/video/c603c9c2-601d-4b5e-8175-64f1e942dc7d/', 'only_matching': True, }, { 'url': 'https://static.rtl.nl/embed/?uuid=1a2970fc-5c0b-43ff-9fdc-927e39e6d1bc&autoplay=false&publicatiepunt=rtlnieuwsnl', 'only_matching': True, }] def _real_extract(self, url): uuid = self._match_id(url) info = self._download_json( 'http://www.rtl.nl/system/s4m/vfd/version=2/uuid=%s/fmt=adaptive/' % uuid, uuid) material = info['material'][0] title = info['abstracts'][0]['name'] subtitle = material.get('title') if subtitle: title += ' - %s' % subtitle description = material.get('synopsis') meta = info.get('meta', {}) videopath = material['videopath'] m3u8_url = meta.get('videohost', 'http://manifest.us.rtl.nl') + videopath formats = self._extract_m3u8_formats( m3u8_url, 
uuid, 'mp4', m3u8_id='hls', fatal=False) self._sort_formats(formats) thumbnails = [] for p in ('poster_base_url', '"thumb_base_url"'): if not meta.get(p): continue thumbnails.append({ 'url': self._proto_relative_url(meta[p] + uuid), 'width': int_or_none(self._search_regex( r'/sz=([0-9]+)', meta[p], 'thumbnail width', fatal=False)), 'height': int_or_none(self._search_regex( r'/sz=[0-9]+x([0-9]+)', meta[p], 'thumbnail height', fatal=False)) }) return { 'id': uuid, 'title': title, 'formats': formats, 'timestamp': material['original_date'], 'description': description, 'duration': parse_duration(material.get('duration')), 'thumbnails': thumbnails, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/rtp.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( determine_ext, js_to_json, ) class RTPIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?rtp\.pt/play/p(?P<program_id>[0-9]+)/(?P<id>[^/?#]+)/?' _TESTS = [{ 'url': 'http://www.rtp.pt/play/p405/e174042/paixoes-cruzadas', 'md5': 'e736ce0c665e459ddb818546220b4ef8', 'info_dict': { 'id': 'e174042', 'ext': 'mp3', 'title': 'Paixões Cruzadas', 'description': 'As paixões musicais de António Cartaxo e António Macedo', 'thumbnail': r're:^https?://.*\.jpg', }, }, { 'url': 'http://www.rtp.pt/play/p831/a-quimica-das-coisas', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._html_search_meta( 'twitter:title', webpage, display_name='title', fatal=True) config = self._parse_json(self._search_regex( r'(?s)RTPPlayer\(({.+?})\);', webpage, 'player config'), video_id, js_to_json) file_url = config['file'] ext = determine_ext(file_url) if ext == 'm3u8': file_key = config.get('fileKey') formats = self._extract_m3u8_formats( file_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=file_key) if file_key: formats.append({ 'url': 'https://cdn-ondemand.rtp.pt' + file_key, 'preference': 1, }) self._sort_formats(formats) else: formats = [{ 'url': file_url, 'ext': ext, }] if config.get('mediaType') == 'audio': for f in formats: f['vcodec'] = 'none' return { 'id': video_id, 'title': title, 'formats': formats, 'description': self._html_search_meta(['description', 'twitter:description'], webpage), 'thumbnail': config.get('poster') or self._og_search_thumbnail(webpage), }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/rts.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .srgssr import SRGSSRIE
from ..compat import compat_str
from ..utils import (
    int_or_none,
    parse_duration,
    parse_iso8601,
    unescapeHTML,
    determine_ext,
)


class RTSIE(SRGSSRIE):
    """Extractor for RTS.ch (Radio Télévision Suisse).

    Handles single video/audio pages, playlist pages, and article pages
    that embed videos; falls back to the SRGSSR player URN scheme for
    article sidebars.
    """
    IE_DESC = 'RTS.ch'
    _VALID_URL = r'rts:(?P<rts_id>\d+)|https?://(?:.+?\.)?rts\.ch/(?:[^/]+/){2,}(?P<id>[0-9]+)-(?P<display_id>.+?)\.html'

    _TESTS = [
        {
            'url': 'http://www.rts.ch/archives/tv/divers/3449373-les-enfants-terribles.html',
            'md5': 'ff7f8450a90cf58dacb64e29707b4a8e',
            'info_dict': {
                'id': '3449373',
                'display_id': 'les-enfants-terribles',
                'ext': 'mp4',
                'duration': 1488,
                'title': 'Les Enfants Terribles',
                'description': 'France Pommier et sa soeur Luce Feral, les deux filles de ce groupe de 5.',
                'uploader': 'Divers',
                'upload_date': '19680921',
                'timestamp': -40280400,
                'thumbnail': r're:^https?://.*\.image',
                'view_count': int,
            },
        },
        {
            'url': 'http://www.rts.ch/emissions/passe-moi-les-jumelles/5624067-entre-ciel-et-mer.html',
            'info_dict': {
                'id': '5624065',
                'title': 'Passe-moi les jumelles',
            },
            'playlist_mincount': 4,
        },
        {
            'url': 'http://www.rts.ch/video/sport/hockey/5745975-1-2-kloten-fribourg-5-2-second-but-pour-gotteron-par-kwiatowski.html',
            'info_dict': {
                'id': '5745975',
                'display_id': '1-2-kloten-fribourg-5-2-second-but-pour-gotteron-par-kwiatowski',
                'ext': 'mp4',
                'duration': 48,
                'title': '1/2, Kloten - Fribourg (5-2): second but pour Gottéron par Kwiatowski',
                'description': 'Hockey - Playoff',
                'uploader': 'Hockey',
                'upload_date': '20140403',
                'timestamp': 1396556882,
                'thumbnail': r're:^https?://.*\.image',
                'view_count': int,
            },
            'params': {
                # m3u8 download
                'skip_download': True,
            },
            'skip': 'Blocked outside Switzerland',
        },
        {
            'url': 'http://www.rts.ch/video/info/journal-continu/5745356-londres-cachee-par-un-epais-smog.html',
            'md5': '1bae984fe7b1f78e94abc74e802ed99f',
            'info_dict': {
                'id': '5745356',
                'display_id': 'londres-cachee-par-un-epais-smog',
                'ext': 'mp4',
                'duration': 33,
                'title': 'Londres cachée par un épais smog',
                'description': 'Un important voile de smog recouvre Londres depuis mercredi, provoqué par la pollution et du sable du Sahara.',
                'uploader': 'L\'actu en vidéo',
                'upload_date': '20140403',
                'timestamp': 1396537322,
                'thumbnail': r're:^https?://.*\.image',
                'view_count': int,
            },
        },
        {
            'url': 'http://www.rts.ch/audio/couleur3/programmes/la-belle-video-de-stephane-laurenceau/5706148-urban-hippie-de-damien-krisl-03-04-2014.html',
            'md5': 'dd8ef6a22dff163d063e2a52bc8adcae',
            'info_dict': {
                'id': '5706148',
                'display_id': 'urban-hippie-de-damien-krisl-03-04-2014',
                'ext': 'mp3',
                'duration': 123,
                'title': '"Urban Hippie", de Damien Krisl',
                'description': 'Des Hippies super glam.',
                'upload_date': '20140403',
                'timestamp': 1396551600,
            },
        },
        {
            # article with videos on rhs
            'url': 'http://www.rts.ch/sport/hockey/6693917-hockey-davos-decroche-son-31e-titre-de-champion-de-suisse.html',
            'info_dict': {
                'id': '6693917',
                'title': 'Hockey: Davos décroche son 31e titre de champion de Suisse',
            },
            'playlist_mincount': 5,
        },
        {
            'url': 'http://pages.rts.ch/emissions/passe-moi-les-jumelles/5624065-entre-ciel-et-mer.html',
            'only_matching': True,
        }
    ]

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        media_id = m.group('rts_id') or m.group('id')
        display_id = m.group('display_id') or media_id

        def download_json(internal_id):
            # Article endpoint: returns either a media description or a
            # playlist-style article payload.
            return self._download_json(
                'http://www.rts.ch/a/%s.html?f=json/article' % internal_id,
                display_id)

        all_info = download_json(media_id)

        # media_id extracted out of URL is not always a real id
        if 'video' not in all_info and 'audio' not in all_info:
            entries = []

            # First try the explicit item list of the article payload.
            for item in all_info.get('items', []):
                item_url = item.get('url')
                if not item_url:
                    continue
                entries.append(self.url_result(item_url, 'RTS'))

            if not entries:
                page, urlh = self._download_webpage_handle(url, display_id)
                # A redirect to a different media id means the URL was an
                # alias; restart extraction on the final URL.
                if re.match(self._VALID_URL, urlh.geturl()).group('id') != media_id:
                    return self.url_result(urlh.geturl(), 'RTS')

                # article with videos on rhs
                videos = re.findall(
                    r'<article[^>]+class="content-item"[^>]*>\s*<a[^>]+data-video-urn="urn:([^"]+)"',
                    page)
                if not videos:
                    videos = re.findall(
                        r'(?s)<iframe[^>]+class="srg-player"[^>]+src="[^"]+urn:([^"]+)"',
                        page)
                if videos:
                    entries = [self.url_result('srgssr:%s' % video_urn, 'SRGSSR') for video_urn in videos]

            if entries:
                return self.playlist_result(entries, media_id, all_info.get('title'))

            # Last resort: scrape the real internal id from the page markup.
            internal_id = self._html_search_regex(
                r'<(?:video|audio) data-id="([0-9]+)"', page,
                'internal video id')
            all_info = download_json(internal_id)

        media_type = 'video' if 'video' in all_info else 'audio'

        # check for errors
        self.get_media_data('rts', media_type, media_id)

        info = all_info['video']['JSONinfo'] if 'video' in all_info else all_info['audio']

        title = info['title']

        def extract_bitrate(url):
            # Bitrate is encoded in the filename as e.g. "-700k."
            return int_or_none(self._search_regex(
                r'-([0-9]+)k\.', url, 'bitrate', default=None))

        formats = []
        streams = info.get('streams', {})
        for format_id, format_url in streams.items():
            # Prefer the full-quality HDS/HLS entries over their SD twins.
            if format_id == 'hds_sd' and 'hds' in streams:
                continue
            if format_id == 'hls_sd' and 'hls' in streams:
                continue
            ext = determine_ext(format_url)
            if ext in ('m3u8', 'f4m'):
                # Manifest URLs need a per-request auth token.
                format_url = self._get_tokenized_src(format_url, media_id, format_id)
                if ext == 'f4m':
                    formats.extend(self._extract_f4m_formats(
                        format_url + ('?' if '?' not in format_url else '&') + 'hdcore=3.4.0',
                        media_id, f4m_id=format_id, fatal=False))
                else:
                    formats.extend(self._extract_m3u8_formats(
                        format_url, media_id, 'mp4', 'm3u8_native', m3u8_id=format_id, fatal=False))
            else:
                formats.append({
                    'format_id': format_id,
                    'url': format_url,
                    'tbr': extract_bitrate(format_url),
                })

        # Relative "media" entries are direct downloads on the CDN host.
        for media in info.get('media', []):
            media_url = media.get('url')
            if not media_url or re.match(r'https?://', media_url):
                continue
            rate = media.get('rate')
            ext = media.get('ext') or determine_ext(media_url, 'mp4')
            format_id = ext
            if rate:
                format_id += '-%dk' % rate
            formats.append({
                'format_id': format_id,
                'url': 'http://download-video.rts.ch/' + media_url,
                'tbr': rate or extract_bitrate(media_url),
            })

        self._check_formats(formats, media_id)
        self._sort_formats(formats)

        # Duration may come as a number or as a "HH:MM:SS"-style string.
        duration = info.get('duration') or info.get('cutout') or info.get('cutduration')
        if isinstance(duration, compat_str):
            duration = parse_duration(duration)

        return {
            'id': media_id,
            'display_id': display_id,
            'formats': formats,
            'title': title,
            'description': info.get('intro'),
            'duration': duration,
            'view_count': int_or_none(info.get('plays')),
            'uploader': info.get('programName'),
            'timestamp': parse_iso8601(info.get('broadcast_date')),
            'thumbnail': unescapeHTML(info.get('preview_image_url')),
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/rtve.py
# coding: utf-8
from __future__ import unicode_literals

import base64
import re
import time

from .common import InfoExtractor
from ..compat import (
    compat_b64decode,
    compat_struct_unpack,
)
from ..utils import (
    determine_ext,
    ExtractorError,
    float_or_none,
    remove_end,
    remove_start,
    sanitized_Request,
    std_headers,
)


def _decrypt_url(png):
    """Recover the media URL hidden inside an RTVE "thumbnail" PNG.

    The base64-encoded PNG carries, in its tEXt chunk, an alphabet table
    and a digit stream that together encode the URL.  The loops below
    implement RTVE's obfuscation scheme; the exact counter updates are
    deliberate and must not be reordered.
    """
    encrypted_data = compat_b64decode(png)
    # Locate the tEXt chunk; back up 4 bytes to include its length field.
    text_index = encrypted_data.find(b'tEXt')
    text_chunk = encrypted_data[text_index - 4:]
    length = compat_struct_unpack('!I', text_chunk[:4])[0]
    # Use bytearray to get integers when iterating in both python 2.x and 3.x
    data = bytearray(text_chunk[8:8 + length])
    data = [chr(b) for b in data if b != 0]
    # '#' separates the alphabet table from the encoded URL digits.
    hash_index = data.index('#')
    alphabet_data = data[:hash_index]
    url_data = data[hash_index + 1:]
    if url_data[0] == 'H' and url_data[3] == '%':
        # remove useless HQ%% at the start
        url_data = url_data[4:]

    # Every few characters of alphabet_data, one is a real alphabet entry;
    # the skip distance cycles modulo 4.
    alphabet = []
    e = 0
    d = 0
    for l in alphabet_data:
        if d == 0:
            alphabet.append(l)
            d = e = (e + 1) % 4
        else:
            d -= 1
    # Decode two-digit alphabet indices interleaved with filler characters.
    url = ''
    f = 0
    e = 3
    b = 1
    for letter in url_data:
        if f == 0:
            l = int(letter) * 10
            f = 1
        else:
            if e == 0:
                l += int(letter)
                url += alphabet[l]
                e = (b + 3) % 4
                f = 0
                b += 1
            else:
                e -= 1
    return url


class RTVEALaCartaIE(InfoExtractor):
    """Extractor for RTVE "a la carta" on-demand videos."""
    IE_NAME = 'rtve.es:alacarta'
    IE_DESC = 'RTVE a la carta'
    _VALID_URL = r'https?://(?:www\.)?rtve\.es/(m/)?(alacarta/videos|filmoteca)/[^/]+/[^/]+/(?P<id>\d+)'

    _TESTS = [{
        'url': 'http://www.rtve.es/alacarta/videos/balonmano/o-swiss-cup-masculina-final-espana-suecia/2491869/',
        'md5': '1d49b7e1ca7a7502c56a4bf1b60f1b43',
        'info_dict': {
            'id': '2491869',
            'ext': 'mp4',
            'title': 'Balonmano - Swiss Cup masculina. Final: España-Suecia',
            'duration': 5024.566,
        },
    }, {
        'note': 'Live stream',
        'url': 'http://www.rtve.es/alacarta/videos/television/24h-live/1694255/',
        'info_dict': {
            'id': '1694255',
            'ext': 'flv',
            'title': 'TODO',
        },
        'skip': 'The f4m manifest can\'t be used yet',
    }, {
        'url': 'http://www.rtve.es/alacarta/videos/servir-y-proteger/servir-proteger-capitulo-104/4236788/',
        'md5': 'e55e162379ad587e9640eda4f7353c0f',
        'info_dict': {
            'id': '4236788',
            'ext': 'mp4',
            'title': 'Servir y proteger - Capítulo 104 ',
            'duration': 3222.0,
        },
        'params': {
            'skip_download': True,  # requires ffmpeg
        },
    }, {
        'url': 'http://www.rtve.es/m/alacarta/videos/cuentame-como-paso/cuentame-como-paso-t16-ultimo-minuto-nuestra-vida-capitulo-276/2969138/?media=tve',
        'only_matching': True,
    }, {
        'url': 'http://www.rtve.es/filmoteca/no-do/not-1-introduccion-primer-noticiario-espanol/1465256/',
        'only_matching': True,
    }]

    def _real_initialize(self):
        # The "manager" name (used in the PNG URL) depends on the UA we
        # announce, so register it once per session.
        user_agent_b64 = base64.b64encode(std_headers['User-Agent'].encode('utf-8')).decode('utf-8')
        manager_info = self._download_json(
            'http://www.rtve.es/odin/loki/' + user_agent_b64,
            None, 'Fetching manager info')
        self._manager = manager_info['manager']

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        info = self._download_json(
            'http://www.rtve.es/api/videos/%s/config/alacarta_videos.json' % video_id,
            video_id)['page']['items'][0]
        if info['state'] == 'DESPU':
            raise ExtractorError('The video is no longer available', expected=True)
        title = info['title']
        # The media URL is obfuscated inside this PNG (see _decrypt_url).
        png_url = 'http://www.rtve.es/ztnr/movil/thumbnail/%s/videos/%s.png' % (self._manager, video_id)
        png_request = sanitized_Request(png_url)
        png_request.add_header('Referer', url)
        png = self._download_webpage(png_request, video_id, 'Downloading url information')
        video_url = _decrypt_url(png)
        ext = determine_ext(video_url)

        formats = []
        if not video_url.endswith('.f4m') and ext != 'm3u8':
            # Plain file URLs need the auth path and CDN host rewrites.
            if '?' not in video_url:
                video_url = video_url.replace('resources/', 'auth/resources/')
            video_url = video_url.replace('.net.rtve', '.multimedia.cdn.rtve')

        if ext == 'm3u8':
            formats.extend(self._extract_m3u8_formats(
                video_url, video_id, ext='mp4', entry_protocol='m3u8_native',
                m3u8_id='hls', fatal=False))
        elif ext == 'f4m':
            formats.extend(self._extract_f4m_formats(
                video_url, video_id, f4m_id='hds', fatal=False))
        else:
            formats.append({
                'url': video_url,
            })
        self._sort_formats(formats)

        subtitles = None
        if info.get('sbtFile') is not None:
            subtitles = self.extract_subtitles(video_id, info['sbtFile'])

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnail': info.get('image'),
            'page_url': url,
            'subtitles': subtitles,
            'duration': float_or_none(info.get('duration'), scale=1000),
        }

    def _get_subtitles(self, video_id, sub_file):
        # Subtitle index lists one entry per language.
        subs = self._download_json(
            sub_file + '.json', video_id,
            'Downloading subtitles info')['page']['items']
        return dict(
            (s['lang'], [{'ext': 'vtt', 'url': s['src']}])
            for s in subs)


class RTVEInfantilIE(InfoExtractor):
    """Extractor for RTVE's children's ("infantil") section."""
    IE_NAME = 'rtve.es:infantil'
    IE_DESC = 'RTVE infantil'
    _VALID_URL = r'https?://(?:www\.)?rtve\.es/infantil/serie/(?P<show>[^/]*)/video/(?P<short_title>[^/]*)/(?P<id>[0-9]+)/'

    _TESTS = [{
        'url': 'http://www.rtve.es/infantil/serie/cleo/video/maneras-vivir/3040283/',
        'md5': '915319587b33720b8e0357caaa6617e6',
        'info_dict': {
            'id': '3040283',
            'ext': 'mp4',
            'title': 'Maneras de vivir',
            'thumbnail': 'http://www.rtve.es/resources/jpg/6/5/1426182947956.JPG',
            'duration': 357.958,
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        info = self._download_json(
            'http://www.rtve.es/api/videos/%s/config/alacarta_videos.json' % video_id,
            video_id)['page']['items'][0]

        webpage = self._download_webpage(url, video_id)
        # Internal player id differs from the page id in the URL.
        vidplayer_id = self._search_regex(
            r' id="vidplayer([0-9]+)"', webpage, 'internal video ID')

        png_url = 'http://www.rtve.es/ztnr/movil/thumbnail/default/videos/%s.png' % vidplayer_id
        png = self._download_webpage(png_url, video_id, 'Downloading url information')
        video_url = _decrypt_url(png)

        return {
            'id': video_id,
            'ext': 'mp4',
            'title': info['title'],
            'url': video_url,
            'thumbnail': info.get('image'),
            'duration': float_or_none(info.get('duration'), scale=1000),
        }


class RTVELiveIE(InfoExtractor):
    """Extractor for RTVE.es live channel streams."""
    IE_NAME = 'rtve.es:live'
    IE_DESC = 'RTVE.es live streams'
    _VALID_URL = r'https?://(?:www\.)?rtve\.es/directo/(?P<id>[a-zA-Z0-9-]+)'

    _TESTS = [{
        'url': 'http://www.rtve.es/directo/la-1/',
        'info_dict': {
            'id': 'la-1',
            'ext': 'mp4',
            'title': 're:^La 1 [0-9]{4}-[0-9]{2}-[0-9]{2}Z[0-9]{6}$',
        },
        'params': {
            'skip_download': 'live stream',
        }
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        start_time = time.gmtime()
        video_id = mobj.group('id')

        webpage = self._download_webpage(url, video_id)
        title = remove_end(self._og_search_title(webpage), ' en directo en RTVE.es')
        title = remove_start(title, 'Estoy viendo ')
        # Timestamp the title so successive recordings are distinguishable.
        title += ' ' + time.strftime('%Y-%m-%dZ%H%M%S', start_time)

        vidplayer_id = self._search_regex(
            (r'playerId=player([0-9]+)',
             r'class=["\'].*?\blive_mod\b.*?["\'][^>]+data-assetid=["\'](\d+)',
             r'data-id=["\'](\d+)'),
            webpage, 'internal video ID')
        png_url = 'http://www.rtve.es/ztnr/movil/thumbnail/amonet/videos/%s.png' % vidplayer_id
        png = self._download_webpage(png_url, video_id, 'Downloading url information')
        m3u8_url = _decrypt_url(png)
        formats = self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4')
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'is_live': True,
        }


class RTVETelevisionIE(InfoExtractor):
    """Resolver for rtve.es/television pages: finds the embedded
    "a la carta" URL and delegates to RTVEALaCartaIE."""
    IE_NAME = 'rtve.es:television'
    _VALID_URL = r'https?://(?:www\.)?rtve\.es/television/[^/]+/[^/]+/(?P<id>\d+).shtml'

    _TEST = {
        'url': 'http://www.rtve.es/television/20160628/revolucion-del-movil/1364141.shtml',
        'info_dict': {
            'id': '3069778',
            'ext': 'mp4',
            'title': 'Documentos TV - La revolución del móvil',
            'duration': 3496.948,
        },
        'params': {
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        page_id = self._match_id(url)
        webpage = self._download_webpage(url, page_id)

        alacarta_url = self._search_regex(
            r'data-location="alacarta_videos"[^<]+url&quot;:&quot;(http://www\.rtve\.es/alacarta.+?)&',
            webpage, 'alacarta url', default=None)
        if alacarta_url is None:
            raise ExtractorError(
                'The webpage doesn\'t contain any video', expected=True)

        return self.url_result(alacarta_url, ie=RTVEALaCartaIE.ie_key())
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/rtvnh.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import ExtractorError


class RTVNHIE(InfoExtractor):
    """Extractor for RTV Noord-Holland videos."""

    _VALID_URL = r'https?://(?:www\.)?rtvnh\.nl/video/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.rtvnh.nl/video/131946',
        'md5': 'cdbec9f44550763c8afc96050fa747dc',
        'info_dict': {
            'id': '131946',
            'ext': 'mp4',
            'title': 'Grote zoektocht in zee bij Zandvoort naar vermiste vrouw',
            'thumbnail': r're:^https?:.*\.jpg$'
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        meta = self._parse_json(self._download_webpage(
            'http://www.rtvnh.nl/video/json?m=' + video_id, video_id),
            video_id)

        status = meta.get('status')
        if status != 200:
            # BUGFIX: status may be absent (None) on malformed responses;
            # the old '%d' placeholder then raised a TypeError instead of
            # the intended ExtractorError.  '%s' reports any value safely.
            raise ExtractorError(
                '%s returned error code %s' % (self.IE_NAME, status), expected=True)

        formats = []
        # The SMIL manifest yields RTMP formats; each one is also exposed
        # as an RTSP variant and as HLS/HDS manifests on the HTTP host.
        rtmp_formats = self._extract_smil_formats(
            'http://www.rtvnh.nl/video/smil?m=' + video_id, video_id)
        formats.extend(rtmp_formats)

        for rtmp_format in rtmp_formats:
            rtmp_url = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
            rtsp_format = rtmp_format.copy()
            del rtsp_format['play_path']
            del rtsp_format['ext']
            rtsp_format.update({
                'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'),
                'url': rtmp_url.replace('rtmp://', 'rtsp://'),
                'protocol': 'rtsp',
            })
            formats.append(rtsp_format)
            http_base_url = rtmp_url.replace('rtmp://', 'http://')
            formats.extend(self._extract_m3u8_formats(
                http_base_url + '/playlist.m3u8', video_id, 'mp4',
                'm3u8_native', m3u8_id='hls', fatal=False))
            formats.extend(self._extract_f4m_formats(
                http_base_url + '/manifest.f4m',
                video_id, f4m_id='hds', fatal=False))
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': meta['title'].strip(),
            'thumbnail': meta.get('image'),
            'formats': formats
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/rtvs.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor


class RTVSIE(InfoExtractor):
    """Extractor for RTVS (Slovak public broadcaster) archive pages."""

    _VALID_URL = r'https?://(?:www\.)?rtvs\.sk/(?:radio|televizia)/archiv/\d+/(?P<id>\d+)'
    _TESTS = [{
        # radio archive
        'url': 'http://www.rtvs.sk/radio/archiv/11224/414872',
        'md5': '134d5d6debdeddf8a5d761cbc9edacb8',
        'info_dict': {
            'id': '414872',
            'ext': 'mp3',
            'title': 'Ostrov pokladov 1 časť.mp3'
        },
        'params': {
            'skip_download': True,
        }
    }, {
        # tv archive
        'url': 'http://www.rtvs.sk/televizia/archiv/8249/63118',
        'md5': '85e2c55cf988403b70cac24f5c086dc6',
        'info_dict': {
            'id': '63118',
            'ext': 'mp4',
            'title': 'Amaro Džives - Náš deň',
            'description': 'Galavečer pri príležitosti Medzinárodného dňa Rómov.'
        },
        'params': {
            'skip_download': True,
        }
    }]

    def _real_extract(self, url):
        """Locate the JWPlayer playlist on the archive page and parse its
        first entry."""
        media_id = self._match_id(url)
        page = self._download_webpage(url, media_id)

        # The playlist URL sits in a JS config; the backreference keeps the
        # opening and closing quote consistent.
        playlist_url = self._search_regex(
            r'playlist["\']?\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1', page,
            'playlist url', group='url')

        playlist = self._download_json(
            playlist_url, media_id, 'Downloading playlist')
        return self._parse_jwplayer_data(playlist[0], video_id=media_id)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/ruhd.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor


class RUHDIE(InfoExtractor):
    """Extractor for RUHD.ru player pages."""

    _VALID_URL = r'https?://(?:www\.)?ruhd\.ru/play\.php\?vid=(?P<id>\d+)'
    _TEST = {
        'url': 'http://www.ruhd.ru/play.php?vid=207',
        'md5': 'd1a9ec4edf8598e3fbd92bb16072ba83',
        'info_dict': {
            'id': '207',
            'ext': 'divx',
            'title': 'КОТ бааааам',
            'description': 'классный кот)',
            'thumbnail': r're:^http://.*\.jpg$',
        }
    }

    def _real_extract(self, url):
        """Scrape the direct media URL and metadata from the player page."""
        video_id = self._match_id(url)
        page = self._download_webpage(url, video_id)

        # Media source and preview both live in <param> tags of the player.
        video_url = self._html_search_regex(
            r'<param name="src" value="([^"]+)"', page, 'video url')
        thumbnail = self._html_search_regex(
            r'<param name="previewImage" value="([^"]+)"',
            page, 'thumbnail', fatal=False)
        # Thumbnail is site-relative; prepend the host.
        thumbnail = 'http://www.ruhd.ru' + thumbnail if thumbnail else thumbnail

        title = self._html_search_regex(
            r'<title>([^<]+)&nbsp;&nbsp; RUHD\.ru - Видео Высокого качества №1 в России!</title>',
            page, 'title')
        description = self._html_search_regex(
            r'(?s)<div id="longdesc">(.+?)<span id="showlink">',
            page, 'description', fatal=False)

        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/rutube.py
# coding: utf-8
from __future__ import unicode_literals

import re
import itertools

from .common import InfoExtractor
from ..compat import (
    compat_str,
    compat_parse_qs,
    compat_urllib_parse_urlparse,
)
from ..utils import (
    determine_ext,
    bool_or_none,
    int_or_none,
    try_get,
    unified_timestamp,
    url_or_none,
)


class RutubeBaseIE(InfoExtractor):
    """Shared API helpers for all Rutube extractors."""

    def _download_api_info(self, video_id, query=None):
        # Metadata endpoint; always request JSON explicitly.
        if not query:
            query = {}
        query['format'] = 'json'
        return self._download_json(
            'http://rutube.ru/api/video/%s/' % video_id,
            video_id, 'Downloading video JSON',
            'Unable to download video JSON', query=query)

    @staticmethod
    def _extract_info(video, video_id=None, require_title=True):
        # Map a Rutube API "video" object onto an info dict; everything
        # except the title is optional.
        title = video['title'] if require_title else video.get('title')

        age_limit = video.get('is_adult')
        if age_limit is not None:
            age_limit = 18 if age_limit is True else 0

        uploader_id = try_get(video, lambda x: x['author']['id'])
        category = try_get(video, lambda x: x['category']['name'])

        return {
            'id': video.get('id') or video_id if video_id else video['id'],
            'title': title,
            'description': video.get('description'),
            'thumbnail': video.get('thumbnail_url'),
            'duration': int_or_none(video.get('duration')),
            'uploader': try_get(video, lambda x: x['author']['name']),
            'uploader_id': compat_str(uploader_id) if uploader_id else None,
            'timestamp': unified_timestamp(video.get('created_ts')),
            'category': [category] if category else None,
            'age_limit': age_limit,
            'view_count': int_or_none(video.get('hits')),
            'comment_count': int_or_none(video.get('comments_count')),
            'is_live': bool_or_none(video.get('is_livestream')),
        }

    def _download_and_extract_info(self, video_id, query=None):
        return self._extract_info(
            self._download_api_info(video_id, query=query), video_id)

    def _download_api_options(self, video_id, query=None):
        # Playback-options endpoint (format URLs); may be geo-restricted,
        # hence the geo verification headers.
        if not query:
            query = {}
        query['format'] = 'json'
        return self._download_json(
            'http://rutube.ru/api/play/options/%s/' % video_id,
            video_id, 'Downloading options JSON',
            'Unable to download options JSON',
            headers=self.geo_verification_headers(), query=query)

    def _extract_formats(self, options, video_id):
        formats = []
        for format_id, format_url in options['video_balancer'].items():
            ext = determine_ext(format_url)
            if ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    format_url, video_id, 'mp4', m3u8_id=format_id, fatal=False))
            elif ext == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    format_url, video_id, f4m_id=format_id, fatal=False))
            else:
                formats.append({
                    'url': format_url,
                    'format_id': format_id,
                })
        self._sort_formats(formats)
        return formats

    def _download_and_extract_formats(self, video_id, query=None):
        return self._extract_formats(
            self._download_api_options(video_id, query=query), video_id)


class RutubeIE(RutubeBaseIE):
    """Single Rutube video identified by a 32-char hex id."""
    IE_NAME = 'rutube'
    IE_DESC = 'Rutube videos'
    _VALID_URL = r'https?://rutube\.ru/(?:video|(?:play/)?embed)/(?P<id>[\da-z]{32})'

    _TESTS = [{
        'url': 'http://rutube.ru/video/3eac3b4561676c17df9132a9a1e62e3e/',
        'md5': '1d24f180fac7a02f3900712e5a5764d6',
        'info_dict': {
            'id': '3eac3b4561676c17df9132a9a1e62e3e',
            'ext': 'mp4',
            'title': 'Раненный кенгуру забежал в аптеку',
            'description': 'http://www.ntdtv.ru ',
            'duration': 81,
            'uploader': 'NTDRussian',
            'uploader_id': '29790',
            'timestamp': 1381943602,
            'upload_date': '20131016',
            'age_limit': 0,
        },
    }, {
        'url': 'http://rutube.ru/play/embed/a10e53b86e8f349080f718582ce4c661',
        'only_matching': True,
    }, {
        'url': 'http://rutube.ru/embed/a10e53b86e8f349080f718582ce4c661',
        'only_matching': True,
    }, {
        'url': 'http://rutube.ru/video/3eac3b4561676c17df9132a9a1e62e3e/?pl_id=4252',
        'only_matching': True,
    }, {
        'url': 'https://rutube.ru/video/10b3a03fc01d5bbcc632a2f3514e8aab/?pl_type=source',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Playlist URLs also match _VALID_URL; defer them to RutubePlaylistIE.
        return False if RutubePlaylistIE.suitable(url) else super(RutubeIE, cls).suitable(url)

    @staticmethod
    def _extract_urls(webpage):
        return [mobj.group('url') for mobj in re.finditer(
            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//rutube\.ru/embed/[\da-z]{32}.*?)\1',
            webpage)]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        info = self._download_and_extract_info(video_id)
        info['formats'] = self._download_and_extract_formats(video_id)
        return info


class RutubeEmbedIE(RutubeBaseIE):
    """Embedded Rutube player identified by a numeric embed id."""
    IE_NAME = 'rutube:embed'
    IE_DESC = 'Rutube embedded videos'
    _VALID_URL = r'https?://rutube\.ru/(?:video|play)/embed/(?P<id>[0-9]+)'

    _TESTS = [{
        'url': 'http://rutube.ru/video/embed/6722881?vk_puid37=&vk_puid38=',
        'info_dict': {
            'id': 'a10e53b86e8f349080f718582ce4c661',
            'ext': 'mp4',
            'timestamp': 1387830582,
            'upload_date': '20131223',
            'uploader_id': '297833',
            'description': 'Видео группы ★http://vk.com/foxkidsreset★ музей Fox Kids и Jetix<br/><br/> восстановлено и сделано в шикоформате subziro89 http://vk.com/subziro89',
            'uploader': 'subziro89 ILya',
            'title': 'Мистический городок Эйри в Индиан 5 серия озвучка subziro89',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://rutube.ru/play/embed/8083783',
        'only_matching': True,
    }, {
        # private video
        'url': 'https://rutube.ru/play/embed/10631925?p=IbAigKqWd1do4mjaM5XLIQ',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        embed_id = self._match_id(url)
        # Query may contain private videos token and should be passed to API
        # requests (see #19163)
        query = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
        options = self._download_api_options(embed_id, query)
        # The embed id is resolved into the canonical 32-char video id.
        video_id = options['effective_video']
        formats = self._extract_formats(options, video_id)
        info = self._download_and_extract_info(video_id, query)
        info.update({
            'extractor_key': 'Rutube',
            'formats': formats,
        })
        return info


class RutubePlaylistBaseIE(RutubeBaseIE):
    """Common paginated-playlist machinery (channels, movies, persons)."""

    def _next_page_url(self, page_num, playlist_id, *args, **kwargs):
        return self._PAGE_TEMPLATE % (playlist_id, page_num)

    def _entries(self, playlist_id, *args, **kwargs):
        # Follow the API's own "next" links once past page one.
        next_page_url = None
        for pagenum in itertools.count(1):
            page = self._download_json(
                next_page_url or self._next_page_url(
                    pagenum, playlist_id, *args, **kwargs),
                playlist_id, 'Downloading page %s' % pagenum)

            results = page.get('results')
            if not results or not isinstance(results, list):
                break

            for result in results:
                video_url = url_or_none(result.get('video_url'))
                if not video_url:
                    continue
                entry = self._extract_info(result, require_title=False)
                entry.update({
                    '_type': 'url',
                    'url': video_url,
                    'ie_key': RutubeIE.ie_key(),
                })
                yield entry

            next_page_url = page.get('next')
            if not next_page_url or not page.get('has_next'):
                break

    def _extract_playlist(self, playlist_id, *args, **kwargs):
        return self.playlist_result(
            self._entries(playlist_id, *args, **kwargs),
            playlist_id, kwargs.get('playlist_name'))

    def _real_extract(self, url):
        return self._extract_playlist(self._match_id(url))


class RutubeChannelIE(RutubePlaylistBaseIE):
    IE_NAME = 'rutube:channel'
    IE_DESC = 'Rutube channels'
    _VALID_URL = r'https?://rutube\.ru/tags/video/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://rutube.ru/tags/video/1800/',
        'info_dict': {
            'id': '1800',
        },
        'playlist_mincount': 68,
    }]

    _PAGE_TEMPLATE = 'http://rutube.ru/api/tags/video/%s/?page=%s&format=json'


class RutubeMovieIE(RutubePlaylistBaseIE):
    IE_NAME = 'rutube:movie'
    IE_DESC = 'Rutube movies'
    _VALID_URL = r'https?://rutube\.ru/metainfo/tv/(?P<id>\d+)'
    _TESTS = []

    _MOVIE_TEMPLATE = 'http://rutube.ru/api/metainfo/tv/%s/?format=json'
    _PAGE_TEMPLATE = 'http://rutube.ru/api/metainfo/tv/%s/video?page=%s&format=json'

    def _real_extract(self, url):
        movie_id = self._match_id(url)
        # Movie metadata supplies the playlist title.
        movie = self._download_json(
            self._MOVIE_TEMPLATE % movie_id, movie_id,
            'Downloading movie JSON')
        return self._extract_playlist(
            movie_id, playlist_name=movie.get('name'))


class RutubePersonIE(RutubePlaylistBaseIE):
    IE_NAME = 'rutube:person'
    IE_DESC = 'Rutube person videos'
    _VALID_URL = r'https?://rutube\.ru/video/person/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://rutube.ru/video/person/313878/',
        'info_dict': {
            'id': '313878',
        },
        'playlist_mincount': 37,
    }]

    _PAGE_TEMPLATE = 'http://rutube.ru/api/video/person/%s/?page=%s&format=json'


class RutubePlaylistIE(RutubePlaylistBaseIE):
    """Playlist referenced via pl_id/pl_type query params on a video URL."""
    IE_NAME = 'rutube:playlist'
    IE_DESC = 'Rutube playlists'
    _VALID_URL = r'https?://rutube\.ru/(?:video|(?:play/)?embed)/[\da-z]{32}/\?.*?\bpl_id=(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://rutube.ru/video/cecd58ed7d531fc0f3d795d51cee9026/?pl_id=3097&pl_type=tag',
        'info_dict': {
            'id': '3097',
        },
        'playlist_count': 27,
    }, {
        'url': 'https://rutube.ru/video/10b3a03fc01d5bbcc632a2f3514e8aab/?pl_id=4252&pl_type=source',
        'only_matching': True,
    }]

    _PAGE_TEMPLATE = 'http://rutube.ru/api/playlist/%s/%s/?page=%s&format=json'

    @classmethod
    def suitable(cls, url):
        # Only claim URLs that carry both pl_type and a numeric pl_id.
        if not super(RutubePlaylistIE, cls).suitable(url):
            return False
        params = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
        return params.get('pl_type', [None])[0] and int_or_none(params.get('pl_id', [None])[0])

    def _next_page_url(self, page_num, playlist_id, item_kind):
        return self._PAGE_TEMPLATE % (item_kind, playlist_id, page_num)

    def _real_extract(self, url):
        qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
        playlist_kind = qs['pl_type'][0]
        playlist_id = qs['pl_id'][0]
        return self._extract_playlist(playlist_id, item_kind=playlist_kind)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/rutv.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none
)


class RUTVIE(InfoExtractor):
    """Extractor for the RUTV.RU / VGTRK player (flash container, iframe
    and cast-id URL variants; both VOD and live)."""
    IE_DESC = 'RUTV.RU'
    _VALID_URL = r'''(?x)
        https?://
            (?:test)?player\.(?:rutv\.ru|vgtrk\.com)/
            (?P<path>
                flash\d+v/container\.swf\?id=|
                iframe/(?P<type>swf|video|live)/id/|
                index/iframe/cast_id/
            )
            (?P<id>\d+)
    '''

    _TESTS = [
        {
            'url': 'http://player.rutv.ru/flash2v/container.swf?id=774471&sid=kultura&fbv=true&isPlay=true&ssl=false&i=560&acc_video_id=episode_id/972347/video_id/978186/brand_id/31724',
            'info_dict': {
                'id': '774471',
                'ext': 'mp4',
                'title': 'Монологи на все времена',
                'description': 'md5:18d8b5e6a41fb1faa53819471852d5d5',
                'duration': 2906,
            },
            'params': {
                # m3u8 download
                'skip_download': True,
            },
        },
        {
            'url': 'https://player.vgtrk.com/flash2v/container.swf?id=774016&sid=russiatv&fbv=true&isPlay=true&ssl=false&i=560&acc_video_id=episode_id/972098/video_id/977760/brand_id/57638',
            'info_dict': {
                'id': '774016',
                'ext': 'mp4',
                'title': 'Чужой в семье Сталина',
                'description': '',
                'duration': 2539,
            },
            'params': {
                # m3u8 download
                'skip_download': True,
            },
        },
        {
            'url': 'http://player.rutv.ru/iframe/swf/id/766888/sid/hitech/?acc_video_id=4000',
            'info_dict': {
                'id': '766888',
                'ext': 'mp4',
                'title': 'Вести.net: интернет-гиганты начали перетягивание программных "одеял"',
                'description': 'md5:65ddd47f9830c4f42ed6475f8730c995',
                'duration': 279,
            },
            'params': {
                # m3u8 download
                'skip_download': True,
            },
        },
        {
            'url': 'http://player.rutv.ru/iframe/video/id/771852/start_zoom/true/showZoomBtn/false/sid/russiatv/?acc_video_id=episode_id/970443/video_id/975648/brand_id/5169',
            'info_dict': {
                'id': '771852',
                'ext': 'mp4',
                'title': 'Прямой эфир. Жертвы загадочной болезни: смерть от старости в 17 лет',
                'description': 'md5:b81c8c55247a4bd996b43ce17395b2d8',
                'duration': 3096,
            },
            'params': {
                # m3u8 download
                'skip_download': True,
            },
        },
        {
            'url': 'http://player.rutv.ru/iframe/live/id/51499/showZoomBtn/false/isPlay/true/sid/sochi2014',
            'info_dict': {
                'id': '51499',
                'ext': 'flv',
                'title': 'Сочи-2014. Биатлон. Индивидуальная гонка. Мужчины ',
                'description': 'md5:9e0ed5c9d2fa1efbfdfed90c9a6d179c',
            },
            'skip': 'Translation has finished',
        },
        {
            'url': 'http://player.rutv.ru/iframe/live/id/21/showZoomBtn/false/isPlay/true/',
            'info_dict': {
                'id': '21',
                'ext': 'mp4',
                'title': 're:^Россия 24. Прямой эфир [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
                'is_live': True,
            },
            'params': {
                # m3u8 download
                'skip_download': True,
            },
        },
        {
            'url': 'https://testplayer.vgtrk.com/iframe/live/id/19201/showZoomBtn/false/isPlay/true/',
            'only_matching': True,
        },
    ]

    @classmethod
    def _extract_url(cls, webpage):
        # Look for an embedded iframe player first, then for the flash
        # container advertised via the og:video meta tag.
        mobj = re.search(
            r'<iframe[^>]+?src=(["\'])(?P<url>https?://(?:test)?player\.(?:rutv\.ru|vgtrk\.com)/(?:iframe/(?:swf|video|live)/id|index/iframe/cast_id)/.+?)\1', webpage)
        if mobj:
            return mobj.group('url')

        mobj = re.search(
            r'<meta[^>]+?property=(["\'])og:video\1[^>]+?content=(["\'])(?P<url>https?://(?:test)?player\.(?:rutv\.ru|vgtrk\.com)/flash\d+v/container\.swf\?id=.+?\2)',
            webpage)
        if mobj:
            return mobj.group('url')

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        video_path = mobj.group('path')

        # Derive VOD vs live from the URL path variant.
        if re.match(r'flash\d+v', video_path):
            video_type = 'video'
        elif video_path.startswith('iframe'):
            video_type = mobj.group('type')
            if video_type == 'swf':
                video_type = 'video'
        elif video_path.startswith('index/iframe/cast_id'):
            video_type = 'live'

        is_live = video_type == 'live'

        json_data = self._download_json(
            'http://player.rutv.ru/iframe/data%s/id/%s' % ('live' if is_live else 'video', video_id),
            video_id, 'Downloading JSON')

        if json_data['errors']:
            raise ExtractorError('%s said: %s' % (self.IE_NAME, json_data['errors']), expected=True)

        playlist = json_data['data']['playlist']
        medialist = playlist['medialist']
        media = medialist[0]

        if media['errors']:
            raise ExtractorError('%s said: %s' % (self.IE_NAME, media['errors']), expected=True)

        view_count = playlist.get('count_views')
        priority_transport = playlist['priority_transport']

        thumbnail = media['picture']
        width = int_or_none(media['width'])
        height = int_or_none(media['height'])
        description = media['anons']
        title = media['title']
        duration = int_or_none(media.get('duration'))

        formats = []

        # Sources are grouped by transport (rtmp/m3u8/http), then quality.
        for transport, links in media['sources'].items():
            for quality, url in links.items():
                # The server-preferred transport gets a higher preference.
                preference = -1 if priority_transport == transport else -2
                if transport == 'rtmp':
                    mobj = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>.+))/(?P<playpath>.+)$', url)
                    if not mobj:
                        continue
                    fmt = {
                        'url': mobj.group('url'),
                        'play_path': mobj.group('playpath'),
                        'app': mobj.group('app'),
                        'page_url': 'http://player.rutv.ru',
                        'player_url': 'http://player.rutv.ru/flash3v/osmf.swf?i=22',
                        'rtmp_live': True,
                        'ext': 'flv',
                        'vbr': int(quality),
                        'preference': preference,
                    }
                elif transport == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        url, video_id, 'mp4', preference=preference, m3u8_id='hls'))
                    continue
                else:
                    fmt = {
                        'url': url
                    }
                fmt.update({
                    'width': width,
                    'height': height,
                    'format_id': '%s-%s' % (transport, quality),
                })
                formats.append(fmt)

        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': self._live_title(title) if is_live else title,
            'description': description,
            'thumbnail': thumbnail,
            'view_count': view_count,
            'duration': duration,
            'formats': formats,
            'is_live': is_live,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/ruutu.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_urllib_parse_urlparse from ..utils import ( determine_ext, ExtractorError, int_or_none, xpath_attr, xpath_text, ) class RuutuIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?(?:ruutu|supla)\.fi/(?:video|supla)/(?P<id>\d+)' _TESTS = [ { 'url': 'http://www.ruutu.fi/video/2058907', 'md5': 'ab2093f39be1ca8581963451b3c0234f', 'info_dict': { 'id': '2058907', 'ext': 'mp4', 'title': 'Oletko aina halunnut tietää mitä tapahtuu vain hetki ennen lähetystä? - Nyt se selvisi!', 'description': 'md5:cfc6ccf0e57a814360df464a91ff67d6', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 114, 'age_limit': 0, }, }, { 'url': 'http://www.ruutu.fi/video/2057306', 'md5': '065a10ae4d5b8cfd9d0c3d332465e3d9', 'info_dict': { 'id': '2057306', 'ext': 'mp4', 'title': 'Superpesis: katso koko kausi Ruudussa', 'description': 'md5:bfb7336df2a12dc21d18fa696c9f8f23', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 40, 'age_limit': 0, }, }, { 'url': 'http://www.supla.fi/supla/2231370', 'md5': 'df14e782d49a2c0df03d3be2a54ef949', 'info_dict': { 'id': '2231370', 'ext': 'mp4', 'title': 'Osa 1: Mikael Jungner', 'description': 'md5:7d90f358c47542e3072ff65d7b1bcffe', 'thumbnail': r're:^https?://.*\.jpg$', 'age_limit': 0, }, }, # Episode where <SourceFile> is "NOT-USED", but has other # downloadable sources available. 
{ 'url': 'http://www.ruutu.fi/video/3193728', 'only_matching': True, }, { # audio podcast 'url': 'https://www.supla.fi/supla/3382410', 'md5': 'b9d7155fed37b2ebf6021d74c4b8e908', 'info_dict': { 'id': '3382410', 'ext': 'mp3', 'title': 'Mikä ihmeen poltergeist?', 'description': 'md5:bbb6963df17dfd0ecd9eb9a61bf14b52', 'thumbnail': r're:^https?://.*\.jpg$', 'age_limit': 0, }, 'expected_warnings': ['HTTP Error 502: Bad Gateway'], } ] def _real_extract(self, url): video_id = self._match_id(url) video_xml = self._download_xml( 'https://gatling.nelonenmedia.fi/media-xml-cache', video_id, query={'id': video_id}) formats = [] processed_urls = [] def extract_formats(node): for child in node: if child.tag.endswith('Files'): extract_formats(child) elif child.tag.endswith('File'): video_url = child.text if (not video_url or video_url in processed_urls or any(p in video_url for p in ('NOT_USED', 'NOT-USED'))): continue processed_urls.append(video_url) ext = determine_ext(video_url) if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( video_url, video_id, 'mp4', m3u8_id='hls', fatal=False)) elif ext == 'f4m': formats.extend(self._extract_f4m_formats( video_url, video_id, f4m_id='hds', fatal=False)) elif ext == 'mpd': # video-only and audio-only streams are of different # duration resulting in out of sync issue continue formats.extend(self._extract_mpd_formats( video_url, video_id, mpd_id='dash', fatal=False)) elif ext == 'mp3' or child.tag == 'AudioMediaFile': formats.append({ 'format_id': 'audio', 'url': video_url, 'vcodec': 'none', }) else: proto = compat_urllib_parse_urlparse(video_url).scheme if not child.tag.startswith('HTTP') and proto != 'rtmp': continue preference = -1 if proto == 'rtmp' else 1 label = child.get('label') tbr = int_or_none(child.get('bitrate')) format_id = '%s-%s' % (proto, label if label else tbr) if label or tbr else proto if not self._is_valid_url(video_url, video_id, format_id): continue width, height = [int_or_none(x) for x in 
child.get('resolution', 'x').split('x')[:2]] formats.append({ 'format_id': format_id, 'url': video_url, 'width': width, 'height': height, 'tbr': tbr, 'preference': preference, }) extract_formats(video_xml.find('./Clip')) drm = xpath_text(video_xml, './Clip/DRM', default=None) if not formats and drm: raise ExtractorError('This video is DRM protected.', expected=True) self._sort_formats(formats) return { 'id': video_id, 'title': xpath_attr(video_xml, './/Behavior/Program', 'program_name', 'title', fatal=True), 'description': xpath_attr(video_xml, './/Behavior/Program', 'description', 'description'), 'thumbnail': xpath_attr(video_xml, './/Behavior/Startpicture', 'href', 'thumbnail'), 'duration': int_or_none(xpath_text(video_xml, './/Runtime', 'duration')), 'age_limit': int_or_none(xpath_text(video_xml, './/AgeLimit', 'age limit')), 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/ruv.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( determine_ext, unified_timestamp, ) class RuvIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?ruv\.is/(?:sarpurinn/[^/]+|node)/(?P<id>[^/]+(?:/\d+)?)' _TESTS = [{ # m3u8 'url': 'http://ruv.is/sarpurinn/ruv-aukaras/fh-valur/20170516', 'md5': '66347652f4e13e71936817102acc1724', 'info_dict': { 'id': '1144499', 'display_id': 'fh-valur/20170516', 'ext': 'mp4', 'title': 'FH - Valur', 'description': 'Bein útsending frá 3. leik FH og Vals í úrslitum Olísdeildar karla í handbolta.', 'timestamp': 1494963600, 'upload_date': '20170516', }, }, { # mp3 'url': 'http://ruv.is/sarpurinn/ras-2/morgunutvarpid/20170619', 'md5': '395ea250c8a13e5fdb39d4670ef85378', 'info_dict': { 'id': '1153630', 'display_id': 'morgunutvarpid/20170619', 'ext': 'mp3', 'title': 'Morgunútvarpið', 'description': 'md5:a4cf1202c0a1645ca096b06525915418', 'timestamp': 1497855000, 'upload_date': '20170619', }, }, { 'url': 'http://ruv.is/sarpurinn/ruv/frettir/20170614', 'only_matching': True, }, { 'url': 'http://www.ruv.is/node/1151854', 'only_matching': True, }, { 'url': 'http://ruv.is/sarpurinn/klippa/secret-soltice-hefst-a-morgun', 'only_matching': True, }, { 'url': 'http://ruv.is/sarpurinn/ras-1/morgunvaktin/20170619', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) title = self._og_search_title(webpage) FIELD_RE = r'video\.%s\s*=\s*(["\'])(?P<url>(?:(?!\1).)+)\1' media_url = self._html_search_regex( FIELD_RE % 'src', webpage, 'video URL', group='url') video_id = self._search_regex( r'<link\b[^>]+\bhref=["\']https?://www\.ruv\.is/node/(\d+)', webpage, 'video id', default=display_id) ext = determine_ext(media_url) if ext == 'm3u8': formats = self._extract_m3u8_formats( media_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls') elif ext == 'mp3': formats = [{ 'format_id': 'mp3', 'url': 
media_url, 'vcodec': 'none', }] else: formats = [{ 'url': media_url, }] description = self._og_search_description(webpage, default=None) thumbnail = self._og_search_thumbnail( webpage, default=None) or self._search_regex( FIELD_RE % 'poster', webpage, 'thumbnail', fatal=False) timestamp = unified_timestamp(self._html_search_meta( 'article:published_time', webpage, 'timestamp', fatal=False)) return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'timestamp': timestamp, 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/safari.py
# coding: utf-8 from __future__ import unicode_literals import json import re from .common import InfoExtractor from ..compat import ( compat_parse_qs, compat_str, compat_urlparse, ) from ..utils import ( ExtractorError, update_url_query, ) class SafariBaseIE(InfoExtractor): _LOGIN_URL = 'https://learning.oreilly.com/accounts/login/' _NETRC_MACHINE = 'safari' _API_BASE = 'https://learning.oreilly.com/api/v1' _API_FORMAT = 'json' LOGGED_IN = False def _real_initialize(self): self._login() def _login(self): username, password = self._get_login_info() if username is None: return _, urlh = self._download_webpage_handle( 'https://learning.oreilly.com/accounts/login-check/', None, 'Downloading login page') def is_logged(urlh): return 'learning.oreilly.com/home/' in compat_str(urlh.geturl()) if is_logged(urlh): self.LOGGED_IN = True return redirect_url = compat_str(urlh.geturl()) parsed_url = compat_urlparse.urlparse(redirect_url) qs = compat_parse_qs(parsed_url.query) next_uri = compat_urlparse.urljoin( 'https://api.oreilly.com', qs['next'][0]) auth, urlh = self._download_json_handle( 'https://www.oreilly.com/member/auth/login/', None, 'Logging in', data=json.dumps({ 'email': username, 'password': password, 'redirect_uri': next_uri, }).encode(), headers={ 'Content-Type': 'application/json', 'Referer': redirect_url, }, expected_status=400) credentials = auth.get('credentials') if (not auth.get('logged_in') and not auth.get('redirect_uri') and credentials): raise ExtractorError( 'Unable to login: %s' % credentials, expected=True) # oreilly serves two same instances of the following cookies # in Set-Cookie header and expects first one to be actually set for cookie in ('groot_sessionid', 'orm-jwt', 'orm-rt'): self._apply_first_set_cookie_header(urlh, cookie) _, urlh = self._download_webpage_handle( auth.get('redirect_uri') or next_uri, None, 'Completing login',) if is_logged(urlh): self.LOGGED_IN = True return raise ExtractorError('Unable to log in') class 
SafariIE(SafariBaseIE): IE_NAME = 'safari' IE_DESC = 'safaribooksonline.com online video' _VALID_URL = r'''(?x) https?:// (?:www\.)?(?:safaribooksonline|(?:learning\.)?oreilly)\.com/ (?: library/view/[^/]+/(?P<course_id>[^/]+)/(?P<part>[^/?\#&]+)\.html| videos/[^/]+/[^/]+/(?P<reference_id>[^-]+-[^/?\#&]+) ) ''' _TESTS = [{ 'url': 'https://www.safaribooksonline.com/library/view/hadoop-fundamentals-livelessons/9780133392838/part00.html', 'md5': 'dcc5a425e79f2564148652616af1f2a3', 'info_dict': { 'id': '0_qbqx90ic', 'ext': 'mp4', 'title': 'Introduction to Hadoop Fundamentals LiveLessons', 'timestamp': 1437758058, 'upload_date': '20150724', 'uploader_id': 'stork', }, }, { # non-digits in course id 'url': 'https://www.safaribooksonline.com/library/view/create-a-nodejs/100000006A0210/part00.html', 'only_matching': True, }, { 'url': 'https://www.safaribooksonline.com/library/view/learning-path-red/9780134664057/RHCE_Introduction.html', 'only_matching': True, }, { 'url': 'https://www.safaribooksonline.com/videos/python-programming-language/9780134217314/9780134217314-PYMC_13_00', 'only_matching': True, }, { 'url': 'https://learning.oreilly.com/videos/hadoop-fundamentals-livelessons/9780133392838/9780133392838-00_SeriesIntro', 'only_matching': True, }, { 'url': 'https://www.oreilly.com/library/view/hadoop-fundamentals-livelessons/9780133392838/00_SeriesIntro.html', 'only_matching': True, }] _PARTNER_ID = '1926081' _UICONF_ID = '29375172' def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) reference_id = mobj.group('reference_id') if reference_id: video_id = reference_id partner_id = self._PARTNER_ID ui_id = self._UICONF_ID else: video_id = '%s-%s' % (mobj.group('course_id'), mobj.group('part')) webpage, urlh = self._download_webpage_handle(url, video_id) mobj = re.match(self._VALID_URL, urlh.geturl()) reference_id = mobj.group('reference_id') if not reference_id: reference_id = self._search_regex( r'data-reference-id=(["\'])(?P<id>(?:(?!\1).)+)\1', webpage, 
'kaltura reference id', group='id') partner_id = self._search_regex( r'data-partner-id=(["\'])(?P<id>(?:(?!\1).)+)\1', webpage, 'kaltura widget id', default=self._PARTNER_ID, group='id') ui_id = self._search_regex( r'data-ui-id=(["\'])(?P<id>(?:(?!\1).)+)\1', webpage, 'kaltura uiconf id', default=self._UICONF_ID, group='id') query = { 'wid': '_%s' % partner_id, 'uiconf_id': ui_id, 'flashvars[referenceId]': reference_id, } if self.LOGGED_IN: kaltura_session = self._download_json( '%s/player/kaltura_session/?reference_id=%s' % (self._API_BASE, reference_id), video_id, 'Downloading kaltura session JSON', 'Unable to download kaltura session JSON', fatal=False) if kaltura_session: session = kaltura_session.get('session') if session: query['flashvars[ks]'] = session return self.url_result(update_url_query( 'https://cdnapisec.kaltura.com/html5/html5lib/v2.37.1/mwEmbedFrame.php', query), 'Kaltura') class SafariApiIE(SafariBaseIE): IE_NAME = 'safari:api' _VALID_URL = r'https?://(?:www\.)?(?:safaribooksonline|(?:learning\.)?oreilly)\.com/api/v1/book/(?P<course_id>[^/]+)/chapter(?:-content)?/(?P<part>[^/?#&]+)\.html' _TESTS = [{ 'url': 'https://www.safaribooksonline.com/api/v1/book/9780133392838/chapter/part00.html', 'only_matching': True, }, { 'url': 'https://www.safaribooksonline.com/api/v1/book/9780134664057/chapter/RHCE_Introduction.html', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) part = self._download_json( url, '%s/%s' % (mobj.group('course_id'), mobj.group('part')), 'Downloading part JSON') return self.url_result(part['web_url'], SafariIE.ie_key()) class SafariCourseIE(SafariBaseIE): IE_NAME = 'safari:course' IE_DESC = 'safaribooksonline.com online courses' _VALID_URL = r'''(?x) https?:// (?: (?:www\.)?(?:safaribooksonline|(?:learning\.)?oreilly)\.com/ (?: library/view/[^/]+| api/v1/book| videos/[^/]+ )| techbus\.safaribooksonline\.com ) /(?P<id>[^/]+) ''' _TESTS = [{ 'url': 
'https://www.safaribooksonline.com/library/view/hadoop-fundamentals-livelessons/9780133392838/', 'info_dict': { 'id': '9780133392838', 'title': 'Hadoop Fundamentals LiveLessons', }, 'playlist_count': 22, 'skip': 'Requires safaribooksonline account credentials', }, { 'url': 'https://www.safaribooksonline.com/api/v1/book/9781449396459/?override_format=json', 'only_matching': True, }, { 'url': 'http://techbus.safaribooksonline.com/9780134426365', 'only_matching': True, }, { 'url': 'https://www.safaribooksonline.com/videos/python-programming-language/9780134217314', 'only_matching': True, }, { 'url': 'https://learning.oreilly.com/videos/hadoop-fundamentals-livelessons/9780133392838', 'only_matching': True, }, { 'url': 'https://www.oreilly.com/library/view/hadoop-fundamentals-livelessons/9780133392838/', 'only_matching': True, }] @classmethod def suitable(cls, url): return (False if SafariIE.suitable(url) or SafariApiIE.suitable(url) else super(SafariCourseIE, cls).suitable(url)) def _real_extract(self, url): course_id = self._match_id(url) course_json = self._download_json( '%s/book/%s/?override_format=%s' % (self._API_BASE, course_id, self._API_FORMAT), course_id, 'Downloading course JSON') if 'chapters' not in course_json: raise ExtractorError( 'No chapters found for course %s' % course_id, expected=True) entries = [ self.url_result(chapter, SafariApiIE.ie_key()) for chapter in course_json['chapters']] course_title = course_json['title'] return self.playlist_result(entries, course_id, course_title)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/sapo.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( parse_duration, unified_strdate, ) class SapoIE(InfoExtractor): IE_DESC = 'SAPO Vídeos' _VALID_URL = r'https?://(?:(?:v2|www)\.)?videos\.sapo\.(?:pt|cv|ao|mz|tl)/(?P<id>[\da-zA-Z]{20})' _TESTS = [ { 'url': 'http://videos.sapo.pt/UBz95kOtiWYUMTA5Ghfi', 'md5': '79ee523f6ecb9233ac25075dee0eda83', 'note': 'SD video', 'info_dict': { 'id': 'UBz95kOtiWYUMTA5Ghfi', 'ext': 'mp4', 'title': 'Benfica - Marcas na Hitória', 'description': 'md5:c9082000a128c3fd57bf0299e1367f22', 'duration': 264, 'uploader': 'tiago_1988', 'upload_date': '20080229', 'categories': ['benfica', 'cabral', 'desporto', 'futebol', 'geovanni', 'hooijdonk', 'joao', 'karel', 'lisboa', 'miccoli'], }, }, { 'url': 'http://videos.sapo.pt/IyusNAZ791ZdoCY5H5IF', 'md5': '90a2f283cfb49193fe06e861613a72aa', 'note': 'HD video', 'info_dict': { 'id': 'IyusNAZ791ZdoCY5H5IF', 'ext': 'mp4', 'title': 'Codebits VII - Report', 'description': 'md5:6448d6fd81ce86feac05321f354dbdc8', 'duration': 144, 'uploader': 'codebits', 'upload_date': '20140427', 'categories': ['codebits', 'codebits2014'], }, }, { 'url': 'http://v2.videos.sapo.pt/yLqjzPtbTimsn2wWBKHz', 'md5': 'e5aa7cc0bdc6db9b33df1a48e49a15ac', 'note': 'v2 video', 'info_dict': { 'id': 'yLqjzPtbTimsn2wWBKHz', 'ext': 'mp4', 'title': 'Hipnose Condicionativa 4', 'description': 'md5:ef0481abf8fb4ae6f525088a6dadbc40', 'duration': 692, 'uploader': 'sapozen', 'upload_date': '20090609', 'categories': ['condicionativa', 'heloisa', 'hipnose', 'miranda', 'sapo', 'zen'], }, }, ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') item = self._download_xml( 'http://rd3.videos.sapo.pt/%s/rss2' % video_id, video_id).find('./channel/item') title = item.find('./title').text description = item.find('./{http://videos.sapo.pt/mrss/}synopse').text thumbnail = item.find('./{http://search.yahoo.com/mrss/}content').get('url') duration = 
parse_duration(item.find('./{http://videos.sapo.pt/mrss/}time').text) uploader = item.find('./{http://videos.sapo.pt/mrss/}author').text upload_date = unified_strdate(item.find('./pubDate').text) view_count = int(item.find('./{http://videos.sapo.pt/mrss/}views').text) comment_count = int(item.find('./{http://videos.sapo.pt/mrss/}comment_count').text) tags = item.find('./{http://videos.sapo.pt/mrss/}tags').text categories = tags.split() if tags else [] age_limit = 18 if item.find('./{http://videos.sapo.pt/mrss/}m18').text == 'true' else 0 video_url = item.find('./{http://videos.sapo.pt/mrss/}videoFile').text video_size = item.find('./{http://videos.sapo.pt/mrss/}videoSize').text.split('x') formats = [{ 'url': video_url, 'ext': 'mp4', 'format_id': 'sd', 'width': int(video_size[0]), 'height': int(video_size[1]), }] if item.find('./{http://videos.sapo.pt/mrss/}HD').text == 'true': formats.append({ 'url': re.sub(r'/mov/1$', '/mov/39', video_url), 'ext': 'mp4', 'format_id': 'hd', 'width': 1280, 'height': 720, }) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'uploader': uploader, 'upload_date': upload_date, 'view_count': view_count, 'comment_count': comment_count, 'categories': categories, 'age_limit': age_limit, 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/savefrom.py
# coding: utf-8 from __future__ import unicode_literals import os.path import re from .common import InfoExtractor class SaveFromIE(InfoExtractor): IE_NAME = 'savefrom.net' _VALID_URL = r'https?://[^.]+\.savefrom\.net/\#url=(?P<url>.*)$' _TEST = { 'url': 'http://en.savefrom.net/#url=http://youtube.com/watch?v=UlVRAPW2WJY&utm_source=youtube.com&utm_medium=short_domains&utm_campaign=ssyoutube.com', 'info_dict': { 'id': 'UlVRAPW2WJY', 'ext': 'mp4', 'title': 'About Team Radical MMA | MMA Fighting', 'upload_date': '20120816', 'uploader': 'Howcast', 'uploader_id': 'Howcast', 'description': r're:(?s).* Hi, my name is Rene Dreifuss\. And I\'m here to show you some MMA.*', }, 'params': { 'skip_download': True } } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = os.path.splitext(url.split('/')[-1])[0] return self.url_result(mobj.group('url'), video_id=video_id)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/sbs.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( smuggle_url, ExtractorError, ) class SBSIE(InfoExtractor): IE_DESC = 'sbs.com.au' _VALID_URL = r'https?://(?:www\.)?sbs\.com\.au/(?:ondemand|news)/video/(?:single/)?(?P<id>[0-9]+)' _TESTS = [{ # Original URL is handled by the generic IE which finds the iframe: # http://www.sbs.com.au/thefeed/blog/2014/08/21/dingo-conservation 'url': 'http://www.sbs.com.au/ondemand/video/single/320403011771/?source=drupal&vertical=thefeed', 'md5': '3150cf278965eeabb5b4cea1c963fe0a', 'info_dict': { 'id': '320403011771', 'ext': 'mp4', 'title': 'Dingo Conservation (The Feed)', 'description': 'md5:f250a9856fca50d22dec0b5b8015f8a5', 'thumbnail': r're:http://.*\.jpg', 'duration': 308, 'timestamp': 1408613220, 'upload_date': '20140821', 'uploader': 'SBSC', }, }, { 'url': 'http://www.sbs.com.au/ondemand/video/320403011771/Dingo-Conservation-The-Feed', 'only_matching': True, }, { 'url': 'http://www.sbs.com.au/news/video/471395907773/The-Feed-July-9', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) player_params = self._download_json( 'http://www.sbs.com.au/api/video_pdkvars/id/%s?form=json' % video_id, video_id) error = player_params.get('error') if error: error_message = 'Sorry, The video you are looking for does not exist.' video_data = error.get('results') or {} error_code = error.get('errorCode') if error_code == 'ComingSoon': error_message = '%s is not yet available.' % video_data.get('title', '') elif error_code in ('Forbidden', 'intranetAccessOnly'): error_message = 'Sorry, This video cannot be accessed via this website' elif error_code == 'Expired': error_message = 'Sorry, %s is no longer available.' 
% video_data.get('title', '') raise ExtractorError('%s said: %s' % (self.IE_NAME, error_message), expected=True) urls = player_params['releaseUrls'] theplatform_url = (urls.get('progressive') or urls.get('html') or urls.get('standard') or player_params['relatedItemsURL']) return { '_type': 'url_transparent', 'ie_key': 'ThePlatform', 'id': video_id, 'url': smuggle_url(self._proto_relative_url(theplatform_url), {'force_smil_url': True}), }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/screencast.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( compat_parse_qs, compat_urllib_request, ) from ..utils import ( ExtractorError, ) class ScreencastIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?screencast\.com/t/(?P<id>[a-zA-Z0-9]+)' _TESTS = [{ 'url': 'http://www.screencast.com/t/3ZEjQXlT', 'md5': '917df1c13798a3e96211dd1561fded83', 'info_dict': { 'id': '3ZEjQXlT', 'ext': 'm4v', 'title': 'Color Measurement with Ocean Optics Spectrometers', 'description': 'md5:240369cde69d8bed61349a199c5fb153', 'thumbnail': r're:^https?://.*\.(?:gif|jpg)$', } }, { 'url': 'http://www.screencast.com/t/V2uXehPJa1ZI', 'md5': 'e8e4b375a7660a9e7e35c33973410d34', 'info_dict': { 'id': 'V2uXehPJa1ZI', 'ext': 'mov', 'title': 'The Amadeus Spectrometer', 'description': 're:^In this video, our friends at.*To learn more about Amadeus, visit', 'thumbnail': r're:^https?://.*\.(?:gif|jpg)$', } }, { 'url': 'http://www.screencast.com/t/aAB3iowa', 'md5': 'dedb2734ed00c9755761ccaee88527cd', 'info_dict': { 'id': 'aAB3iowa', 'ext': 'mp4', 'title': 'Google Earth Export', 'description': 'Provides a demo of a CommunityViz export to Google Earth, one of the 3D viewing options.', 'thumbnail': r're:^https?://.*\.(?:gif|jpg)$', } }, { 'url': 'http://www.screencast.com/t/X3ddTrYh', 'md5': '669ee55ff9c51988b4ebc0877cc8b159', 'info_dict': { 'id': 'X3ddTrYh', 'ext': 'wmv', 'title': 'Toolkit 6 User Group Webinar (2014-03-04) - Default Judgment and First Impression', 'description': 'md5:7b9f393bc92af02326a5c5889639eab0', 'thumbnail': r're:^https?://.*\.(?:gif|jpg)$', } }, { 'url': 'http://screencast.com/t/aAB3iowa', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) video_url = self._html_search_regex( r'<embed name="Video".*?src="([^"]+)"', webpage, 'QuickTime embed', default=None) if video_url is None: flash_vars_s = self._html_search_regex( r'<param 
name="flashVars" value="([^"]+)"', webpage, 'flash vars', default=None) if not flash_vars_s: flash_vars_s = self._html_search_regex( r'<param name="initParams" value="([^"]+)"', webpage, 'flash vars', default=None) if flash_vars_s: flash_vars_s = flash_vars_s.replace(',', '&') if flash_vars_s: flash_vars = compat_parse_qs(flash_vars_s) video_url_raw = compat_urllib_request.quote( flash_vars['content'][0]) video_url = video_url_raw.replace('http%3A', 'http:') if video_url is None: video_meta = self._html_search_meta( 'og:video', webpage, default=None) if video_meta: video_url = self._search_regex( r'src=(.*?)(?:$|&)', video_meta, 'meta tag video URL', default=None) if video_url is None: video_url = self._html_search_regex( r'MediaContentUrl["\']\s*:(["\'])(?P<url>(?:(?!\1).)+)\1', webpage, 'video url', default=None, group='url') if video_url is None: video_url = self._html_search_meta( 'og:video', webpage, default=None) if video_url is None: raise ExtractorError('Cannot find video') title = self._og_search_title(webpage, default=None) if title is None: title = self._html_search_regex( [r'<b>Title:</b> ([^<]+)</div>', r'class="tabSeperator">></span><span class="tabText">(.+?)<', r'<title>([^<]+)</title>'], webpage, 'title') thumbnail = self._og_search_thumbnail(webpage) description = self._og_search_description(webpage, default=None) if description is None: description = self._html_search_meta('description', webpage) return { 'id': video_id, 'url': video_url, 'title': title, 'description': description, 'thumbnail': thumbnail, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/screencastomatic.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import js_to_json class ScreencastOMaticIE(InfoExtractor): _VALID_URL = r'https?://screencast-o-matic\.com/watch/(?P<id>[0-9a-zA-Z]+)' _TEST = { 'url': 'http://screencast-o-matic.com/watch/c2lD3BeOPl', 'md5': '483583cb80d92588f15ccbedd90f0c18', 'info_dict': { 'id': 'c2lD3BeOPl', 'ext': 'mp4', 'title': 'Welcome to 3-4 Philosophy @ DECV!', 'thumbnail': r're:^https?://.*\.jpg$', 'description': 'as the title says! also: some general info re 1) VCE philosophy and 2) distance learning.', 'duration': 369.163, } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) jwplayer_data = self._parse_json( self._search_regex( r"(?s)jwplayer\('mp4Player'\).setup\((\{.*?\})\);", webpage, 'setup code'), video_id, transform_source=js_to_json) info_dict = self._parse_jwplayer_data(jwplayer_data, video_id, require_title=False) info_dict.update({ 'title': self._og_search_title(webpage), 'description': self._og_search_description(webpage), }) return info_dict
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/scrippsnetworks.py
# coding: utf-8 from __future__ import unicode_literals import json import hashlib import re from .aws import AWSIE from .anvato import AnvatoIE from ..utils import ( smuggle_url, urlencode_postdata, xpath_text, ) class ScrippsNetworksWatchIE(AWSIE): IE_NAME = 'scrippsnetworks:watch' _VALID_URL = r'''(?x) https?:// watch\. (?P<site>geniuskitchen)\.com/ (?: player\.[A-Z0-9]+\.html\#| show/(?:[^/]+/){2}| player/ ) (?P<id>\d+) ''' _TESTS = [{ 'url': 'http://watch.geniuskitchen.com/player/3787617/Ample-Hills-Ice-Cream-Bike/', 'info_dict': { 'id': '4194875', 'ext': 'mp4', 'title': 'Ample Hills Ice Cream Bike', 'description': 'Courtney Rada churns up a signature GK Now ice cream with The Scoopmaster.', 'uploader': 'ANV', 'upload_date': '20171011', 'timestamp': 1507698000, }, 'params': { 'skip_download': True, }, 'add_ie': [AnvatoIE.ie_key()], }] _SNI_TABLE = { 'geniuskitchen': 'genius', } _AWS_API_KEY = 'E7wSQmq0qK6xPrF13WmzKiHo4BQ7tip4pQcSXVl1' _AWS_PROXY_HOST = 'web.api.video.snidigital.com' _AWS_USER_AGENT = 'aws-sdk-js/2.80.0 callback' def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) site_id, video_id = mobj.group('site', 'id') aws_identity_id_json = json.dumps({ 'IdentityId': '%s:7655847c-0ae7-4d9b-80d6-56c062927eb3' % self._AWS_REGION }).encode('utf-8') token = self._download_json( 'https://cognito-identity.%s.amazonaws.com/' % self._AWS_REGION, video_id, data=aws_identity_id_json, headers={ 'Accept': '*/*', 'Content-Type': 'application/x-amz-json-1.1', 'Referer': url, 'X-Amz-Content-Sha256': hashlib.sha256(aws_identity_id_json).hexdigest(), 'X-Amz-Target': 'AWSCognitoIdentityService.GetOpenIdToken', 'X-Amz-User-Agent': self._AWS_USER_AGENT, })['Token'] sts = self._download_xml( 'https://sts.amazonaws.com/', video_id, data=urlencode_postdata({ 'Action': 'AssumeRoleWithWebIdentity', 'RoleArn': 'arn:aws:iam::710330595350:role/Cognito_WebAPIUnauth_Role', 'RoleSessionName': 'web-identity', 'Version': '2011-06-15', 'WebIdentityToken': token, }), 
headers={ 'Referer': url, 'X-Amz-User-Agent': self._AWS_USER_AGENT, 'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8', }) def get(key): return xpath_text( sts, './/{https://sts.amazonaws.com/doc/2011-06-15/}%s' % key, fatal=True) mcp_id = self._aws_execute_api({ 'uri': '/1/web/brands/%s/episodes/scrid/%s' % (self._SNI_TABLE[site_id], video_id), 'access_key': get('AccessKeyId'), 'secret_key': get('SecretAccessKey'), 'session_token': get('SessionToken'), }, video_id)['results'][0]['mcpId'] return self.url_result( smuggle_url( 'anvato:anvato_scripps_app_web_prod_0837996dbe373629133857ae9eb72e740424d80a:%s' % mcp_id, {'geo_countries': ['US']}), AnvatoIE.ie_key(), video_id=mcp_id)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/seeker.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor


class SeekerIE(InfoExtractor):
    """Extractor for seeker.com article pages.

    Seeker hosts its videos on the Revision3 platform.  Articles embed them
    either via a ``player.loadRevision3Item(...)`` JavaScript call or via
    seekernetwork.com player iframes; both cases are delegated to the
    Revision3Embed extractor through ``url_result``.
    """

    # The slug before the trailing digits is the display id; the digits are
    # the article id used as the playlist id.
    _VALID_URL = r'https?://(?:www\.)?seeker\.com/(?P<display_id>.*)-(?P<article_id>\d+)\.html'
    _TESTS = [{
        # player.loadRevision3Item
        'url': 'http://www.seeker.com/should-trump-be-required-to-release-his-tax-returns-1833805621.html',
        'md5': '30c1dc4030cc715cf05b423d0947ac18',
        'info_dict': {
            'id': '76243',
            'ext': 'webm',
            'title': 'Should Trump Be Required To Release His Tax Returns?',
            'description': 'Donald Trump has been secretive about his "big," "beautiful" tax returns. So what can we learn if he decides to release them?',
            'uploader': 'Seeker Daily',
            'uploader_id': 'seekerdaily',
        }
    }, {
        'url': 'http://www.seeker.com/changes-expected-at-zoos-following-recent-gorilla-lion-shootings-1834116536.html',
        'playlist': [
            {
                'md5': '83bcd157cab89ad7318dd7b8c9cf1306',
                'info_dict': {
                    'id': '67558',
                    'ext': 'mp4',
                    'title': 'The Pros & Cons Of Zoos',
                    'description': 'Zoos are often depicted as a terrible place for animals to live, but is there any truth to this?',
                    'uploader': 'DNews',
                    'uploader_id': 'dnews',
                },
            }
        ],
        'info_dict': {
            'id': '1834116536',
            'title': 'After Gorilla Killing, Changes Ahead for Zoos',
            'description': 'The largest association of zoos and others are hoping to learn from recent incidents that led to the shooting deaths of a gorilla and two lions.',
        },
    }]

    def _real_extract(self, url):
        """Resolve an article URL to one (or a playlist of) Revision3 embeds."""
        display_id, article_id = re.match(self._VALID_URL, url).groups()
        webpage = self._download_webpage(url, display_id)
        # Preferred embed style: a direct JS call carrying the item type and id.
        mobj = re.search(r"player\.loadRevision3Item\('([^']+)'\s*,\s*(\d+)\);", webpage)
        if mobj:
            playlist_type, playlist_id = mobj.groups()
            return self.url_result(
                'revision3:%s:%s' % (playlist_type, playlist_id), 'Revision3Embed', playlist_id)
        else:
            # Fallback: collect every seekernetwork player iframe on the page
            # and return them as a playlist keyed by the article id.
            entries = [self.url_result('revision3:video_id:%s' % video_id, 'Revision3Embed', video_id) for video_id in re.findall(
                r'<iframe[^>]+src=[\'"](?:https?:)?//api\.seekernetwork\.com/player/embed\?videoId=(\d+)', webpage)]
            return self.playlist_result(
                entries, article_id, self._og_search_title(webpage), self._og_search_description(webpage))
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/senateisvp.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( ExtractorError, unsmuggle_url, ) from ..compat import ( compat_parse_qs, compat_urlparse, ) class SenateISVPIE(InfoExtractor): _COMM_MAP = [ ['ag', '76440', 'http://ag-f.akamaihd.net'], ['aging', '76442', 'http://aging-f.akamaihd.net'], ['approps', '76441', 'http://approps-f.akamaihd.net'], ['armed', '76445', 'http://armed-f.akamaihd.net'], ['banking', '76446', 'http://banking-f.akamaihd.net'], ['budget', '76447', 'http://budget-f.akamaihd.net'], ['cecc', '76486', 'http://srs-f.akamaihd.net'], ['commerce', '80177', 'http://commerce1-f.akamaihd.net'], ['csce', '75229', 'http://srs-f.akamaihd.net'], ['dpc', '76590', 'http://dpc-f.akamaihd.net'], ['energy', '76448', 'http://energy-f.akamaihd.net'], ['epw', '76478', 'http://epw-f.akamaihd.net'], ['ethics', '76449', 'http://ethics-f.akamaihd.net'], ['finance', '76450', 'http://finance-f.akamaihd.net'], ['foreign', '76451', 'http://foreign-f.akamaihd.net'], ['govtaff', '76453', 'http://govtaff-f.akamaihd.net'], ['help', '76452', 'http://help-f.akamaihd.net'], ['indian', '76455', 'http://indian-f.akamaihd.net'], ['intel', '76456', 'http://intel-f.akamaihd.net'], ['intlnarc', '76457', 'http://intlnarc-f.akamaihd.net'], ['jccic', '85180', 'http://jccic-f.akamaihd.net'], ['jec', '76458', 'http://jec-f.akamaihd.net'], ['judiciary', '76459', 'http://judiciary-f.akamaihd.net'], ['rpc', '76591', 'http://rpc-f.akamaihd.net'], ['rules', '76460', 'http://rules-f.akamaihd.net'], ['saa', '76489', 'http://srs-f.akamaihd.net'], ['smbiz', '76461', 'http://smbiz-f.akamaihd.net'], ['srs', '75229', 'http://srs-f.akamaihd.net'], ['uscc', '76487', 'http://srs-f.akamaihd.net'], ['vetaff', '76462', 'http://vetaff-f.akamaihd.net'], ['arch', '', 'http://ussenate-f.akamaihd.net/'] ] _IE_NAME = 'senate.gov' _VALID_URL = r'https?://(?:www\.)?senate\.gov/isvp/?\?(?P<qs>.+)' _TESTS = [{ 'url': 
'http://www.senate.gov/isvp/?comm=judiciary&type=live&stt=&filename=judiciary031715&auto_play=false&wmode=transparent&poster=http%3A%2F%2Fwww.judiciary.senate.gov%2Fthemes%2Fjudiciary%2Fimages%2Fvideo-poster-flash-fit.png', 'info_dict': { 'id': 'judiciary031715', 'ext': 'mp4', 'title': 'Integrated Senate Video Player', 'thumbnail': r're:^https?://.*\.(?:jpg|png)$', }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://www.senate.gov/isvp/?type=live&comm=commerce&filename=commerce011514.mp4&auto_play=false', 'info_dict': { 'id': 'commerce011514', 'ext': 'mp4', 'title': 'Integrated Senate Video Player' }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://www.senate.gov/isvp/?type=arch&comm=intel&filename=intel090613&hc_location=ufi', # checksum differs each time 'info_dict': { 'id': 'intel090613', 'ext': 'mp4', 'title': 'Integrated Senate Video Player' } }, { # From http://www.c-span.org/video/?96791-1 'url': 'http://www.senate.gov/isvp?type=live&comm=banking&filename=banking012715', 'only_matching': True, }] @staticmethod def _search_iframe_url(webpage): mobj = re.search( r"<iframe[^>]+src=['\"](?P<url>https?://www\.senate\.gov/isvp/?\?[^'\"]+)['\"]", webpage) if mobj: return mobj.group('url') def _get_info_for_comm(self, committee): for entry in self._COMM_MAP: if entry[0] == committee: return entry[1:] def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) qs = compat_parse_qs(re.match(self._VALID_URL, url).group('qs')) if not qs.get('filename') or not qs.get('type') or not qs.get('comm'): raise ExtractorError('Invalid URL', expected=True) video_id = re.sub(r'.mp4$', '', qs['filename'][0]) webpage = self._download_webpage(url, video_id) if smuggled_data.get('force_title'): title = smuggled_data['force_title'] else: title = self._html_search_regex(r'<title>([^<]+)</title>', webpage, video_id) poster = qs.get('poster') thumbnail = poster[0] if poster else None video_type = qs['type'][0] committee = 
video_type if video_type == 'arch' else qs['comm'][0] stream_num, domain = self._get_info_for_comm(committee) formats = [] if video_type == 'arch': filename = video_id if '.' in video_id else video_id + '.mp4' formats = [{ # All parameters in the query string are necessary to prevent a 403 error 'url': compat_urlparse.urljoin(domain, filename) + '?v=3.1.0&fp=&r=&g=', }] else: hdcore_sign = 'hdcore=3.1.0' url_params = (domain, video_id, stream_num) f4m_url = '%s/z/%s_1@%s/manifest.f4m?' % url_params + hdcore_sign m3u8_url = '%s/i/%s_1@%s/master.m3u8' % url_params for entry in self._extract_f4m_formats(f4m_url, video_id, f4m_id='f4m'): # URLs without the extra param induce an 404 error entry.update({'extra_param_to_segment_url': hdcore_sign}) formats.append(entry) for entry in self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4', m3u8_id='m3u8'): mobj = re.search(r'(?P<tag>(?:-p|-b)).m3u8', entry['url']) if mobj: entry['format_id'] += mobj.group('tag') formats.append(entry) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'formats': formats, 'thumbnail': thumbnail, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/sendtonews.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    float_or_none,
    parse_iso8601,
    update_url_query,
    int_or_none,
    determine_protocol,
    unescapeHTML,
)


class SendtoNewsIE(InfoExtractor):
    """Extractor for SendtoNews embedded players.

    Each embed (identified by its SC query parameter) resolves to a playlist
    of one or more clips; per-clip formats come from the embedded JW Player
    configuration.
    """

    _VALID_URL = r'https?://embed\.sendtonews\.com/player2/embedplayer\.php\?.*\bSC=(?P<id>[0-9A-Za-z-]+)'

    _TEST = {
        # From http://cleveland.cbslocal.com/2016/05/16/indians-score-season-high-15-runs-in-blowout-win-over-reds-rapid-reaction/
        'url': 'http://embed.sendtonews.com/player2/embedplayer.php?SC=GxfCe0Zo7D-175909-5588&type=single&autoplay=on&sound=YES',
        'info_dict': {
            'id': 'GxfCe0Zo7D-175909-5588'
        },
        'playlist_count': 8,
        # test the first video only to prevent lengthy tests
        'playlist': [{
            'info_dict': {
                'id': '240385',
                'ext': 'mp4',
                'title': 'Indians introduce Encarnacion',
                'description': 'Indians president of baseball operations Chris Antonetti and Edwin Encarnacion discuss the slugger\'s three-year contract with Cleveland',
                'duration': 137.898,
                'thumbnail': r're:https?://.*\.jpg$',
                'upload_date': '20170105',
                'timestamp': 1483649762,
            },
        }],
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }

    # Protocol-relative on purpose so generic pages can adopt their scheme.
    _URL_TEMPLATE = '//embed.sendtonews.com/player2/embedplayer.php?SC=%s'

    @classmethod
    def _extract_url(cls, webpage):
        """Return the canonical embed URL for the first SendtoNews script
        found in webpage, or None."""
        mobj = re.search(r'''(?x)<script[^>]+src=([\'"])
            (?:https?:)?//embed\.sendtonews\.com/player/responsiveembed\.php\?
                .*\bSC=(?P<SC>[0-9a-zA-Z-]+).*
            \1>''', webpage)
        if mobj:
            sc = mobj.group('SC')
            return cls._URL_TEMPLATE % sc

    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        # The data endpoint shares the embed URL's query string; only the
        # script name and the 'cmd' parameter differ.
        data_url = update_url_query(
            url.replace('embedplayer.php', 'data_read.php'),
            {'cmd': 'loadInitial'})
        playlist_data = self._download_json(data_url, playlist_id)

        entries = []
        for video in playlist_data['playlistData'][0]:
            info_dict = self._parse_jwplayer_data(
                video['jwconfiguration'],
                require_title=False, m3u8_id='hls', rtmp_params={'no_resume': True})

            # Progressive formats carry no tbr; recover it from the '/NNNk/'
            # component of the URL so sorting by bitrate works.
            for f in info_dict['formats']:
                if f.get('tbr'):
                    continue
                tbr = int_or_none(self._search_regex(
                    r'/(\d+)k/', f['url'], 'bitrate', default=None))
                if not tbr:
                    continue
                f.update({
                    'format_id': '%s-%d' % (determine_protocol(f), tbr),
                    'tbr': tbr,
                })
            self._sort_formats(info_dict['formats'], ('tbr', 'height', 'width', 'format_id'))

            thumbnails = []
            if video.get('thumbnailUrl'):
                thumbnails.append({
                    'id': 'normal',
                    'url': video['thumbnailUrl'],
                })
            if video.get('smThumbnailUrl'):
                thumbnails.append({
                    'id': 'small',
                    'url': video['smThumbnailUrl'],
                })
            info_dict.update({
                'title': video['S_headLine'].strip(),
                'description': unescapeHTML(video.get('S_fullStory')),
                'thumbnails': thumbnails,
                'duration': float_or_none(video.get('SM_length')),
                # S_sysDate uses a space between date and time instead of 'T'.
                'timestamp': parse_iso8601(video.get('S_sysDate'), delimiter=' '),
            })
            entries.append(info_dict)

        return self.playlist_result(entries, playlist_id)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/servus.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor class ServusIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?servus\.com/(?:(?:at|de)/p/[^/]+|tv/videos)/(?P<id>[aA]{2}-\w+|\d+-\d+)' _TESTS = [{ 'url': 'https://www.servus.com/de/p/Die-Gr%C3%BCnen-aus-Sicht-des-Volkes/AA-1T6VBU5PW1W12/', 'md5': '3e1dd16775aa8d5cbef23628cfffc1f4', 'info_dict': { 'id': 'AA-1T6VBU5PW1W12', 'ext': 'mp4', 'title': 'Die Grünen aus Sicht des Volkes', 'description': 'md5:1247204d85783afe3682644398ff2ec4', 'thumbnail': r're:^https?://.*\.jpg', } }, { 'url': 'https://www.servus.com/at/p/Wie-das-Leben-beginnt/1309984137314-381415152/', 'only_matching': True, }, { 'url': 'https://www.servus.com/tv/videos/aa-1t6vbu5pw1w12/', 'only_matching': True, }, { 'url': 'https://www.servus.com/tv/videos/1380889096408-1235196658/', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url).upper() webpage = self._download_webpage(url, video_id) title = self._search_regex( (r'videoLabel\s*=\s*(["\'])(?P<title>(?:(?!\1).)+)\1', r'<h\d+[^>]+\bclass=["\']heading--(?:one|two)["\'][^>]*>(?P<title>[^<]+)'), webpage, 'title', default=None, group='title') or self._og_search_title(webpage) title = re.sub(r'\s*-\s*Servus TV\s*$', '', title) description = self._og_search_description(webpage) thumbnail = self._og_search_thumbnail(webpage) formats = self._extract_m3u8_formats( 'https://stv.rbmbtnx.net/api/v1/manifests/%s.m3u8' % video_id, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls') self._sort_formats(formats) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/sevenplus.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .brightcove import BrightcoveNewIE
from ..compat import compat_str
from ..utils import (
    try_get,
    update_url_query,
)


class SevenPlusIE(BrightcoveNewIE):
    """Extractor for 7plus.com.au.

    The playback service returns a Brightcove-style media object, so this
    extractor subclasses BrightcoveNewIE and reuses its metadata parser,
    then enriches the result from 7plus' own content CDN.
    """
    IE_NAME = '7plus'
    _VALID_URL = r'https?://(?:www\.)?7plus\.com\.au/(?P<path>[^?]+\?.*?\bepisode-id=(?P<id>[^&#]+))'
    _TESTS = [{
        'url': 'https://7plus.com.au/MTYS?episode-id=MTYS7-003',
        'info_dict': {
            'id': 'MTYS7-003',
            'ext': 'mp4',
            'title': 'S7 E3 - Wind Surf',
            'description': 'md5:29c6a69f21accda7601278f81b46483d',
            'uploader_id': '5303576322001',
            'upload_date': '20171201',
            'timestamp': 1512106377,
            'series': 'Mighty Ships',
            'season_number': 7,
            'episode_number': 3,
            'episode': 'Wind Surf',
        },
        'params': {
            'format': 'bestvideo',
            'skip_download': True,
        }
    }, {
        'url': 'https://7plus.com.au/UUUU?episode-id=AUMS43-001',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        path, episode_id = re.match(self._VALID_URL, url).groups()

        media = self._download_json(
            'https://videoservice.swm.digital/playback', episode_id, query={
                'appId': '7plus',
                'deviceType': 'web',
                'platformType': 'web',
                'accountId': 5303576322001,
                'referenceId': 'ref:' + episode_id,
                'deliveryId': 'csai',
                'videoType': 'vod',
            })['media']

        # NOTE(review): the default should arguably be a list; iterating the
        # empty-dict default also yields nothing, so behavior is unaffected.
        for source in media.get('sources', {}):
            src = source.get('src')
            if not src:
                continue
            # The 'rule' parameter is blanked on every source URL --
            # presumably to bypass delivery rules; confirm before changing.
            source['src'] = update_url_query(src, {'rule': ''})

        info = self._parse_brightcove_metadata(media, episode_id)

        # Best-effort metadata enrichment; a failed request leaves 'content'
        # empty rather than aborting extraction.
        content = self._download_json(
            'https://component-cdn.swm.digital/content/' + path,
            episode_id, headers={
                'market-id': 4,
            }, fatal=False) or {}
        # NOTE(review): same dict-default-for-list pattern as above.
        for item in content.get('items', {}):
            if item.get('componentData', {}).get('componentType') == 'infoPanel':
                for src_key, dst_key in [('title', 'title'), ('shortSynopsis', 'description')]:
                    value = item.get(src_key)
                    if value:
                        info[dst_key] = value
                info['series'] = try_get(
                    item, lambda x: x['seriesLogo']['name'], compat_str)
                # Titles look like 'S7 E3 - Wind Surf'; split them into
                # season/episode numbers and the episode name.
                mobj = re.search(r'^S(\d+)\s+E(\d+)\s+-\s+(.+)$', info['title'])
                if mobj:
                    info.update({
                        'season_number': int(mobj.group(1)),
                        'episode_number': int(mobj.group(2)),
                        'episode': mobj.group(3),
                    })

        return info
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/sexu.py
from __future__ import unicode_literals

from .common import InfoExtractor


class SexuIE(InfoExtractor):
    """Extractor for sexu.com video pages (JW Player based)."""

    _VALID_URL = r'https?://(?:www\.)?sexu\.com/(?P<id>\d+)'
    _TEST = {
        'url': 'http://sexu.com/961791/',
        'md5': 'ff615aca9691053c94f8f10d96cd7884',
        'info_dict': {
            'id': '961791',
            'ext': 'mp4',
            'title': 'md5:4d05a19a5fc049a63dbbaf05fb71d91b',
            'description': 'md5:2b75327061310a3afb3fbd7d09e2e403',
            'categories': list,  # NSFW
            'thumbnail': r're:https?://.*\.jpg$',
            'age_limit': 18,
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The page configures an inline JW Player; grab its setup() JSON.
        player_config = self._parse_json(
            self._search_regex(r'\.setup\(\s*({.+?})\s*\);', webpage, 'jwvideo'),
            video_id)

        formats = []
        for source in player_config['sources']:
            if not source.get('file'):
                continue
            formats.append({
                # File URLs come with escaped slashes.
                'url': source['file'].replace('\\', ''),
                'format_id': source.get('label'),
                # Labels look like '720p'; the leading digits are the height.
                'height': int(self._search_regex(
                    r'^(\d+)[pP]', source.get('label', ''), 'height',
                    default=None)),
            })
        self._sort_formats(formats)

        title = self._html_search_regex(
            r'<title>([^<]+)\s*-\s*Sexu\.Com</title>', webpage, 'title')
        description = self._html_search_meta(
            'description', webpage, 'description')
        categories_str = self._html_search_meta(
            'keywords', webpage, 'categories')
        categories = (
            categories_str.split(',')
            if categories_str is not None else None)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': player_config.get('image'),
            'categories': categories,
            'formats': formats,
            'age_limit': 18,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/seznamzpravy.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import (
    compat_parse_qs,
    compat_str,
    compat_urllib_parse_urlparse,
)
from ..utils import (
    urljoin,
    int_or_none,
    parse_codecs,
    try_get,
)


def _raw_id(src_url):
    """Return the last path segment of src_url (used as a fallback video id)."""
    return compat_urllib_parse_urlparse(src_url).path.split('/')[-1]


class SeznamZpravyIE(InfoExtractor):
    """Extractor for the seznamzpravy.cz iframe player.

    All metadata (title, duration, series, poster, content id) is carried in
    the iframe URL's query string; the 'src' parameter points at the SDN
    delivery service, which is probed for MP4, DASH and HLS formats.
    """
    _VALID_URL = r'https?://(?:www\.)?seznamzpravy\.cz/iframe/player\?.*\bsrc='
    _TESTS = [{
        'url': 'https://www.seznamzpravy.cz/iframe/player?duration=241&serviceSlug=zpravy&src=https%3A%2F%2Fv39-a.sdn.szn.cz%2Fv_39%2Fvmd%2F5999c902ea707c67d8e267a9%3Ffl%3Dmdk%2C432f65a0%7C&itemType=video&autoPlay=false&title=Sv%C4%9Bt%20bez%20obalu%3A%20%C4%8Ce%C5%A1t%C3%AD%20voj%C3%A1ci%20na%20mis%C3%ADch%20(kr%C3%A1tk%C3%A1%20verze)&series=Sv%C4%9Bt%20bez%20obalu&serviceName=Seznam%20Zpr%C3%A1vy&poster=%2F%2Fd39-a.sdn.szn.cz%2Fd_39%2Fc_img_F_I%2FR5puJ.jpeg%3Ffl%3Dcro%2C0%2C0%2C1920%2C1080%7Cres%2C1200%2C%2C1%7Cjpg%2C80%2C%2C1&width=1920&height=1080&cutFrom=0&cutTo=0&splVersion=VOD&contentId=170889&contextId=35990&showAdvert=true&collocation=&autoplayPossible=true&embed=&isVideoTooShortForPreroll=false&isVideoTooLongForPostroll=true&videoCommentOpKey=&videoCommentId=&version=4.0.76&dotService=zpravy&gemiusPrismIdentifier=bVc1ZIb_Qax4W2v5xOPGpMeCP31kFfrTzj0SqPTLh_b.Z7&zoneIdPreroll=seznam.pack.videospot&skipOffsetPreroll=5&sectionPrefixPreroll=%2Fzpravy',
        'info_dict': {
            'id': '170889',
            'ext': 'mp4',
            'title': 'Svět bez obalu: Čeští vojáci na misích (krátká verze)',
            'thumbnail': r're:^https?://.*\.jpe?g',
            'duration': 241,
            'series': 'Svět bez obalu',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # with Location key
        'url': 'https://www.seznamzpravy.cz/iframe/player?duration=null&serviceSlug=zpravy&src=https%3A%2F%2Flive-a.sdn.szn.cz%2Fv_39%2F59e468fe454f8472a96af9fa%3Ffl%3Dmdk%2C5c1e2840%7C&itemType=livevod&autoPlay=false&title=P%C5%99edseda%20KDU-%C4%8CSL%20Pavel%20B%C4%9Blobr%C3%A1dek%20ve%20volebn%C3%AD%20V%C3%BDzv%C4%9B%20Seznamu&series=V%C3%BDzva&serviceName=Seznam%20Zpr%C3%A1vy&poster=%2F%2Fd39-a.sdn.szn.cz%2Fd_39%2Fc_img_G_J%2FjTBCs.jpeg%3Ffl%3Dcro%2C0%2C0%2C1280%2C720%7Cres%2C1200%2C%2C1%7Cjpg%2C80%2C%2C1&width=16&height=9&cutFrom=0&cutTo=0&splVersion=VOD&contentId=185688&contextId=38489&showAdvert=true&collocation=&hideFullScreen=false&hideSubtitles=false&embed=&isVideoTooShortForPreroll=false&isVideoTooShortForPreroll2=false&isVideoTooLongForPostroll=false&fakePostrollZoneID=seznam.clanky.zpravy.preroll&fakePrerollZoneID=seznam.clanky.zpravy.preroll&videoCommentId=&trim=default_16x9&noPrerollVideoLength=30&noPreroll2VideoLength=undefined&noMidrollVideoLength=0&noPostrollVideoLength=999999&autoplayPossible=true&version=5.0.41&dotService=zpravy&gemiusPrismIdentifier=zD3g7byfW5ekpXmxTVLaq5Srjw5i4hsYo0HY1aBwIe..27&zoneIdPreroll=seznam.pack.videospot&skipOffsetPreroll=5&sectionPrefixPreroll=%2Fzpravy%2Fvyzva&zoneIdPostroll=seznam.pack.videospot&skipOffsetPostroll=5&sectionPrefixPostroll=%2Fzpravy%2Fvyzva&regression=false',
        'info_dict': {
            'id': '185688',
            'ext': 'mp4',
            'title': 'Předseda KDU-ČSL Pavel Bělobrádek ve volební Výzvě Seznamu',
            'thumbnail': r're:^https?://.*\.jpe?g',
            'series': 'Výzva',
        },
        'params': {
            'skip_download': True,
        },
    }]

    @staticmethod
    def _extract_urls(webpage):
        """Return all seznamzpravy player iframe URLs embedded in webpage."""
        return [
            mobj.group('url') for mobj in re.finditer(
                r'<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//(?:www\.)?seznamzpravy\.cz/iframe/player\?.*?)\1',
                webpage)]

    def _real_extract(self, url):
        params = compat_parse_qs(compat_urllib_parse_urlparse(url).query)

        src = params['src'][0]
        title = params['title'][0]
        # Prefer the explicit contentId; fall back to the last path segment
        # of the SDN src URL.
        video_id = params.get('contentId', [_raw_id(src)])[0]
        formats = self._extract_sdn_formats(src + 'spl2,2,VOD', video_id)

        duration = int_or_none(params.get('duration', [None])[0])
        series = params.get('series', [None])[0]
        thumbnail = params.get('poster', [None])[0]

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'duration': duration,
            'series': series,
            'formats': formats,
        }

    def _extract_sdn_formats(self, sdn_url, video_id):
        """Probe an SDN delivery URL for progressive MP4, DASH and HLS formats."""
        sdn_data = self._download_json(sdn_url, video_id)

        # Live/VOD items may redirect via a 'Location' key to the real
        # description document.
        if sdn_data.get('Location'):
            sdn_url = sdn_data['Location']
            sdn_data = self._download_json(sdn_url, video_id)

        formats = []
        mp4_formats = try_get(sdn_data, lambda x: x['data']['mp4'], dict) or {}
        for format_id, format_data in mp4_formats.items():
            relative_url = format_data.get('url')
            if not relative_url:
                continue

            # 'resolution' is expected to be a (width, height) pair; tolerate
            # anything else.
            try:
                width, height = format_data.get('resolution')
            except (TypeError, ValueError):
                width, height = None, None

            f = {
                'url': urljoin(sdn_url, relative_url),
                'format_id': 'http-%s' % format_id,
                'tbr': int_or_none(format_data.get('bandwidth'), scale=1000),
                'width': int_or_none(width),
                'height': int_or_none(height),
            }
            f.update(parse_codecs(format_data.get('codec')))
            formats.append(f)

        pls = sdn_data.get('pls', {})

        def get_url(format_id):
            return try_get(pls, lambda x: x[format_id]['url'], compat_str)

        dash_rel_url = get_url('dash')
        if dash_rel_url:
            formats.extend(self._extract_mpd_formats(
                urljoin(sdn_url, dash_rel_url), video_id, mpd_id='dash',
                fatal=False))

        hls_rel_url = get_url('hls')
        if hls_rel_url:
            formats.extend(self._extract_m3u8_formats(
                urljoin(sdn_url, hls_rel_url), video_id, ext='mp4',
                m3u8_id='hls', fatal=False))

        self._sort_formats(formats)
        return formats


class SeznamZpravyArticleIE(InfoExtractor):
    """Extractor for seznamzpravy.cz article pages; yields a playlist of all
    player iframes embedded in the article."""
    _VALID_URL = r'https?://(?:www\.)?(?:seznam\.cz/zpravy|seznamzpravy\.cz)/clanek/(?:[^/?#&]+)-(?P<id>\d+)'
    _API_URL = 'https://apizpravy.seznam.cz/'

    _TESTS = [{
        # two videos on one page, with SDN URL
        'url': 'https://www.seznamzpravy.cz/clanek/jejich-svet-na-nas-utoci-je-lepsi-branit-se-na-jejich-pisecku-rika-reziser-a-major-v-zaloze-marhoul-35990',
        'info_dict': {
            'id': '35990',
            'title': 'md5:6011c877a36905f28f271fcd8dcdb0f2',
            'description': 'md5:933f7b06fa337a814ba199d3596d27ba',
        },
        'playlist_count': 2,
    }, {
        # video with live stream URL
        'url': 'https://www.seznam.cz/zpravy/clanek/znovu-do-vlady-s-ano-pavel-belobradek-ve-volebnim-specialu-seznamu-38489',
        'info_dict': {
            'id': '38489',
            'title': 'md5:8fa1afdc36fd378cf0eba2b74c5aca60',
            'description': 'md5:428e7926a1a81986ec7eb23078004fb4',
        },
        'playlist_count': 1,
    }]

    def _real_extract(self, url):
        article_id = self._match_id(url)

        webpage = self._download_webpage(url, article_id)

        # Prefer JSON-LD metadata; fall back to OpenGraph tags.
        info = self._search_json_ld(webpage, article_id, default={})

        title = info.get('title') or self._og_search_title(webpage, fatal=False)
        description = info.get('description') or self._og_search_description(webpage)

        return self.playlist_result([
            self.url_result(entry_url, ie=SeznamZpravyIE.ie_key())
            for entry_url in SeznamZpravyIE._extract_urls(webpage)],
            article_id, title, description)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/shahid.py
# coding: utf-8 from __future__ import unicode_literals import json import math import re from .aws import AWSIE from ..compat import compat_HTTPError from ..utils import ( clean_html, ExtractorError, InAdvancePagedList, int_or_none, parse_iso8601, str_or_none, urlencode_postdata, ) class ShahidBaseIE(AWSIE): _AWS_PROXY_HOST = 'api2.shahid.net' _AWS_API_KEY = '2RRtuMHx95aNI1Kvtn2rChEuwsCogUd4samGPjLh' def _handle_error(self, e): fail_data = self._parse_json( e.cause.read().decode('utf-8'), None, fatal=False) if fail_data: faults = fail_data.get('faults', []) faults_message = ', '.join([clean_html(fault['userMessage']) for fault in faults if fault.get('userMessage')]) if faults_message: raise ExtractorError(faults_message, expected=True) def _call_api(self, path, video_id, request=None): query = {} if request: query['request'] = json.dumps(request) try: return self._aws_execute_api({ 'uri': '/proxy/v2/' + path, 'access_key': 'AKIAI6X4TYCIXM2B7MUQ', 'secret_key': '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', }, video_id, query) except ExtractorError as e: if isinstance(e.cause, compat_HTTPError): self._handle_error(e) raise class ShahidIE(ShahidBaseIE): _NETRC_MACHINE = 'shahid' _VALID_URL = r'https?://shahid\.mbc\.net/ar/(?:serie|show|movie)s/[^/]+/(?P<type>episode|clip|movie)-(?P<id>\d+)' _TESTS = [{ 'url': 'https://shahid.mbc.net/ar/shows/%D9%85%D8%AC%D9%84%D8%B3-%D8%A7%D9%84%D8%B4%D8%A8%D8%A7%D8%A8-%D8%A7%D9%84%D9%85%D9%88%D8%B3%D9%85-1-%D9%83%D9%84%D9%8A%D8%A8-1/clip-275286', 'info_dict': { 'id': '275286', 'ext': 'mp4', 'title': 'مجلس الشباب الموسم 1 كليب 1', 'timestamp': 1506988800, 'upload_date': '20171003', }, 'params': { # m3u8 download 'skip_download': True, } }, { 'url': 'https://shahid.mbc.net/ar/movies/%D8%A7%D9%84%D9%82%D9%86%D8%A7%D8%B5%D8%A9/movie-151746', 'only_matching': True }, { # shahid plus subscriber only 'url': 
'https://shahid.mbc.net/ar/series/%D9%85%D8%B1%D8%A7%D9%8A%D8%A7-2011-%D8%A7%D9%84%D9%85%D9%88%D8%B3%D9%85-1-%D8%A7%D9%84%D8%AD%D9%84%D9%82%D8%A9-1/episode-90511', 'only_matching': True }] def _real_initialize(self): email, password = self._get_login_info() if email is None: return try: user_data = self._download_json( 'https://shahid.mbc.net/wd/service/users/login', None, 'Logging in', data=json.dumps({ 'email': email, 'password': password, 'basic': 'false', }).encode('utf-8'), headers={ 'Content-Type': 'application/json; charset=UTF-8', })['user'] except ExtractorError as e: if isinstance(e.cause, compat_HTTPError): self._handle_error(e) raise self._download_webpage( 'https://shahid.mbc.net/populateContext', None, 'Populate Context', data=urlencode_postdata({ 'firstName': user_data['firstName'], 'lastName': user_data['lastName'], 'userName': user_data['email'], 'csg_user_name': user_data['email'], 'subscriberId': user_data['id'], 'sessionId': user_data['sessionId'], })) def _real_extract(self, url): page_type, video_id = re.match(self._VALID_URL, url).groups() if page_type == 'clip': page_type = 'episode' playout = self._call_api( 'playout/url/' + video_id, video_id)['playout'] if playout.get('drm'): raise ExtractorError('This video is DRM protected.', expected=True) formats = self._extract_m3u8_formats(playout['url'], video_id, 'mp4') self._sort_formats(formats) # video = self._call_api( # 'product/id', video_id, { # 'id': video_id, # 'productType': 'ASSET', # 'productSubType': page_type.upper() # })['productModel'] response = self._download_json( 'http://api.shahid.net/api/v1_1/%s/%s' % (page_type, video_id), video_id, 'Downloading video JSON', query={ 'apiKey': 'sh@hid0nlin3', 'hash': 'b2wMCTHpSmyxGqQjJFOycRmLSex+BpTK/ooxy6vHaqs=', }) data = response.get('data', {}) error = data.get('error') if error: raise ExtractorError( '%s returned error: %s' % (self.IE_NAME, '\n'.join(error.values())), expected=True) video = data[page_type] title = video['title'] 
categories = [ category['name'] for category in video.get('genres', []) if 'name' in category] return { 'id': video_id, 'title': title, 'description': video.get('description'), 'thumbnail': video.get('thumbnailUrl'), 'duration': int_or_none(video.get('duration')), 'timestamp': parse_iso8601(video.get('referenceDate')), 'categories': categories, 'series': video.get('showTitle') or video.get('showName'), 'season': video.get('seasonTitle'), 'season_number': int_or_none(video.get('seasonNumber')), 'season_id': str_or_none(video.get('seasonId')), 'episode_number': int_or_none(video.get('number')), 'episode_id': video_id, 'formats': formats, } class ShahidShowIE(ShahidBaseIE): _VALID_URL = r'https?://shahid\.mbc\.net/ar/(?:show|serie)s/[^/]+/(?:show|series)-(?P<id>\d+)' _TESTS = [{ 'url': 'https://shahid.mbc.net/ar/shows/%D8%B1%D8%A7%D9%85%D8%B2-%D9%82%D8%B1%D8%B4-%D8%A7%D9%84%D8%A8%D8%AD%D8%B1/show-79187', 'info_dict': { 'id': '79187', 'title': 'رامز قرش البحر', 'description': 'md5:c88fa7e0f02b0abd39d417aee0d046ff', }, 'playlist_mincount': 32, }, { 'url': 'https://shahid.mbc.net/ar/series/How-to-live-Longer-(The-Big-Think)/series-291861', 'only_matching': True }] _PAGE_SIZE = 30 def _real_extract(self, url): show_id = self._match_id(url) product = self._call_api( 'playableAsset', show_id, {'showId': show_id})['productModel'] playlist = product['playlist'] playlist_id = playlist['id'] show = product.get('show', {}) def page_func(page_num): playlist = self._call_api( 'product/playlist', show_id, { 'playListId': playlist_id, 'pageNumber': page_num, 'pageSize': 30, 'sorts': [{ 'order': 'DESC', 'type': 'SORTDATE' }], }) for product in playlist.get('productList', {}).get('products', []): product_url = product.get('productUrl', []).get('url') if not product_url: continue yield self.url_result( product_url, 'Shahid', str_or_none(product.get('id')), product.get('title')) entries = InAdvancePagedList( page_func, math.ceil(playlist['count'] / self._PAGE_SIZE), self._PAGE_SIZE) 
return self.playlist_result( entries, show_id, show.get('title'), show.get('description'))
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/shared.py
from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_b64decode from ..utils import ( determine_ext, ExtractorError, int_or_none, KNOWN_EXTENSIONS, parse_filesize, url_or_none, urlencode_postdata, ) class SharedBaseIE(InfoExtractor): def _real_extract(self, url): video_id = self._match_id(url) webpage, urlh = self._download_webpage_handle(url, video_id) if self._FILE_NOT_FOUND in webpage: raise ExtractorError( 'Video %s does not exist' % video_id, expected=True) video_url = self._extract_video_url(webpage, video_id, url) title = self._extract_title(webpage) filesize = int_or_none(self._extract_filesize(webpage)) return { 'id': video_id, 'url': video_url, 'ext': 'mp4', 'filesize': filesize, 'title': title, } def _extract_title(self, webpage): return compat_b64decode(self._html_search_meta( 'full:title', webpage, 'title')).decode('utf-8') def _extract_filesize(self, webpage): return self._html_search_meta( 'full:size', webpage, 'file size', fatal=False) class SharedIE(SharedBaseIE): IE_DESC = 'shared.sx' _VALID_URL = r'https?://shared\.sx/(?P<id>[\da-z]{10})' _FILE_NOT_FOUND = '>File does not exist<' _TEST = { 'url': 'http://shared.sx/0060718775', 'md5': '106fefed92a8a2adb8c98e6a0652f49b', 'info_dict': { 'id': '0060718775', 'ext': 'mp4', 'title': 'Bmp4', 'filesize': 1720110, }, } def _extract_video_url(self, webpage, video_id, url): download_form = self._hidden_inputs(webpage) video_page = self._download_webpage( url, video_id, 'Downloading video page', data=urlencode_postdata(download_form), headers={ 'Content-Type': 'application/x-www-form-urlencoded', 'Referer': url, }) video_url = self._html_search_regex( r'data-url=(["\'])(?P<url>(?:(?!\1).)+)\1', video_page, 'video URL', group='url') return video_url class VivoIE(SharedBaseIE): IE_DESC = 'vivo.sx' _VALID_URL = r'https?://vivo\.sx/(?P<id>[\da-z]{10})' _FILE_NOT_FOUND = '>The file you have requested does not exists or has been removed' _TEST = { 'url': 
'http://vivo.sx/d7ddda0e78', 'md5': '15b3af41be0b4fe01f4df075c2678b2c', 'info_dict': { 'id': 'd7ddda0e78', 'ext': 'mp4', 'title': 'Chicken', 'filesize': 515659, }, } def _extract_title(self, webpage): title = self._html_search_regex( r'data-name\s*=\s*(["\'])(?P<title>(?:(?!\1).)+)\1', webpage, 'title', default=None, group='title') if title: ext = determine_ext(title) if ext.lower() in KNOWN_EXTENSIONS: title = title.rpartition('.' + ext)[0] return title return self._og_search_title(webpage) def _extract_filesize(self, webpage): return parse_filesize(self._search_regex( r'data-type=["\']video["\'][^>]*>Watch.*?<strong>\s*\((.+?)\)', webpage, 'filesize', fatal=False)) def _extract_video_url(self, webpage, video_id, url): def decode_url(encoded_url): return compat_b64decode(encoded_url).decode('utf-8') stream_url = url_or_none(decode_url(self._search_regex( r'data-stream\s*=\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage, 'stream url', default=None, group='url'))) if stream_url: return stream_url return self._parse_json( self._search_regex( r'InitializeStream\s*\(\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage, 'stream', group='url'), video_id, transform_source=decode_url)[0]
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/showroomlive.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_str from ..utils import ( ExtractorError, int_or_none, urljoin, ) class ShowRoomLiveIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?showroom-live\.com/(?!onlive|timetable|event|campaign|news|ranking|room)(?P<id>[^/?#&]+)' _TEST = { 'url': 'https://www.showroom-live.com/48_Nana_Okada', 'only_matching': True, } def _real_extract(self, url): broadcaster_id = self._match_id(url) webpage = self._download_webpage(url, broadcaster_id) room_id = self._search_regex( (r'SrGlobal\.roomId\s*=\s*(\d+)', r'(?:profile|room)\?room_id\=(\d+)'), webpage, 'room_id') room = self._download_json( urljoin(url, '/api/room/profile?room_id=%s' % room_id), broadcaster_id) is_live = room.get('is_onlive') if is_live is not True: raise ExtractorError('%s is offline' % broadcaster_id, expected=True) uploader = room.get('performer_name') or broadcaster_id title = room.get('room_name') or room.get('main_name') or uploader streaming_url_list = self._download_json( urljoin(url, '/api/live/streaming_url?room_id=%s' % room_id), broadcaster_id)['streaming_url_list'] formats = [] for stream in streaming_url_list: stream_url = stream.get('url') if not stream_url: continue stream_type = stream.get('type') if stream_type == 'hls': m3u8_formats = self._extract_m3u8_formats( stream_url, broadcaster_id, ext='mp4', m3u8_id='hls', live=True) for f in m3u8_formats: f['quality'] = int_or_none(stream.get('quality', 100)) formats.extend(m3u8_formats) elif stream_type == 'rtmp': stream_name = stream.get('stream_name') if not stream_name: continue formats.append({ 'url': stream_url, 'play_path': stream_name, 'page_url': url, 'player_url': 'https://www.showroom-live.com/assets/swf/v3/ShowRoomLive.swf', 'rtmp_live': True, 'ext': 'flv', 'format_id': 'rtmp', 'format_note': stream.get('label'), 'quality': int_or_none(stream.get('quality', 100)), }) self._sort_formats(formats) return { 'id': 
compat_str(room.get('live_id') or broadcaster_id), 'title': self._live_title(title), 'description': room.get('description'), 'timestamp': int_or_none(room.get('current_live_started_at')), 'uploader': uploader, 'uploader_id': broadcaster_id, 'view_count': int_or_none(room.get('view_num')), 'formats': formats, 'is_live': True, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/sina.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( HEADRequest, ExtractorError, int_or_none, update_url_query, qualities, get_element_by_attribute, clean_html, ) class SinaIE(InfoExtractor): _VALID_URL = r'''(?x)https?://(?:.*?\.)?video\.sina\.com\.cn/ (?: (?:view/|.*\#)(?P<video_id>\d+)| .+?/(?P<pseudo_id>[^/?#]+)(?:\.s?html)| # This is used by external sites like Weibo api/sinawebApi/outplay.php/(?P<token>.+?)\.swf ) ''' _TESTS = [ { 'url': 'http://video.sina.com.cn/news/spj/topvideoes20160504/?opsubject_id=top1#250576622', 'md5': 'd38433e2fc886007729735650ae4b3e9', 'info_dict': { 'id': '250576622', 'ext': 'mp4', 'title': '现场:克鲁兹宣布退选 特朗普将稳获提名', } }, { 'url': 'http://video.sina.com.cn/v/b/101314253-1290078633.html', 'info_dict': { 'id': '101314253', 'ext': 'flv', 'title': '军方提高对朝情报监视级别', }, 'skip': 'the page does not exist or has been deleted', }, { 'url': 'http://video.sina.com.cn/view/250587748.html', 'md5': '3d1807a25c775092aab3bc157fff49b4', 'info_dict': { 'id': '250587748', 'ext': 'mp4', 'title': '瞬间泪目:8年前汶川地震珍贵视频首曝光', }, }, ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('video_id') if not video_id: if mobj.group('token') is not None: # The video id is in the redirected url self.to_screen('Getting video id') request = HEADRequest(url) _, urlh = self._download_webpage_handle(request, 'NA', False) return self._real_extract(urlh.geturl()) else: pseudo_id = mobj.group('pseudo_id') webpage = self._download_webpage(url, pseudo_id) error = get_element_by_attribute('class', 'errtitle', webpage) if error: raise ExtractorError('%s said: %s' % ( self.IE_NAME, clean_html(error)), expected=True) video_id = self._search_regex( r"video_id\s*:\s*'(\d+)'", webpage, 'video id') video_data = self._download_json( 'http://s.video.sina.com.cn/video/h5play', video_id, query={'video_id': video_id}) if video_data['code'] != 1: raise ExtractorError('%s said: %s' % ( 
self.IE_NAME, video_data['message']), expected=True) else: video_data = video_data['data'] title = video_data['title'] description = video_data.get('description') if description: description = description.strip() preference = qualities(['cif', 'sd', 'hd', 'fhd', 'ffd']) formats = [] for quality_id, quality in video_data.get('videos', {}).get('mp4', {}).items(): file_api = quality.get('file_api') file_id = quality.get('file_id') if not file_api or not file_id: continue formats.append({ 'format_id': quality_id, 'url': update_url_query(file_api, {'vid': file_id}), 'preference': preference(quality_id), 'ext': 'mp4', }) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': video_data.get('image'), 'duration': int_or_none(video_data.get('length')), 'timestamp': int_or_none(video_data.get('create_time')), 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/sixplay.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_parse_qs, compat_str, compat_urllib_parse_urlparse, ) from ..utils import ( determine_ext, int_or_none, try_get, qualities, ) class SixPlayIE(InfoExtractor): IE_NAME = '6play' _VALID_URL = r'(?:6play:|https?://(?:www\.)?(?P<domain>6play\.fr|rtlplay\.be|play\.rtl\.hr|rtlmost\.hu)/.+?-c_)(?P<id>[0-9]+)' _TESTS = [{ 'url': 'https://www.6play.fr/minute-par-minute-p_9533/le-but-qui-a-marque-lhistoire-du-football-francais-c_12041051', 'md5': '31fcd112637baa0c2ab92c4fcd8baf27', 'info_dict': { 'id': '12041051', 'ext': 'mp4', 'title': 'Le but qui a marqué l\'histoire du football français !', 'description': 'md5:b59e7e841d646ef1eb42a7868eb6a851', }, }, { 'url': 'https://www.rtlplay.be/rtl-info-13h-p_8551/les-titres-du-rtlinfo-13h-c_12045869', 'only_matching': True, }, { 'url': 'https://play.rtl.hr/pj-masks-p_9455/epizoda-34-sezona-1-catboyevo-cudo-na-dva-kotaca-c_11984989', 'only_matching': True, }, { 'url': 'https://www.rtlmost.hu/megtorve-p_14167/megtorve-6-resz-c_12397787', 'only_matching': True, }] def _real_extract(self, url): domain, video_id = re.search(self._VALID_URL, url).groups() service, consumer_name = { '6play.fr': ('6play', 'm6web'), 'rtlplay.be': ('rtlbe_rtl_play', 'rtlbe'), 'play.rtl.hr': ('rtlhr_rtl_play', 'rtlhr'), 'rtlmost.hu': ('rtlhu_rtl_most', 'rtlhu'), }.get(domain, ('6play', 'm6web')) data = self._download_json( 'https://pc.middleware.6play.fr/6play/v2/platforms/m6group_web/services/%s/videos/clip_%s' % (service, video_id), video_id, headers={ 'x-customer-name': consumer_name }, query={ 'csa': 5, 'with': 'clips', }) clip_data = data['clips'][0] title = clip_data['title'] urls = [] quality_key = qualities(['lq', 'sd', 'hq', 'hd']) formats = [] subtitles = {} assets = clip_data.get('assets') or [] for asset in assets: asset_url = asset.get('full_physical_path') protocol = asset.get('protocol') if not asset_url or ((protocol 
== 'primetime' or asset.get('type') == 'usp_hlsfp_h264') and not ('_drmnp.ism/' in asset_url or '_unpnp.ism/' in asset_url)) or asset_url in urls: continue urls.append(asset_url) container = asset.get('video_container') ext = determine_ext(asset_url) if protocol == 'http_subtitle' or ext == 'vtt': subtitles.setdefault('fr', []).append({'url': asset_url}) continue if container == 'm3u8' or ext == 'm3u8': if protocol == 'usp': if compat_parse_qs(compat_urllib_parse_urlparse(asset_url).query).get('token', [None])[0]: urlh = self._request_webpage( asset_url, video_id, fatal=False, headers=self.geo_verification_headers()) if not urlh: continue asset_url = urlh.geturl() asset_url = asset_url.replace('_drmnp.ism/', '_unpnp.ism/') for i in range(3, 0, -1): asset_url = asset_url = asset_url.replace('_sd1/', '_sd%d/' % i) m3u8_formats = self._extract_m3u8_formats( asset_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False) formats.extend(m3u8_formats) formats.extend(self._extract_mpd_formats( asset_url.replace('.m3u8', '.mpd'), video_id, mpd_id='dash', fatal=False)) if m3u8_formats: break else: formats.extend(self._extract_m3u8_formats( asset_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) elif container == 'mp4' or ext == 'mp4': quality = asset.get('video_quality') formats.append({ 'url': asset_url, 'format_id': quality, 'quality': quality_key(quality), 'ext': ext, }) self._sort_formats(formats) def get(getter): for src in (data, clip_data): v = try_get(src, getter, compat_str) if v: return v return { 'id': video_id, 'title': title, 'description': get(lambda x: x['description']), 'duration': int_or_none(clip_data.get('duration')), 'series': get(lambda x: x['program']['title']), 'formats': formats, 'subtitles': subtitles, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/sky.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( extract_attributes, smuggle_url, strip_or_none, urljoin, ) class SkyBaseIE(InfoExtractor): def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) video_data = extract_attributes(self._search_regex( r'(<div.+?class="[^"]*sdc-article-video__media-ooyala[^"]*"[^>]+>)', webpage, 'video data')) video_url = 'ooyala:%s' % video_data['data-video-id'] if video_data.get('data-token-required') == 'true': token_fetch_options = self._parse_json(video_data.get( 'data-token-fetch-options', '{}'), video_id, fatal=False) or {} token_fetch_url = token_fetch_options.get('url') if token_fetch_url: embed_token = self._download_webpage(urljoin( url, token_fetch_url), video_id, fatal=False) if embed_token: video_url = smuggle_url( video_url, {'embed_token': embed_token.strip('"')}) return { '_type': 'url_transparent', 'id': video_id, 'url': video_url, 'title': self._og_search_title(webpage), 'description': strip_or_none(self._og_search_description(webpage)), 'ie_key': 'Ooyala', } class SkySportsIE(SkyBaseIE): _VALID_URL = r'https?://(?:www\.)?skysports\.com/watch/video/(?P<id>[0-9]+)' _TEST = { 'url': 'http://www.skysports.com/watch/video/10328419/bale-its-our-time-to-shine', 'md5': '77d59166cddc8d3cb7b13e35eaf0f5ec', 'info_dict': { 'id': 'o3eWJnNDE6l7kfNO8BOoBlRxXRQ4ANNQ', 'ext': 'mp4', 'title': 'Bale: It\'s our time to shine', 'description': 'md5:e88bda94ae15f7720c5cb467e777bb6d', }, 'add_ie': ['Ooyala'], } class SkyNewsIE(SkyBaseIE): _VALID_URL = r'https?://news\.sky\.com/video/[0-9a-z-]+-(?P<id>[0-9]+)' _TEST = { 'url': 'https://news.sky.com/video/russian-plane-inspected-after-deadly-fire-11712962', 'md5': 'd6327e581473cea9976a3236ded370cd', 'info_dict': { 'id': '1ua21xaDE6lCtZDmbYfl8kwsKLooJbNM', 'ext': 'mp4', 'title': 'Russian plane inspected after deadly fire', 'description': 'The Russian Investigative Committee has 
released video of the wreckage of a passenger plane which caught fire near Moscow.', }, 'add_ie': ['Ooyala'], }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/skylinewebcams.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class SkylineWebcamsIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?skylinewebcams\.com/[^/]+/webcam/(?:[^/]+/)+(?P<id>[^/]+)\.html' _TEST = { 'url': 'https://www.skylinewebcams.com/it/webcam/italia/lazio/roma/scalinata-piazza-di-spagna-barcaccia.html', 'info_dict': { 'id': 'scalinata-piazza-di-spagna-barcaccia', 'ext': 'mp4', 'title': 're:^Live Webcam Scalinata di Piazza di Spagna - La Barcaccia [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'description': 'Roma, veduta sulla Scalinata di Piazza di Spagna e sulla Barcaccia', 'is_live': True, }, 'params': { 'skip_download': True, } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) stream_url = self._search_regex( r'(?:url|source)\s*:\s*(["\'])(?P<url>(?:https?:)?//.+?\.m3u8.*?)\1', webpage, 'stream url', group='url') title = self._og_search_title(webpage) description = self._og_search_description(webpage) return { 'id': video_id, 'url': stream_url, 'ext': 'mp4', 'title': self._live_title(title), 'description': description, 'is_live': True, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/skynewsarabia.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_str from ..utils import ( parse_iso8601, parse_duration, ) class SkyNewsArabiaBaseIE(InfoExtractor): _IMAGE_BASE_URL = 'http://www.skynewsarabia.com/web/images' def _call_api(self, path, value): return self._download_json('http://api.skynewsarabia.com/web/rest/v2/%s/%s.json' % (path, value), value) def _get_limelight_media_id(self, url): return self._search_regex(r'/media/[^/]+/([a-z0-9]{32})', url, 'limelight media id') def _get_image_url(self, image_path_template, width='1600', height='1200'): return self._IMAGE_BASE_URL + image_path_template.format(width=width, height=height) def _extract_video_info(self, video_data): video_id = compat_str(video_data['id']) topic = video_data.get('topicTitle') return { '_type': 'url_transparent', 'url': 'limelight:media:%s' % self._get_limelight_media_id(video_data['videoUrl'][0]['url']), 'id': video_id, 'title': video_data['headline'], 'description': video_data.get('summary'), 'thumbnail': self._get_image_url(video_data['mediaAsset']['imageUrl']), 'timestamp': parse_iso8601(video_data.get('date')), 'duration': parse_duration(video_data.get('runTime')), 'tags': video_data.get('tags', []), 'categories': [topic] if topic else [], 'webpage_url': 'http://www.skynewsarabia.com/web/video/%s' % video_id, 'ie_key': 'LimelightMedia', } class SkyNewsArabiaIE(SkyNewsArabiaBaseIE): IE_NAME = 'skynewsarabia:video' _VALID_URL = r'https?://(?:www\.)?skynewsarabia\.com/web/video/(?P<id>[0-9]+)' _TEST = { 'url': 'http://www.skynewsarabia.com/web/video/794902/%D9%86%D8%B5%D9%81-%D9%85%D9%84%D9%8A%D9%88%D9%86-%D9%85%D8%B5%D8%A8%D8%A7%D8%AD-%D8%B4%D8%AC%D8%B1%D8%A9-%D9%83%D8%B1%D9%8A%D8%B3%D9%85%D8%A7%D8%B3', 'info_dict': { 'id': '794902', 'ext': 'flv', 'title': 'نصف مليون مصباح على شجرة كريسماس', 'description': 'md5:22f1b27f0850eeb10c7e59b1f16eb7c6', 'upload_date': '20151128', 'timestamp': 1448697198, 'duration': 2119, }, 'params': 
{ # rtmp download 'skip_download': True, }, } def _real_extract(self, url): video_id = self._match_id(url) video_data = self._call_api('video', video_id) return self._extract_video_info(video_data) class SkyNewsArabiaArticleIE(SkyNewsArabiaBaseIE): IE_NAME = 'skynewsarabia:article' _VALID_URL = r'https?://(?:www\.)?skynewsarabia\.com/web/article/(?P<id>[0-9]+)' _TESTS = [{ 'url': 'http://www.skynewsarabia.com/web/article/794549/%D8%A7%D9%94%D8%AD%D8%AF%D8%A7%D8%AB-%D8%A7%D9%84%D8%B4%D8%B1%D9%82-%D8%A7%D9%84%D8%A7%D9%94%D9%88%D8%B3%D8%B7-%D8%AE%D8%B1%D9%8A%D8%B7%D8%A9-%D8%A7%D9%84%D8%A7%D9%94%D9%84%D8%B9%D8%A7%D8%A8-%D8%A7%D9%84%D8%B0%D9%83%D9%8A%D8%A9', 'info_dict': { 'id': '794549', 'ext': 'flv', 'title': 'بالفيديو.. ألعاب ذكية تحاكي واقع المنطقة', 'description': 'md5:0c373d29919a851e080ee4edd0c5d97f', 'upload_date': '20151126', 'timestamp': 1448559336, 'duration': 281.6, }, 'params': { # rtmp download 'skip_download': True, }, }, { 'url': 'http://www.skynewsarabia.com/web/article/794844/%D8%A7%D8%B3%D8%AA%D9%87%D8%AF%D8%A7%D9%81-%D9%82%D9%88%D8%A7%D8%B1%D8%A8-%D8%A7%D9%94%D8%B3%D9%84%D8%AD%D8%A9-%D9%84%D9%85%D9%8A%D9%84%D9%8A%D8%B4%D9%8A%D8%A7%D8%AA-%D8%A7%D9%84%D8%AD%D9%88%D8%AB%D9%8A-%D9%88%D8%B5%D8%A7%D9%84%D8%AD', 'info_dict': { 'id': '794844', 'title': 'إحباط تهريب أسلحة لميليشيات الحوثي وصالح بجنوب اليمن', 'description': 'md5:5c927b8b2e805796e7f693538d96fc7e', }, 'playlist_mincount': 2, }] def _real_extract(self, url): article_id = self._match_id(url) article_data = self._call_api('article', article_id) media_asset = article_data['mediaAsset'] if media_asset['type'] == 'VIDEO': topic = article_data.get('topicTitle') return { '_type': 'url_transparent', 'url': 'limelight:media:%s' % self._get_limelight_media_id(media_asset['videoUrl'][0]['url']), 'id': article_id, 'title': article_data['headline'], 'description': article_data.get('summary'), 'thumbnail': self._get_image_url(media_asset['imageUrl']), 'timestamp': parse_iso8601(article_data.get('date')), 
'tags': article_data.get('tags', []), 'categories': [topic] if topic else [], 'webpage_url': url, 'ie_key': 'LimelightMedia', } entries = [self._extract_video_info(item) for item in article_data.get('inlineItems', []) if item['type'] == 'VIDEO'] return self.playlist_result(entries, article_id, article_data['headline'], article_data.get('summary'))
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/slideshare.py
from __future__ import unicode_literals import re import json from .common import InfoExtractor from ..compat import ( compat_urlparse, ) from ..utils import ( ExtractorError, get_element_by_id, ) class SlideshareIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?slideshare\.net/[^/]+?/(?P<title>.+?)($|\?)' _TEST = { 'url': 'http://www.slideshare.net/Dataversity/keynote-presentation-managing-scale-and-complexity', 'info_dict': { 'id': '25665706', 'ext': 'mp4', 'title': 'Managing Scale and Complexity', 'description': 'This was a keynote presentation at the NoSQL Now! 2013 Conference & Expo (http://www.nosqlnow.com). This presentation was given by Adrian Cockcroft from Netflix.', }, } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) page_title = mobj.group('title') webpage = self._download_webpage(url, page_title) slideshare_obj = self._search_regex( r'\$\.extend\(.*?slideshare_object,\s*(\{.*?\})\);', webpage, 'slideshare object') info = json.loads(slideshare_obj) if info['slideshow']['type'] != 'video': raise ExtractorError('Webpage type is "%s": only video extraction is supported for Slideshare' % info['slideshow']['type'], expected=True) doc = info['doc'] bucket = info['jsplayer']['video_bucket'] ext = info['jsplayer']['video_extension'] video_url = compat_urlparse.urljoin(bucket, doc + '-SD.' + ext) description = get_element_by_id('slideshow-description-paragraph', webpage) or self._html_search_regex( r'(?s)<p[^>]+itemprop="description"[^>]*>(.+?)</p>', webpage, 'description', fatal=False) return { '_type': 'video', 'id': info['slideshow']['id'], 'title': info['slideshow']['title'], 'ext': ext, 'url': video_url, 'thumbnail': info['slideshow']['pin_image_url'], 'description': description.strip() if description else None, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/slideslive.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ExtractorError class SlidesLiveIE(InfoExtractor): _VALID_URL = r'https?://slideslive\.com/(?P<id>[0-9]+)' _TESTS = [{ # video_service_name = YOUTUBE 'url': 'https://slideslive.com/38902413/gcc-ia16-backend', 'md5': 'b29fcd6c6952d0c79c5079b0e7a07e6f', 'info_dict': { 'id': 'LMtgR8ba0b0', 'ext': 'mp4', 'title': '38902413: external video', 'description': '3890241320170925-9-1yd6ech.mp4', 'uploader': 'SlidesLive Administrator', 'uploader_id': 'UC62SdArr41t_-_fX40QCLRw', 'upload_date': '20170925', } }, { # video_service_name = youtube 'url': 'https://slideslive.com/38903721/magic-a-scientific-resurrection-of-an-esoteric-legend', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) video_data = self._download_json( url, video_id, headers={'Accept': 'application/json'}) service_name = video_data['video_service_name'].lower() if service_name == 'youtube': yt_video_id = video_data['video_service_id'] return self.url_result(yt_video_id, 'Youtube', video_id=yt_video_id) else: raise ExtractorError( 'Unsupported service name: {0}'.format(service_name), expected=True)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/slutload.py
from __future__ import unicode_literals from .common import InfoExtractor class SlutloadIE(InfoExtractor): _VALID_URL = r'https?://(?:\w+\.)?slutload\.com/(?:video/[^/]+|embed_player|watch)/(?P<id>[^/]+)' _TESTS = [{ 'url': 'http://www.slutload.com/video/virginie-baisee-en-cam/TD73btpBqSxc/', 'md5': '868309628ba00fd488cf516a113fd717', 'info_dict': { 'id': 'TD73btpBqSxc', 'ext': 'mp4', 'title': 'virginie baisee en cam', 'age_limit': 18, 'thumbnail': r're:https?://.*?\.jpg' }, }, { # mobile site 'url': 'http://mobile.slutload.com/video/masturbation-solo/fviFLmc6kzJ/', 'only_matching': True, }, { 'url': 'http://www.slutload.com/embed_player/TD73btpBqSxc/', 'only_matching': True, }, { 'url': 'http://www.slutload.com/watch/TD73btpBqSxc/Virginie-Baisee-En-Cam.html', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) embed_page = self._download_webpage( 'http://www.slutload.com/embed_player/%s' % video_id, video_id, 'Downloading embed page', fatal=False) if embed_page: def extract(what): return self._html_search_regex( r'data-video-%s=(["\'])(?P<url>(?:(?!\1).)+)\1' % what, embed_page, 'video %s' % what, default=None, group='url') video_url = extract('url') if video_url: title = self._html_search_regex( r'<title>([^<]+)', embed_page, 'title', default=video_id) return { 'id': video_id, 'url': video_url, 'title': title, 'thumbnail': extract('preview'), 'age_limit': 18 } webpage = self._download_webpage( 'http://www.slutload.com/video/_/%s/' % video_id, video_id) title = self._html_search_regex( r'<h1><strong>([^<]+)</strong>', webpage, 'title').strip() info = self._parse_html5_media_entries(url, webpage, video_id)[0] info.update({ 'id': video_id, 'title': title, 'age_limit': 18, }) return info
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/smotri.py
# coding: utf-8 from __future__ import unicode_literals import re import json import hashlib import uuid from .common import InfoExtractor from ..utils import ( ExtractorError, int_or_none, sanitized_Request, unified_strdate, urlencode_postdata, xpath_text, ) class SmotriIE(InfoExtractor): IE_DESC = 'Smotri.com' IE_NAME = 'smotri' _VALID_URL = r'https?://(?:www\.)?(?:smotri\.com/video/view/\?id=|pics\.smotri\.com/(?:player|scrubber_custom8)\.swf\?file=)(?P<id>v(?P<realvideoid>[0-9]+)[a-z0-9]{4})' _NETRC_MACHINE = 'smotri' _TESTS = [ # real video id 2610366 { 'url': 'http://smotri.com/video/view/?id=v261036632ab', 'md5': '02c0dfab2102984e9c5bb585cc7cc321', 'info_dict': { 'id': 'v261036632ab', 'ext': 'mp4', 'title': 'катастрофа с камер видеонаблюдения', 'uploader': 'rbc2008', 'uploader_id': 'rbc08', 'upload_date': '20131118', 'thumbnail': 'http://frame6.loadup.ru/8b/a9/2610366.3.3.jpg', }, }, # real video id 57591 { 'url': 'http://smotri.com/video/view/?id=v57591cb20', 'md5': '830266dfc21f077eac5afd1883091bcd', 'info_dict': { 'id': 'v57591cb20', 'ext': 'flv', 'title': 'test', 'uploader': 'Support Photofile@photofile', 'uploader_id': 'support-photofile', 'upload_date': '20070704', 'thumbnail': 'http://frame4.loadup.ru/03/ed/57591.2.3.jpg', }, }, # video-password, not approved by moderator { 'url': 'http://smotri.com/video/view/?id=v1390466a13c', 'md5': 'f6331cef33cad65a0815ee482a54440b', 'info_dict': { 'id': 'v1390466a13c', 'ext': 'mp4', 'title': 'TOCCA_A_NOI_-_LE_COSE_NON_VANNO_CAMBIAMOLE_ORA-1', 'uploader': 'timoxa40', 'uploader_id': 'timoxa40', 'upload_date': '20100404', 'thumbnail': 'http://frame7.loadup.ru/af/3f/1390466.3.3.jpg', }, 'params': { 'videopassword': 'qwerty', }, 'skip': 'Video is not approved by moderator', }, # video-password { 'url': 'http://smotri.com/video/view/?id=v6984858774#', 'md5': 'f11e01d13ac676370fc3b95b9bda11b0', 'info_dict': { 'id': 'v6984858774', 'ext': 'mp4', 'title': 'Дача Солженицина ПАРОЛЬ 223322', 'uploader': 'psavari1', 
'uploader_id': 'psavari1', 'upload_date': '20081103', 'thumbnail': r're:^https?://.*\.jpg$', }, 'params': { 'videopassword': '223322', }, }, # age limit + video-password, not approved by moderator { 'url': 'http://smotri.com/video/view/?id=v15408898bcf', 'md5': '91e909c9f0521adf5ee86fbe073aad70', 'info_dict': { 'id': 'v15408898bcf', 'ext': 'flv', 'title': 'этот ролик не покажут по ТВ', 'uploader': 'zzxxx', 'uploader_id': 'ueggb', 'upload_date': '20101001', 'thumbnail': 'http://frame3.loadup.ru/75/75/1540889.1.3.jpg', 'age_limit': 18, }, 'params': { 'videopassword': '333' }, 'skip': 'Video is not approved by moderator', }, # age limit + video-password { 'url': 'http://smotri.com/video/view/?id=v7780025814', 'md5': 'b4599b068422559374a59300c5337d72', 'info_dict': { 'id': 'v7780025814', 'ext': 'mp4', 'title': 'Sexy Beach (пароль 123)', 'uploader': 'вАся', 'uploader_id': 'asya_prosto', 'upload_date': '20081218', 'thumbnail': r're:^https?://.*\.jpg$', 'age_limit': 18, }, 'params': { 'videopassword': '123' }, }, # swf player { 'url': 'http://pics.smotri.com/scrubber_custom8.swf?file=v9188090500', 'md5': '31099eeb4bc906712c5f40092045108d', 'info_dict': { 'id': 'v9188090500', 'ext': 'mp4', 'title': 'Shakira - Don\'t Bother', 'uploader': 'HannahL', 'uploader_id': 'lisaha95', 'upload_date': '20090331', 'thumbnail': 'http://frame8.loadup.ru/44/0b/918809.7.3.jpg', }, }, ] @classmethod def _extract_url(cls, webpage): mobj = re.search( r'<embed[^>]src=(["\'])(?P<url>http://pics\.smotri\.com/(?:player|scrubber_custom8)\.swf\?file=v.+?\1)', webpage) if mobj is not None: return mobj.group('url') mobj = re.search( r'''(?x)<div\s+class="video_file">http://smotri\.com/video/download/file/[^<]+</div>\s* <div\s+class="video_image">[^<]+</div>\s* <div\s+class="video_id">(?P<id>[^<]+)</div>''', webpage) if mobj is not None: return 'http://smotri.com/video/view/?id=%s' % mobj.group('id') def _search_meta(self, name, html, display_name=None): if display_name is None: display_name = name 
return self._html_search_meta(name, html, display_name) def _real_extract(self, url): video_id = self._match_id(url) video_form = { 'ticket': video_id, 'video_url': '1', 'frame_url': '1', 'devid': 'LoadupFlashPlayer', 'getvideoinfo': '1', } video_password = self._downloader.params.get('videopassword') if video_password: video_form['pass'] = hashlib.md5(video_password.encode('utf-8')).hexdigest() video = self._download_json( 'http://smotri.com/video/view/url/bot/', video_id, 'Downloading video JSON', data=urlencode_postdata(video_form), headers={'Content-Type': 'application/x-www-form-urlencoded'}) video_url = video.get('_vidURL') or video.get('_vidURL_mp4') if not video_url: if video.get('_moderate_no'): raise ExtractorError( 'Video %s has not been approved by moderator' % video_id, expected=True) if video.get('error'): raise ExtractorError('Video %s does not exist' % video_id, expected=True) if video.get('_pass_protected') == 1: msg = ('Invalid video password' if video_password else 'This video is protected by a password, use the --video-password option') raise ExtractorError(msg, expected=True) title = video['title'] thumbnail = video.get('_imgURL') upload_date = unified_strdate(video.get('added')) uploader = video.get('userNick') uploader_id = video.get('userLogin') duration = int_or_none(video.get('duration')) # Video JSON does not provide enough meta data # We will extract some from the video web page instead webpage_url = 'http://smotri.com/video/view/?id=%s' % video_id webpage = self._download_webpage(webpage_url, video_id, 'Downloading video page') # Warning if video is unavailable warning = self._html_search_regex( r'<div[^>]+class="videoUnModer"[^>]*>(.+?)</div>', webpage, 'warning message', default=None) if warning is not None: self._downloader.report_warning( 'Video %s may not be available; smotri said: %s ' % (video_id, warning)) # Adult content if 'EroConfirmText">' in webpage: self.report_age_confirmation() confirm_string = self._html_search_regex( 
r'<a[^>]+href="/video/view/\?id=%s&confirm=([^"]+)"' % video_id, webpage, 'confirm string') confirm_url = webpage_url + '&confirm=%s' % confirm_string webpage = self._download_webpage( confirm_url, video_id, 'Downloading video page (age confirmed)') adult_content = True else: adult_content = False view_count = self._html_search_regex( r'(?s)Общее количество просмотров.*?<span class="Number">(\d+)</span>', webpage, 'view count', fatal=False) return { 'id': video_id, 'url': video_url, 'title': title, 'thumbnail': thumbnail, 'uploader': uploader, 'upload_date': upload_date, 'uploader_id': uploader_id, 'duration': duration, 'view_count': int_or_none(view_count), 'age_limit': 18 if adult_content else 0, } class SmotriCommunityIE(InfoExtractor): IE_DESC = 'Smotri.com community videos' IE_NAME = 'smotri:community' _VALID_URL = r'https?://(?:www\.)?smotri\.com/community/video/(?P<id>[0-9A-Za-z_\'-]+)' _TEST = { 'url': 'http://smotri.com/community/video/kommuna', 'info_dict': { 'id': 'kommuna', }, 'playlist_mincount': 4, } def _real_extract(self, url): community_id = self._match_id(url) rss = self._download_xml( 'http://smotri.com/export/rss/video/by/community/-/%s/video.xml' % community_id, community_id, 'Downloading community RSS') entries = [ self.url_result(video_url.text, SmotriIE.ie_key()) for video_url in rss.findall('./channel/item/link')] return self.playlist_result(entries, community_id) class SmotriUserIE(InfoExtractor): IE_DESC = 'Smotri.com user videos' IE_NAME = 'smotri:user' _VALID_URL = r'https?://(?:www\.)?smotri\.com/user/(?P<id>[0-9A-Za-z_\'-]+)' _TESTS = [{ 'url': 'http://smotri.com/user/inspector', 'info_dict': { 'id': 'inspector', 'title': 'Inspector', }, 'playlist_mincount': 9, }] def _real_extract(self, url): user_id = self._match_id(url) rss = self._download_xml( 'http://smotri.com/export/rss/user/video/-/%s/video.xml' % user_id, user_id, 'Downloading user RSS') entries = [self.url_result(video_url.text, 'Smotri') for video_url in 
rss.findall('./channel/item/link')] description_text = xpath_text(rss, './channel/description') or '' user_nickname = self._search_regex( '^Видео режиссера (.+)$', description_text, 'user nickname', fatal=False) return self.playlist_result(entries, user_id, user_nickname) class SmotriBroadcastIE(InfoExtractor): IE_DESC = 'Smotri.com broadcasts' IE_NAME = 'smotri:broadcast' _VALID_URL = r'https?://(?:www\.)?(?P<url>smotri\.com/live/(?P<id>[^/]+))/?.*' _NETRC_MACHINE = 'smotri' def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) broadcast_id = mobj.group('id') broadcast_url = 'http://' + mobj.group('url') broadcast_page = self._download_webpage(broadcast_url, broadcast_id, 'Downloading broadcast page') if re.search('>Режиссер с логином <br/>"%s"<br/> <span>не существует<' % broadcast_id, broadcast_page) is not None: raise ExtractorError( 'Broadcast %s does not exist' % broadcast_id, expected=True) # Adult content if re.search('EroConfirmText">', broadcast_page) is not None: (username, password) = self._get_login_info() if username is None: self.raise_login_required( 'Erotic broadcasts allowed only for registered users') login_form = { 'login-hint53': '1', 'confirm_erotic': '1', 'login': username, 'password': password, } request = sanitized_Request( broadcast_url + '/?no_redirect=1', urlencode_postdata(login_form)) request.add_header('Content-Type', 'application/x-www-form-urlencoded') broadcast_page = self._download_webpage( request, broadcast_id, 'Logging in and confirming age') if '>Неверный логин или пароль<' in broadcast_page: raise ExtractorError( 'Unable to log in: bad username or password', expected=True) adult_content = True else: adult_content = False ticket = self._html_search_regex( (r'data-user-file=(["\'])(?P<ticket>(?!\1).+)\1', r"window\.broadcast_control\.addFlashVar\('file'\s*,\s*'(?P<ticket>[^']+)'\)"), broadcast_page, 'broadcast ticket', group='ticket') broadcast_url = 'http://smotri.com/broadcast/view/url/?ticket=%s' % ticket 
broadcast_password = self._downloader.params.get('videopassword') if broadcast_password: broadcast_url += '&pass=%s' % hashlib.md5(broadcast_password.encode('utf-8')).hexdigest() broadcast_json_page = self._download_webpage( broadcast_url, broadcast_id, 'Downloading broadcast JSON') try: broadcast_json = json.loads(broadcast_json_page) protected_broadcast = broadcast_json['_pass_protected'] == 1 if protected_broadcast and not broadcast_password: raise ExtractorError( 'This broadcast is protected by a password, use the --video-password option', expected=True) broadcast_offline = broadcast_json['is_play'] == 0 if broadcast_offline: raise ExtractorError('Broadcast %s is offline' % broadcast_id, expected=True) rtmp_url = broadcast_json['_server'] mobj = re.search(r'^rtmp://[^/]+/(?P<app>.+)/?$', rtmp_url) if not mobj: raise ExtractorError('Unexpected broadcast rtmp URL') broadcast_playpath = broadcast_json['_streamName'] broadcast_app = '%s/%s' % (mobj.group('app'), broadcast_json['_vidURL']) broadcast_thumbnail = broadcast_json.get('_imgURL') broadcast_title = self._live_title(broadcast_json['title']) broadcast_description = broadcast_json.get('description') broadcaster_nick = broadcast_json.get('nick') broadcaster_login = broadcast_json.get('login') rtmp_conn = 'S:%s' % uuid.uuid4().hex except KeyError: if protected_broadcast: raise ExtractorError('Bad broadcast password', expected=True) raise ExtractorError('Unexpected broadcast JSON') return { 'id': broadcast_id, 'url': rtmp_url, 'title': broadcast_title, 'thumbnail': broadcast_thumbnail, 'description': broadcast_description, 'uploader': broadcaster_nick, 'uploader_id': broadcaster_login, 'age_limit': 18 if adult_content else 0, 'ext': 'flv', 'play_path': broadcast_playpath, 'player_url': 'http://pics.smotri.com/broadcast_play.swf', 'app': broadcast_app, 'rtmp_live': True, 'rtmp_conn': rtmp_conn, 'is_live': True, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/snotr.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( parse_duration, parse_filesize, str_to_int, ) class SnotrIE(InfoExtractor): _VALID_URL = r'http?://(?:www\.)?snotr\.com/video/(?P<id>\d+)/([\w]+)' _TESTS = [{ 'url': 'http://www.snotr.com/video/13708/Drone_flying_through_fireworks', 'info_dict': { 'id': '13708', 'ext': 'mp4', 'title': 'Drone flying through fireworks!', 'duration': 248, 'filesize_approx': 40700000, 'description': 'A drone flying through Fourth of July Fireworks', 'thumbnail': r're:^https?://.*\.jpg$', }, 'expected_warnings': ['description'], }, { 'url': 'http://www.snotr.com/video/530/David_Letteman_-_George_W_Bush_Top_10', 'info_dict': { 'id': '530', 'ext': 'mp4', 'title': 'David Letteman - George W. Bush Top 10', 'duration': 126, 'filesize_approx': 8500000, 'description': 'The top 10 George W. Bush moments, brought to you by David Letterman!', 'thumbnail': r're:^https?://.*\.jpg$', } }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') webpage = self._download_webpage(url, video_id) title = self._og_search_title(webpage) description = self._og_search_description(webpage) info_dict = self._parse_html5_media_entries( url, webpage, video_id, m3u8_entry_protocol='m3u8_native')[0] view_count = str_to_int(self._html_search_regex( r'<p[^>]*>\s*<strong[^>]*>Views:</strong>\s*<span[^>]*>([\d,\.]+)', webpage, 'view count', fatal=False)) duration = parse_duration(self._html_search_regex( r'<p[^>]*>\s*<strong[^>]*>Length:</strong>\s*<span[^>]*>([\d:]+)', webpage, 'duration', fatal=False)) filesize_approx = parse_filesize(self._html_search_regex( r'<p[^>]*>\s*<strong[^>]*>Filesize:</strong>\s*<span[^>]*>([^<]+)', webpage, 'filesize', fatal=False)) info_dict.update({ 'id': video_id, 'description': description, 'title': title, 'view_count': view_count, 'duration': duration, 'filesize_approx': filesize_approx, }) return info_dict
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/sohu.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_str, compat_urllib_parse_urlencode, ) from ..utils import ( ExtractorError, int_or_none, try_get, ) class SohuIE(InfoExtractor): _VALID_URL = r'https?://(?P<mytv>my\.)?tv\.sohu\.com/.+?/(?(mytv)|n)(?P<id>\d+)\.shtml.*?' # Sohu videos give different MD5 sums on Travis CI and my machine _TESTS = [{ 'note': 'This video is available only in Mainland China', 'url': 'http://tv.sohu.com/20130724/n382479172.shtml#super', 'info_dict': { 'id': '382479172', 'ext': 'mp4', 'title': 'MV:Far East Movement《The Illest》', }, 'skip': 'On available in China', }, { 'url': 'http://tv.sohu.com/20150305/n409385080.shtml', 'info_dict': { 'id': '409385080', 'ext': 'mp4', 'title': '《2015湖南卫视羊年元宵晚会》唐嫣《花好月圆》', } }, { 'url': 'http://my.tv.sohu.com/us/232799889/78693464.shtml', 'info_dict': { 'id': '78693464', 'ext': 'mp4', 'title': '【爱范品】第31期:MWC见不到的奇葩手机', } }, { 'note': 'Multipart video', 'url': 'http://my.tv.sohu.com/pl/8384802/78910339.shtml', 'info_dict': { 'id': '78910339', 'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆', }, 'playlist': [{ 'info_dict': { 'id': '78910339_part1', 'ext': 'mp4', 'duration': 294, 'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆', } }, { 'info_dict': { 'id': '78910339_part2', 'ext': 'mp4', 'duration': 300, 'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆', } }, { 'info_dict': { 'id': '78910339_part3', 'ext': 'mp4', 'duration': 150, 'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆', } }] }, { 'note': 'Video with title containing dash', 'url': 'http://my.tv.sohu.com/us/249884221/78932792.shtml', 'info_dict': { 'id': '78932792', 'ext': 'mp4', 'title': 'youtube-dl testing video', }, 'params': { 'skip_download': True } }] def _real_extract(self, url): def _fetch_data(vid_id, mytv=False): if mytv: base_data_url = 'http://my.tv.sohu.com/play/videonew.do?vid=' else: base_data_url = 'http://hot.vrs.sohu.com/vrs_flash.action?vid=' return self._download_json( base_data_url + vid_id, video_id, 
'Downloading JSON data for %s' % vid_id, headers=self.geo_verification_headers()) mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') mytv = mobj.group('mytv') is not None webpage = self._download_webpage(url, video_id) title = re.sub(r' - 搜狐视频$', '', self._og_search_title(webpage)) vid = self._html_search_regex( r'var vid ?= ?["\'](\d+)["\']', webpage, 'video path') vid_data = _fetch_data(vid, mytv) if vid_data['play'] != 1: if vid_data.get('status') == 12: raise ExtractorError( '%s said: There\'s something wrong in the video.' % self.IE_NAME, expected=True) else: self.raise_geo_restricted( '%s said: The video is only licensed to users in Mainland China.' % self.IE_NAME) formats_json = {} for format_id in ('nor', 'high', 'super', 'ori', 'h2644k', 'h2654k'): vid_id = vid_data['data'].get('%sVid' % format_id) if not vid_id: continue vid_id = compat_str(vid_id) formats_json[format_id] = vid_data if vid == vid_id else _fetch_data(vid_id, mytv) part_count = vid_data['data']['totalBlocks'] playlist = [] for i in range(part_count): formats = [] for format_id, format_data in formats_json.items(): allot = format_data['allot'] data = format_data['data'] clips_url = data['clipsURL'] su = data['su'] video_url = 'newflv.sohu.ccgslb.net' cdnId = None retries = 0 while 'newflv.sohu.ccgslb.net' in video_url: params = { 'prot': 9, 'file': clips_url[i], 'new': su[i], 'prod': 'flash', 'rb': 1, } if cdnId is not None: params['idc'] = cdnId download_note = 'Downloading %s video URL part %d of %d' % ( format_id, i + 1, part_count) if retries > 0: download_note += ' (retry #%d)' % retries part_info = self._parse_json(self._download_webpage( 'http://%s/?%s' % (allot, compat_urllib_parse_urlencode(params)), video_id, download_note), video_id) video_url = part_info['url'] cdnId = part_info.get('nid') retries += 1 if retries > 5: raise ExtractorError('Failed to get video URL') formats.append({ 'url': video_url, 'format_id': format_id, 'filesize': int_or_none( try_get(data, 
lambda x: x['clipsBytes'][i])), 'width': int_or_none(data.get('width')), 'height': int_or_none(data.get('height')), 'fps': int_or_none(data.get('fps')), }) self._sort_formats(formats) playlist.append({ 'id': '%s_part%d' % (video_id, i + 1), 'title': title, 'duration': vid_data['data']['clipsDuration'][i], 'formats': formats, }) if len(playlist) == 1: info = playlist[0] info['id'] = video_id else: info = { '_type': 'multi_video', 'entries': playlist, 'id': video_id, 'title': title, } return info
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/sonyliv.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import smuggle_url class SonyLIVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?sonyliv\.com/details/[^/]+/(?P<id>\d+)' _TESTS = [{ 'url': "http://www.sonyliv.com/details/episodes/5024612095001/Ep.-1---Achaari-Cheese-Toast---Bachelor's-Delight", 'info_dict': { 'title': "Ep. 1 - Achaari Cheese Toast - Bachelor's Delight", 'id': 'ref:5024612095001', 'ext': 'mp4', 'upload_date': '20170923', 'description': 'md5:7f28509a148d5be9d0782b4d5106410d', 'uploader_id': '5182475815001', 'timestamp': 1506200547, }, 'params': { 'skip_download': True, }, 'add_ie': ['BrightcoveNew'], }, { 'url': 'http://www.sonyliv.com/details/full%20movie/4951168986001/Sei-Raat-(Bangla)', 'only_matching': True, }] # BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/4338955589001/default_default/index.html?videoId=%s' BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/5182475815001/default_default/index.html?videoId=ref:%s' def _real_extract(self, url): brightcove_id = self._match_id(url) return self.url_result( smuggle_url(self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, { 'geo_countries': ['IN'], 'referrer': url, }), 'BrightcoveNew', brightcove_id)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/soundcloud.py
# coding: utf-8 from __future__ import unicode_literals import itertools import re from .common import ( InfoExtractor, SearchInfoExtractor ) from ..compat import ( compat_str, compat_urlparse, ) from ..utils import ( ExtractorError, float_or_none, HEADRequest, int_or_none, KNOWN_EXTENSIONS, mimetype2ext, str_or_none, try_get, unified_timestamp, update_url_query, url_or_none, ) class SoundcloudEmbedIE(InfoExtractor): _VALID_URL = r'https?://(?:w|player|p)\.soundcloud\.com/player/?.*?url=(?P<id>.*)' @staticmethod def _extract_urls(webpage): return [m.group('url') for m in re.finditer( r'<iframe[^>]+src=(["\'])(?P<url>(?:https?://)?(?:w\.)?soundcloud\.com/player.+?)\1', webpage)] def _real_extract(self, url): return self.url_result(compat_urlparse.parse_qs( compat_urlparse.urlparse(url).query)['url'][0]) class SoundcloudIE(InfoExtractor): """Information extractor for soundcloud.com To access the media, the uid of the song and a stream token must be extracted from the page source and the script must make a request to media.soundcloud.com/crossdomain.xml. Then the media can be grabbed by requesting from an url composed of the stream token and uid """ _VALID_URL = r'''(?x)^(?:https?://)? (?:(?:(?:www\.|m\.)?soundcloud\.com/ (?!stations/track) (?P<uploader>[\w\d-]+)/ (?!(?:tracks|albums|sets(?:/.+?)?|reposts|likes|spotlight)/?(?:$|[?#])) (?P<title>[\w\d-]+)/? (?P<token>[^?]+?)?(?:[?].*)?$) |(?:api(?:-v2)?\.soundcloud\.com/tracks/(?P<track_id>\d+) (?:/?\?secret_token=(?P<secret_token>[^&]+))?) ) ''' IE_NAME = 'soundcloud' _TESTS = [ { 'url': 'http://soundcloud.com/ethmusic/lostin-powers-she-so-heavy', 'md5': 'ebef0a451b909710ed1d7787dddbf0d7', 'info_dict': { 'id': '62986583', 'ext': 'mp3', 'title': 'Lostin Powers - She so Heavy (SneakPreview) Adrian Ackers Blueprint 1', 'description': 'No Downloads untill we record the finished version this weekend, i was too pumped n i had to post it , earl is prolly gonna b hella p.o\'d', 'uploader': 'E.T. 
ExTerrestrial Music', 'uploader_id': '1571244', 'timestamp': 1349920598, 'upload_date': '20121011', 'duration': 143.216, 'license': 'all-rights-reserved', 'view_count': int, 'like_count': int, 'comment_count': int, 'repost_count': int, } }, # not streamable song { 'url': 'https://soundcloud.com/the-concept-band/goldrushed-mastered?in=the-concept-band/sets/the-royal-concept-ep', 'info_dict': { 'id': '47127627', 'ext': 'mp3', 'title': 'Goldrushed', 'description': 'From Stockholm Sweden\r\nPovel / Magnus / Filip / David\r\nwww.theroyalconcept.com', 'uploader': 'The Royal Concept', 'uploader_id': '9615865', 'timestamp': 1337635207, 'upload_date': '20120521', 'duration': 30, 'license': 'all-rights-reserved', 'view_count': int, 'like_count': int, 'comment_count': int, 'repost_count': int, }, 'params': { # rtmp 'skip_download': True, }, 'skip': 'Preview', }, # private link { 'url': 'https://soundcloud.com/jaimemf/youtube-dl-test-video-a-y-baw/s-8Pjrp', 'md5': 'aa0dd32bfea9b0c5ef4f02aacd080604', 'info_dict': { 'id': '123998367', 'ext': 'mp3', 'title': 'Youtube - Dl Test Video \'\' Ä↭', 'description': 'test chars: \"\'/\\ä↭', 'uploader': 'jaimeMF', 'uploader_id': '69767071', 'timestamp': 1386604920, 'upload_date': '20131209', 'duration': 9.927, 'license': 'all-rights-reserved', 'view_count': int, 'like_count': int, 'comment_count': int, 'repost_count': int, }, }, # private link (alt format) { 'url': 'https://api.soundcloud.com/tracks/123998367?secret_token=s-8Pjrp', 'md5': 'aa0dd32bfea9b0c5ef4f02aacd080604', 'info_dict': { 'id': '123998367', 'ext': 'mp3', 'title': 'Youtube - Dl Test Video \'\' Ä↭', 'description': 'test chars: \"\'/\\ä↭', 'uploader': 'jaimeMF', 'uploader_id': '69767071', 'timestamp': 1386604920, 'upload_date': '20131209', 'duration': 9.927, 'license': 'all-rights-reserved', 'view_count': int, 'like_count': int, 'comment_count': int, 'repost_count': int, }, }, # downloadable song { 'url': 'https://soundcloud.com/oddsamples/bus-brakes', 'md5': 
'7624f2351f8a3b2e7cd51522496e7631', 'info_dict': { 'id': '128590877', 'ext': 'mp3', 'title': 'Bus Brakes', 'description': 'md5:0053ca6396e8d2fd7b7e1595ef12ab66', 'uploader': 'oddsamples', 'uploader_id': '73680509', 'timestamp': 1389232924, 'upload_date': '20140109', 'duration': 17.346, 'license': 'cc-by-sa', 'view_count': int, 'like_count': int, 'comment_count': int, 'repost_count': int, }, }, # private link, downloadable format { 'url': 'https://soundcloud.com/oriuplift/uponly-238-no-talking-wav/s-AyZUd', 'md5': '64a60b16e617d41d0bef032b7f55441e', 'info_dict': { 'id': '340344461', 'ext': 'wav', 'title': 'Uplifting Only 238 [No Talking] (incl. Alex Feed Guestmix) (Aug 31, 2017) [wav]', 'description': 'md5:fa20ee0fca76a3d6df8c7e57f3715366', 'uploader': 'Ori Uplift Music', 'uploader_id': '12563093', 'timestamp': 1504206263, 'upload_date': '20170831', 'duration': 7449.096, 'license': 'all-rights-reserved', 'view_count': int, 'like_count': int, 'comment_count': int, 'repost_count': int, }, }, # no album art, use avatar pic for thumbnail { 'url': 'https://soundcloud.com/garyvee/sideways-prod-mad-real', 'md5': '59c7872bc44e5d99b7211891664760c2', 'info_dict': { 'id': '309699954', 'ext': 'mp3', 'title': 'Sideways (Prod. 
Mad Real)', 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e', 'uploader': 'garyvee', 'uploader_id': '2366352', 'timestamp': 1488152409, 'upload_date': '20170226', 'duration': 207.012, 'thumbnail': r're:https?://.*\.jpg', 'license': 'all-rights-reserved', 'view_count': int, 'like_count': int, 'comment_count': int, 'repost_count': int, }, 'params': { 'skip_download': True, }, }, # not available via api.soundcloud.com/i1/tracks/id/streams { 'url': 'https://soundcloud.com/giovannisarani/mezzo-valzer', 'md5': 'e22aecd2bc88e0e4e432d7dcc0a1abf7', 'info_dict': { 'id': '583011102', 'ext': 'mp3', 'title': 'Mezzo Valzer', 'description': 'md5:4138d582f81866a530317bae316e8b61', 'uploader': 'Giovanni Sarani', 'uploader_id': '3352531', 'timestamp': 1551394171, 'upload_date': '20190228', 'duration': 180.157, 'thumbnail': r're:https?://.*\.jpg', 'license': 'all-rights-reserved', 'view_count': int, 'like_count': int, 'comment_count': int, 'repost_count': int, }, 'expected_warnings': ['Unable to download JSON metadata'], } ] _API_BASE = 'https://api.soundcloud.com/' _API_V2_BASE = 'https://api-v2.soundcloud.com/' _BASE_URL = 'https://soundcloud.com/' _CLIENT_ID = 'BeGVhOrGmfboy1LtiHTQF6Ejpt9ULJCI' _IMAGE_REPL_RE = r'-([0-9a-z]+)\.jpg' _ARTWORK_MAP = { 'mini': 16, 'tiny': 20, 'small': 32, 'badge': 47, 't67x67': 67, 'large': 100, 't300x300': 300, 'crop': 400, 't500x500': 500, 'original': 0, } @classmethod def _resolv_url(cls, url): return SoundcloudIE._API_V2_BASE + 'resolve?url=' + url + '&client_id=' + cls._CLIENT_ID def _extract_info_dict(self, info, full_title=None, secret_token=None, version=2): track_id = compat_str(info['id']) title = info['title'] track_base_url = self._API_BASE + 'tracks/%s' % track_id format_urls = set() formats = [] query = {'client_id': self._CLIENT_ID} if secret_token: query['secret_token'] = secret_token if info.get('downloadable'): format_url = update_url_query( info.get('download_url') or track_base_url + '/download', query) 
format_urls.add(format_url) if version == 2: v1_info = self._download_json( track_base_url, track_id, query=query, fatal=False) or {} else: v1_info = info formats.append({ 'format_id': 'download', 'ext': v1_info.get('original_format') or 'mp3', 'filesize': int_or_none(v1_info.get('original_content_size')), 'url': format_url, 'preference': 10, }) def invalid_url(url): return not url or url in format_urls or re.search(r'/(?:preview|playlist)/0/30/', url) def add_format(f, protocol): mobj = re.search(r'\.(?P<abr>\d+)\.(?P<ext>[0-9a-z]{3,4})(?=[/?])', stream_url) if mobj: for k, v in mobj.groupdict().items(): if not f.get(k): f[k] = v format_id_list = [] if protocol: format_id_list.append(protocol) for k in ('ext', 'abr'): v = f.get(k) if v: format_id_list.append(v) abr = f.get('abr') if abr: f['abr'] = int(abr) f.update({ 'format_id': '_'.join(format_id_list), 'protocol': 'm3u8_native' if protocol == 'hls' else 'http', }) formats.append(f) # New API transcodings = try_get( info, lambda x: x['media']['transcodings'], list) or [] for t in transcodings: if not isinstance(t, dict): continue format_url = url_or_none(t.get('url')) if not format_url or t.get('snipped') or '/preview/' in format_url: continue stream = self._download_json( format_url, track_id, query=query, fatal=False) if not isinstance(stream, dict): continue stream_url = url_or_none(stream.get('url')) if invalid_url(stream_url): continue format_urls.add(stream_url) stream_format = t.get('format') or {} protocol = stream_format.get('protocol') if protocol != 'hls' and '/hls' in format_url: protocol = 'hls' ext = None preset = str_or_none(t.get('preset')) if preset: ext = preset.split('_')[0] if ext not in KNOWN_EXTENSIONS: ext = mimetype2ext(stream_format.get('mime_type')) add_format({ 'url': stream_url, 'ext': ext, }, 'http' if protocol == 'progressive' else protocol) if not formats: # Old API, does not work for some tracks (e.g. 
# https://soundcloud.com/giovannisarani/mezzo-valzer) # and might serve preview URLs (e.g. # http://www.soundcloud.com/snbrn/ele) format_dict = self._download_json( track_base_url + '/streams', track_id, 'Downloading track url', query=query, fatal=False) or {} for key, stream_url in format_dict.items(): if invalid_url(stream_url): continue format_urls.add(stream_url) mobj = re.search(r'(http|hls)_([^_]+)_(\d+)_url', key) if mobj: protocol, ext, abr = mobj.groups() add_format({ 'abr': abr, 'ext': ext, 'url': stream_url, }, protocol) if not formats: # We fallback to the stream_url in the original info, this # cannot be always used, sometimes it can give an HTTP 404 error urlh = self._request_webpage( HEADRequest(info.get('stream_url') or track_base_url + '/stream'), track_id, query=query, fatal=False) if urlh: stream_url = urlh.geturl() if not invalid_url(stream_url): add_format({'url': stream_url}, 'http') for f in formats: f['vcodec'] = 'none' self._sort_formats(formats) user = info.get('user') or {} thumbnails = [] artwork_url = info.get('artwork_url') thumbnail = artwork_url or user.get('avatar_url') if isinstance(thumbnail, compat_str): if re.search(self._IMAGE_REPL_RE, thumbnail): for image_id, size in self._ARTWORK_MAP.items(): i = { 'id': image_id, 'url': re.sub(self._IMAGE_REPL_RE, '-%s.jpg' % image_id, thumbnail), } if image_id == 'tiny' and not artwork_url: size = 18 elif image_id == 'original': i['preference'] = 10 if size: i.update({ 'width': size, 'height': size, }) thumbnails.append(i) else: thumbnails = [{'url': thumbnail}] def extract_count(key): return int_or_none(info.get('%s_count' % key)) return { 'id': track_id, 'uploader': user.get('username'), 'uploader_id': str_or_none(user.get('id')) or user.get('permalink'), 'uploader_url': user.get('permalink_url'), 'timestamp': unified_timestamp(info.get('created_at')), 'title': title, 'description': info.get('description'), 'thumbnails': thumbnails, 'duration': float_or_none(info.get('duration'), 1000), 
'webpage_url': info.get('permalink_url'), 'license': info.get('license'), 'view_count': extract_count('playback'), 'like_count': extract_count('favoritings') or extract_count('likes'), 'comment_count': extract_count('comment'), 'repost_count': extract_count('reposts'), 'genre': info.get('genre'), 'formats': formats } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) track_id = mobj.group('track_id') query = { 'client_id': self._CLIENT_ID, } if track_id: info_json_url = self._API_V2_BASE + 'tracks/' + track_id full_title = track_id token = mobj.group('secret_token') if token: query['secret_token'] = token else: full_title = resolve_title = '%s/%s' % mobj.group('uploader', 'title') token = mobj.group('token') if token: resolve_title += '/%s' % token info_json_url = self._resolv_url(self._BASE_URL + resolve_title) version = 2 info = self._download_json( info_json_url, full_title, 'Downloading info JSON', query=query, fatal=False) if not info: info = self._download_json( info_json_url.replace(self._API_V2_BASE, self._API_BASE), full_title, 'Downloading info JSON', query=query) version = 1 return self._extract_info_dict(info, full_title, token, version) class SoundcloudPlaylistBaseIE(SoundcloudIE): def _extract_track_entries(self, tracks, token=None): entries = [] for track in tracks: track_id = str_or_none(track.get('id')) url = track.get('permalink_url') if not url: if not track_id: continue url = self._API_V2_BASE + 'tracks/' + track_id if token: url += '?secret_token=' + token entries.append(self.url_result( url, SoundcloudIE.ie_key(), track_id)) return entries class SoundcloudSetIE(SoundcloudPlaylistBaseIE): _VALID_URL = r'https?://(?:(?:www|m)\.)?soundcloud\.com/(?P<uploader>[\w\d-]+)/sets/(?P<slug_title>[\w\d-]+)(?:/(?P<token>[^?/]+))?' 
IE_NAME = 'soundcloud:set' _TESTS = [{ 'url': 'https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep', 'info_dict': { 'id': '2284613', 'title': 'The Royal Concept EP', }, 'playlist_mincount': 5, }, { 'url': 'https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep/token', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) full_title = '%s/sets/%s' % mobj.group('uploader', 'slug_title') token = mobj.group('token') if token: full_title += '/' + token info = self._download_json(self._resolv_url( self._BASE_URL + full_title), full_title) if 'errors' in info: msgs = (compat_str(err['error_message']) for err in info['errors']) raise ExtractorError('unable to download video webpage: %s' % ','.join(msgs)) entries = self._extract_track_entries(info['tracks'], token) return self.playlist_result( entries, str_or_none(info.get('id')), info.get('title')) class SoundcloudPagedPlaylistBaseIE(SoundcloudPlaylistBaseIE): def _extract_playlist(self, base_url, playlist_id, playlist_title): COMMON_QUERY = { 'limit': 2000000000, 'client_id': self._CLIENT_ID, 'linked_partitioning': '1', } query = COMMON_QUERY.copy() query['offset'] = 0 next_href = base_url entries = [] for i in itertools.count(): response = self._download_json( next_href, playlist_id, 'Downloading track page %s' % (i + 1), query=query) collection = response['collection'] if not isinstance(collection, list): collection = [] # Empty collection may be returned, in this case we proceed # straight to next_href def resolve_entry(candidates): for cand in candidates: if not isinstance(cand, dict): continue permalink_url = url_or_none(cand.get('permalink_url')) if not permalink_url: continue return self.url_result( permalink_url, SoundcloudIE.ie_key() if SoundcloudIE.suitable(permalink_url) else None, str_or_none(cand.get('id')), cand.get('title')) for e in collection: entry = resolve_entry((e, e.get('track'), e.get('playlist'))) if entry: entries.append(entry) 
next_href = response.get('next_href') if not next_href: break next_href = response['next_href'] parsed_next_href = compat_urlparse.urlparse(next_href) query = compat_urlparse.parse_qs(parsed_next_href.query) query.update(COMMON_QUERY) return { '_type': 'playlist', 'id': playlist_id, 'title': playlist_title, 'entries': entries, } class SoundcloudUserIE(SoundcloudPagedPlaylistBaseIE): _VALID_URL = r'''(?x) https?:// (?:(?:www|m)\.)?soundcloud\.com/ (?P<user>[^/]+) (?:/ (?P<rsrc>tracks|albums|sets|reposts|likes|spotlight) )? /?(?:[?#].*)?$ ''' IE_NAME = 'soundcloud:user' _TESTS = [{ 'url': 'https://soundcloud.com/soft-cell-official', 'info_dict': { 'id': '207965082', 'title': 'Soft Cell (All)', }, 'playlist_mincount': 28, }, { 'url': 'https://soundcloud.com/soft-cell-official/tracks', 'info_dict': { 'id': '207965082', 'title': 'Soft Cell (Tracks)', }, 'playlist_mincount': 27, }, { 'url': 'https://soundcloud.com/soft-cell-official/albums', 'info_dict': { 'id': '207965082', 'title': 'Soft Cell (Albums)', }, 'playlist_mincount': 1, }, { 'url': 'https://soundcloud.com/jcv246/sets', 'info_dict': { 'id': '12982173', 'title': 'Jordi / cv (Sets)', }, 'playlist_mincount': 2, }, { 'url': 'https://soundcloud.com/jcv246/reposts', 'info_dict': { 'id': '12982173', 'title': 'Jordi / cv (Reposts)', }, 'playlist_mincount': 6, }, { 'url': 'https://soundcloud.com/clalberg/likes', 'info_dict': { 'id': '11817582', 'title': 'clalberg (Likes)', }, 'playlist_mincount': 5, }, { 'url': 'https://soundcloud.com/grynpyret/spotlight', 'info_dict': { 'id': '7098329', 'title': 'Grynpyret (Spotlight)', }, 'playlist_mincount': 1, }] _BASE_URL_MAP = { 'all': 'stream/users/%s', 'tracks': 'users/%s/tracks', 'albums': 'users/%s/albums', 'sets': 'users/%s/playlists', 'reposts': 'stream/users/%s/reposts', 'likes': 'users/%s/likes', 'spotlight': 'users/%s/spotlight', } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) uploader = mobj.group('user') user = self._download_json( 
self._resolv_url(self._BASE_URL + uploader), uploader, 'Downloading user info') resource = mobj.group('rsrc') or 'all' return self._extract_playlist( self._API_V2_BASE + self._BASE_URL_MAP[resource] % user['id'], str_or_none(user.get('id')), '%s (%s)' % (user['username'], resource.capitalize())) class SoundcloudTrackStationIE(SoundcloudPagedPlaylistBaseIE): _VALID_URL = r'https?://(?:(?:www|m)\.)?soundcloud\.com/stations/track/[^/]+/(?P<id>[^/?#&]+)' IE_NAME = 'soundcloud:trackstation' _TESTS = [{ 'url': 'https://soundcloud.com/stations/track/officialsundial/your-text', 'info_dict': { 'id': '286017854', 'title': 'Track station: your text', }, 'playlist_mincount': 47, }] def _real_extract(self, url): track_name = self._match_id(url) track = self._download_json(self._resolv_url(url), track_name) track_id = self._search_regex( r'soundcloud:track-stations:(\d+)', track['id'], 'track id') return self._extract_playlist( self._API_V2_BASE + 'stations/%s/tracks' % track['id'], track_id, 'Track station: %s' % track['title']) class SoundcloudPlaylistIE(SoundcloudPlaylistBaseIE): _VALID_URL = r'https?://api(?:-v2)?\.soundcloud\.com/playlists/(?P<id>[0-9]+)(?:/?\?secret_token=(?P<token>[^&]+?))?$' IE_NAME = 'soundcloud:playlist' _TESTS = [{ 'url': 'https://api.soundcloud.com/playlists/4110309', 'info_dict': { 'id': '4110309', 'title': 'TILT Brass - Bowery Poetry Club, August \'03 [Non-Site SCR 02]', 'description': 're:.*?TILT Brass - Bowery Poetry Club', }, 'playlist_count': 6, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) playlist_id = mobj.group('id') query = { 'client_id': self._CLIENT_ID, } token = mobj.group('token') if token: query['secret_token'] = token data = self._download_json( self._API_V2_BASE + 'playlists/' + playlist_id, playlist_id, 'Downloading playlist', query=query) entries = self._extract_track_entries(data['tracks'], token) return self.playlist_result( entries, playlist_id, data.get('title'), data.get('description')) class 
SoundcloudSearchIE(SearchInfoExtractor, SoundcloudIE): IE_NAME = 'soundcloud:search' IE_DESC = 'Soundcloud search' _MAX_RESULTS = float('inf') _TESTS = [{ 'url': 'scsearch15:post-avant jazzcore', 'info_dict': { 'title': 'post-avant jazzcore', }, 'playlist_count': 15, }] _SEARCH_KEY = 'scsearch' _MAX_RESULTS_PER_PAGE = 200 _DEFAULT_RESULTS_PER_PAGE = 50 def _get_collection(self, endpoint, collection_id, **query): limit = min( query.get('limit', self._DEFAULT_RESULTS_PER_PAGE), self._MAX_RESULTS_PER_PAGE) query.update({ 'limit': limit, 'client_id': self._CLIENT_ID, 'linked_partitioning': 1, 'offset': 0, }) next_url = update_url_query(self._API_V2_BASE + endpoint, query) collected_results = 0 for i in itertools.count(1): response = self._download_json( next_url, collection_id, 'Downloading page {0}'.format(i), 'Unable to download API page') collection = response.get('collection', []) if not collection: break collection = list(filter(bool, collection)) collected_results += len(collection) for item in collection: yield self.url_result(item['uri'], SoundcloudIE.ie_key()) if not collection or collected_results >= limit: break next_url = response.get('next_href') if not next_url: break def _get_n_results(self, query, n): tracks = self._get_collection('search/tracks', query, limit=n, q=query) return self.playlist_result(tracks, playlist_title=query)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/soundgasm.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor class SoundgasmIE(InfoExtractor): IE_NAME = 'soundgasm' _VALID_URL = r'https?://(?:www\.)?soundgasm\.net/u/(?P<user>[0-9a-zA-Z_-]+)/(?P<display_id>[0-9a-zA-Z_-]+)' _TEST = { 'url': 'http://soundgasm.net/u/ytdl/Piano-sample', 'md5': '010082a2c802c5275bb00030743e75ad', 'info_dict': { 'id': '88abd86ea000cafe98f96321b23cc1206cbcbcc9', 'ext': 'm4a', 'title': 'Piano sample', 'description': 'Royalty Free Sample Music', 'uploader': 'ytdl', } } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) display_id = mobj.group('display_id') webpage = self._download_webpage(url, display_id) audio_url = self._html_search_regex( r'(?s)m4a\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage, 'audio URL', group='url') title = self._search_regex( r'<div[^>]+\bclass=["\']jp-title[^>]+>([^<]+)', webpage, 'title', default=display_id) description = self._html_search_regex( (r'(?s)<div[^>]+\bclass=["\']jp-description[^>]+>(.+?)</div>', r'(?s)<li>Description:\s(.*?)<\/li>'), webpage, 'description', fatal=False) audio_id = self._search_regex( r'/([^/]+)\.m4a', audio_url, 'audio id', default=display_id) return { 'id': audio_id, 'display_id': display_id, 'url': audio_url, 'vcodec': 'none', 'title': title, 'description': description, 'uploader': mobj.group('user'), } class SoundgasmProfileIE(InfoExtractor): IE_NAME = 'soundgasm:profile' _VALID_URL = r'https?://(?:www\.)?soundgasm\.net/u/(?P<id>[^/]+)/?(?:\#.*)?$' _TEST = { 'url': 'http://soundgasm.net/u/ytdl', 'info_dict': { 'id': 'ytdl', }, 'playlist_count': 1, } def _real_extract(self, url): profile_id = self._match_id(url) webpage = self._download_webpage(url, profile_id) entries = [ self.url_result(audio_url, 'Soundgasm') for audio_url in re.findall(r'href="([^"]+/u/%s/[^"]+)' % profile_id, webpage)] return self.playlist_result(entries, profile_id)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/southpark.py
# coding: utf-8 from __future__ import unicode_literals from .mtv import MTVServicesInfoExtractor class SouthParkIE(MTVServicesInfoExtractor): IE_NAME = 'southpark.cc.com' _VALID_URL = r'https?://(?:www\.)?(?P<url>southpark\.cc\.com/(?:clips|(?:full-)?episodes|collections)/(?P<id>.+?)(\?|#|$))' _FEED_URL = 'http://www.southparkstudios.com/feeds/video-player/mrss' _TESTS = [{ 'url': 'http://southpark.cc.com/clips/104437/bat-daded#tab=featured', 'info_dict': { 'id': 'a7bff6c2-ed00-11e0-aca6-0026b9414f30', 'ext': 'mp4', 'title': 'South Park|Bat Daded', 'description': 'Randy disqualifies South Park by getting into a fight with Bat Dad.', 'timestamp': 1112760000, 'upload_date': '20050406', }, }, { 'url': 'http://southpark.cc.com/collections/7758/fan-favorites/1', 'only_matching': True, }] class SouthParkEsIE(SouthParkIE): IE_NAME = 'southpark.cc.com:español' _VALID_URL = r'https?://(?:www\.)?(?P<url>southpark\.cc\.com/episodios-en-espanol/(?P<id>.+?)(\?|#|$))' _LANG = 'es' _TESTS = [{ 'url': 'http://southpark.cc.com/episodios-en-espanol/s01e01-cartman-consigue-una-sonda-anal#source=351c1323-0b96-402d-a8b9-40d01b2e9bde&position=1&sort=!airdate', 'info_dict': { 'title': 'Cartman Consigue Una Sonda Anal', 'description': 'Cartman Consigue Una Sonda Anal', }, 'playlist_count': 4, 'skip': 'Geo-restricted', }] class SouthParkDeIE(SouthParkIE): IE_NAME = 'southpark.de' _VALID_URL = r'https?://(?:www\.)?(?P<url>southpark\.de/(?:clips|alle-episoden|collections)/(?P<id>.+?)(\?|#|$))' _FEED_URL = 'http://www.southpark.de/feeds/video-player/mrss/' _TESTS = [{ 'url': 'http://www.southpark.de/clips/uygssh/the-government-wont-respect-my-privacy#tab=featured', 'info_dict': { 'id': '85487c96-b3b9-4e39-9127-ad88583d9bf2', 'ext': 'mp4', 'title': 'South Park|The Government Won\'t Respect My Privacy', 'description': 'Cartman explains the benefits of "Shitter" to Stan, Kyle and Craig.', 'timestamp': 1380160800, 'upload_date': '20130926', }, }, { # non-ASCII characters in initial URL 'url': 
'http://www.southpark.de/alle-episoden/s18e09-hashtag-aufwärmen', 'info_dict': { 'title': 'Hashtag „Aufwärmen“', 'description': 'Kyle will mit seinem kleinen Bruder Ike Videospiele spielen. Als der nicht mehr mit ihm spielen will, hat Kyle Angst, dass er die Kids von heute nicht mehr versteht.', }, 'playlist_count': 3, }, { # non-ASCII characters in redirect URL 'url': 'http://www.southpark.de/alle-episoden/s18e09', 'info_dict': { 'title': 'Hashtag „Aufwärmen“', 'description': 'Kyle will mit seinem kleinen Bruder Ike Videospiele spielen. Als der nicht mehr mit ihm spielen will, hat Kyle Angst, dass er die Kids von heute nicht mehr versteht.', }, 'playlist_count': 3, }, { 'url': 'http://www.southpark.de/collections/2476/superhero-showdown/1', 'only_matching': True, }] class SouthParkNlIE(SouthParkIE): IE_NAME = 'southpark.nl' _VALID_URL = r'https?://(?:www\.)?(?P<url>southpark\.nl/(?:clips|(?:full-)?episodes|collections)/(?P<id>.+?)(\?|#|$))' _FEED_URL = 'http://www.southpark.nl/feeds/video-player/mrss/' _TESTS = [{ 'url': 'http://www.southpark.nl/full-episodes/s18e06-freemium-isnt-free', 'info_dict': { 'title': 'Freemium Isn\'t Free', 'description': 'Stan is addicted to the new Terrance and Phillip mobile game.', }, 'playlist_mincount': 3, }] class SouthParkDkIE(SouthParkIE): IE_NAME = 'southparkstudios.dk' _VALID_URL = r'https?://(?:www\.)?(?P<url>southparkstudios\.(?:dk|nu)/(?:clips|full-episodes|collections)/(?P<id>.+?)(\?|#|$))' _FEED_URL = 'http://www.southparkstudios.dk/feeds/video-player/mrss/' _TESTS = [{ 'url': 'http://www.southparkstudios.dk/full-episodes/s18e07-grounded-vindaloop', 'info_dict': { 'title': 'Grounded Vindaloop', 'description': 'Butters is convinced he\'s living in a virtual reality.', }, 'playlist_mincount': 3, }, { 'url': 'http://www.southparkstudios.dk/collections/2476/superhero-showdown/1', 'only_matching': True, }, { 'url': 'http://www.southparkstudios.nu/collections/2476/superhero-showdown/1', 'only_matching': True, }]
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/spankbang.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( ExtractorError, merge_dicts, orderedSet, parse_duration, parse_resolution, str_to_int, url_or_none, urlencode_postdata, ) class SpankBangIE(InfoExtractor): _VALID_URL = r'https?://(?:[^/]+\.)?spankbang\.com/(?P<id>[\da-z]+)/(?:video|play|embed)\b' _TESTS = [{ 'url': 'http://spankbang.com/3vvn/video/fantasy+solo', 'md5': '1cc433e1d6aa14bc376535b8679302f7', 'info_dict': { 'id': '3vvn', 'ext': 'mp4', 'title': 'fantasy solo', 'description': 'dillion harper masturbates on a bed', 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': 'silly2587', 'timestamp': 1422571989, 'upload_date': '20150129', 'age_limit': 18, } }, { # 480p only 'url': 'http://spankbang.com/1vt0/video/solvane+gangbang', 'only_matching': True, }, { # no uploader 'url': 'http://spankbang.com/lklg/video/sex+with+anyone+wedding+edition+2', 'only_matching': True, }, { # mobile page 'url': 'http://m.spankbang.com/1o2de/video/can+t+remember+her+name', 'only_matching': True, }, { # 4k 'url': 'https://spankbang.com/1vwqx/video/jade+kush+solo+4k', 'only_matching': True, }, { 'url': 'https://m.spankbang.com/3vvn/play/fantasy+solo/480p/', 'only_matching': True, }, { 'url': 'https://m.spankbang.com/3vvn/play', 'only_matching': True, }, { 'url': 'https://spankbang.com/2y3td/embed/', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage( url.replace('/%s/embed' % video_id, '/%s/video' % video_id), video_id, headers={'Cookie': 'country=US'}) if re.search(r'<[^>]+\bid=["\']video_removed', webpage): raise ExtractorError( 'Video %s is not available' % video_id, expected=True) formats = [] def extract_format(format_id, format_url): f_url = url_or_none(format_url) if not f_url: return f = parse_resolution(format_id) f.update({ 'url': f_url, 'format_id': format_id, }) formats.append(f) STREAM_URL_PREFIX = 'stream_url_' for mobj in re.finditer( 
r'%s(?P<id>[^\s=]+)\s*=\s*(["\'])(?P<url>(?:(?!\2).)+)\2' % STREAM_URL_PREFIX, webpage): extract_format(mobj.group('id', 'url')) if not formats: stream_key = self._search_regex( r'data-streamkey\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage, 'stream key', group='value') sb_csrf_session = self._get_cookies( 'https://spankbang.com')['sb_csrf_session'].value stream = self._download_json( 'https://spankbang.com/api/videos/stream', video_id, 'Downloading stream JSON', data=urlencode_postdata({ 'id': stream_key, 'data': 0, 'sb_csrf_session': sb_csrf_session, }), headers={ 'Referer': url, 'X-CSRFToken': sb_csrf_session, }) for format_id, format_url in stream.items(): if format_id.startswith(STREAM_URL_PREFIX): if format_url and isinstance(format_url, list): format_url = format_url[0] extract_format( format_id[len(STREAM_URL_PREFIX):], format_url) self._sort_formats(formats) info = self._search_json_ld(webpage, video_id, default={}) title = self._html_search_regex( r'(?s)<h1[^>]*>(.+?)</h1>', webpage, 'title', default=None) description = self._search_regex( r'<div[^>]+\bclass=["\']bottom[^>]+>\s*<p>[^<]*</p>\s*<p>([^<]+)', webpage, 'description', default=None) thumbnail = self._og_search_thumbnail(webpage, default=None) uploader = self._html_search_regex( (r'(?s)<li[^>]+class=["\']profile[^>]+>(.+?)</a>', r'class="user"[^>]*><img[^>]+>([^<]+)'), webpage, 'uploader', default=None) duration = parse_duration(self._search_regex( r'<div[^>]+\bclass=["\']right_side[^>]+>\s*<span>([^<]+)', webpage, 'duration', default=None)) view_count = str_to_int(self._search_regex( r'([\d,.]+)\s+plays', webpage, 'view count', default=None)) age_limit = self._rta_search(webpage) return merge_dicts({ 'id': video_id, 'title': title or video_id, 'description': description, 'thumbnail': thumbnail, 'uploader': uploader, 'duration': duration, 'view_count': view_count, 'formats': formats, 'age_limit': age_limit, }, info ) class SpankBangPlaylistIE(InfoExtractor): _VALID_URL = 
r'https?://(?:[^/]+\.)?spankbang\.com/(?P<id>[\da-z]+)/playlist/[^/]+' _TEST = { 'url': 'https://spankbang.com/ug0k/playlist/big+ass+titties', 'info_dict': { 'id': 'ug0k', 'title': 'Big Ass Titties', }, 'playlist_mincount': 50, } def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage( url, playlist_id, headers={'Cookie': 'country=US; mobile=on'}) entries = [self.url_result( 'https://spankbang.com/%s/video' % video_id, ie=SpankBangIE.ie_key(), video_id=video_id) for video_id in orderedSet(re.findall( r'<a[^>]+\bhref=["\']/?([\da-z]+)/play/', webpage))] title = self._html_search_regex( r'<h1>([^<]+)\s+playlist</h1>', webpage, 'playlist title', fatal=False) return self.playlist_result(entries, playlist_id, title)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/spankwire.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_urllib_parse_unquote, compat_urllib_parse_urlparse, ) from ..utils import ( sanitized_Request, str_to_int, unified_strdate, ) from ..aes import aes_decrypt_text class SpankwireIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?(?P<url>spankwire\.com/[^/]*/video(?P<id>[0-9]+)/?)' _TESTS = [{ # download URL pattern: */<height>P_<tbr>K_<video_id>.mp4 'url': 'http://www.spankwire.com/Buckcherry-s-X-Rated-Music-Video-Crazy-Bitch/video103545/', 'md5': '8bbfde12b101204b39e4b9fe7eb67095', 'info_dict': { 'id': '103545', 'ext': 'mp4', 'title': 'Buckcherry`s X Rated Music Video Crazy Bitch', 'description': 'Crazy Bitch X rated music video.', 'uploader': 'oreusz', 'uploader_id': '124697', 'upload_date': '20070507', 'age_limit': 18, } }, { # download URL pattern: */mp4_<format_id>_<video_id>.mp4 'url': 'http://www.spankwire.com/Titcums-Compiloation-I/video1921551/', 'md5': '09b3c20833308b736ae8902db2f8d7e6', 'info_dict': { 'id': '1921551', 'ext': 'mp4', 'title': 'Titcums Compiloation I', 'description': 'cum on tits', 'uploader': 'dannyh78999', 'uploader_id': '3056053', 'upload_date': '20150822', 'age_limit': 18, }, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') req = sanitized_Request('http://www.' 
+ mobj.group('url')) req.add_header('Cookie', 'age_verified=1') webpage = self._download_webpage(req, video_id) title = self._html_search_regex( r'<h1>([^<]+)', webpage, 'title') description = self._html_search_regex( r'(?s)<div\s+id="descriptionContent">(.+?)</div>', webpage, 'description', fatal=False) thumbnail = self._html_search_regex( r'playerData\.screenShot\s*=\s*["\']([^"\']+)["\']', webpage, 'thumbnail', fatal=False) uploader = self._html_search_regex( r'by:\s*<a [^>]*>(.+?)</a>', webpage, 'uploader', fatal=False) uploader_id = self._html_search_regex( r'by:\s*<a href="/(?:user/viewProfile|Profile\.aspx)\?.*?UserId=(\d+).*?"', webpage, 'uploader id', fatal=False) upload_date = unified_strdate(self._html_search_regex( r'</a> on (.+?) at \d+:\d+', webpage, 'upload date', fatal=False)) view_count = str_to_int(self._html_search_regex( r'<div id="viewsCounter"><span>([\d,\.]+)</span> views</div>', webpage, 'view count', fatal=False)) comment_count = str_to_int(self._html_search_regex( r'<span\s+id="spCommentCount"[^>]*>([\d,\.]+)</span>', webpage, 'comment count', fatal=False)) videos = re.findall( r'playerData\.cdnPath([0-9]{3,})\s*=\s*(?:encodeURIComponent\()?["\']([^"\']+)["\']', webpage) heights = [int(video[0]) for video in videos] video_urls = list(map(compat_urllib_parse_unquote, [video[1] for video in videos])) if webpage.find(r'flashvars\.encrypted = "true"') != -1: password = self._search_regex( r'flashvars\.video_title = "([^"]+)', webpage, 'password').replace('+', ' ') video_urls = list(map( lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls)) formats = [] for height, video_url in zip(heights, video_urls): path = compat_urllib_parse_urlparse(video_url).path m = re.search(r'/(?P<height>\d+)[pP]_(?P<tbr>\d+)[kK]', path) if m: tbr = int(m.group('tbr')) height = int(m.group('height')) else: tbr = None formats.append({ 'url': video_url, 'format_id': '%dp' % height, 'height': height, 'tbr': tbr, }) self._sort_formats(formats) 
age_limit = self._rta_search(webpage) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'uploader': uploader, 'uploader_id': uploader_id, 'upload_date': upload_date, 'view_count': view_count, 'comment_count': comment_count, 'formats': formats, 'age_limit': age_limit, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/spiegel.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from .nexx import ( NexxIE, NexxEmbedIE, ) from .spiegeltv import SpiegeltvIE from ..compat import compat_urlparse from ..utils import ( parse_duration, strip_or_none, unified_timestamp, ) class SpiegelIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<id>[0-9]+)(?:-embed|-iframe)?(?:\.html)?(?:#.*)?$' _TESTS = [{ 'url': 'http://www.spiegel.de/video/vulkan-tungurahua-in-ecuador-ist-wieder-aktiv-video-1259285.html', 'md5': 'b57399839d055fccfeb9a0455c439868', 'info_dict': { 'id': '563747', 'ext': 'mp4', 'title': 'Vulkanausbruch in Ecuador: Der "Feuerschlund" ist wieder aktiv', 'description': 'md5:8029d8310232196eb235d27575a8b9f4', 'duration': 49, 'upload_date': '20130311', 'timestamp': 1362994320, }, }, { 'url': 'http://www.spiegel.de/video/schach-wm-videoanalyse-des-fuenften-spiels-video-1309159.html', 'md5': '5b6c2f4add9d62912ed5fc78a1faed80', 'info_dict': { 'id': '580988', 'ext': 'mp4', 'title': 'Schach-WM in der Videoanalyse: Carlsen nutzt die Fehlgriffe des Titelverteidigers', 'description': 'md5:c2322b65e58f385a820c10fa03b2d088', 'duration': 983, 'upload_date': '20131115', 'timestamp': 1384546642, }, }, { 'url': 'http://www.spiegel.de/video/astronaut-alexander-gerst-von-der-iss-station-beantwortet-fragen-video-1519126-embed.html', 'md5': '97b91083a672d72976faa8433430afb9', 'info_dict': { 'id': '601883', 'ext': 'mp4', 'description': 'SPIEGEL ONLINE-Nutzer durften den deutschen Astronauten Alexander Gerst über sein Leben auf der ISS-Station befragen. 
Hier kommen seine Antworten auf die besten sechs Fragen.', 'title': 'Fragen an Astronaut Alexander Gerst: "Bekommen Sie die Tageszeiten mit?"', 'upload_date': '20140904', 'timestamp': 1409834160, } }, { 'url': 'http://www.spiegel.de/video/astronaut-alexander-gerst-von-der-iss-station-beantwortet-fragen-video-1519126-iframe.html', 'only_matching': True, }, { # nexx video 'url': 'http://www.spiegel.de/video/spiegel-tv-magazin-ueber-guellekrise-in-schleswig-holstein-video-99012776.html', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) metadata_url = 'http://www.spiegel.de/video/metadata/video-%s.json' % video_id handle = self._request_webpage(metadata_url, video_id) # 302 to spiegel.tv, like http://www.spiegel.de/video/der-film-zum-wochenende-die-wahrheit-ueber-maenner-video-99003272.html if SpiegeltvIE.suitable(handle.geturl()): return self.url_result(handle.geturl(), 'Spiegeltv') video_data = self._parse_json(self._webpage_read_content( handle, metadata_url, video_id), video_id) title = video_data['title'] nexx_id = video_data['nexxOmniaId'] domain_id = video_data.get('nexxOmniaDomain') or '748' return { '_type': 'url_transparent', 'id': video_id, 'url': 'nexx:%s:%s' % (domain_id, nexx_id), 'title': title, 'description': strip_or_none(video_data.get('teaser')), 'duration': parse_duration(video_data.get('duration')), 'timestamp': unified_timestamp(video_data.get('datum')), 'ie_key': NexxIE.ie_key(), } class SpiegelArticleIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?spiegel\.de/(?!video/)[^?#]*?-(?P<id>[0-9]+)\.html' IE_NAME = 'Spiegel:Article' IE_DESC = 'Articles on spiegel.de' _TESTS = [{ 'url': 'http://www.spiegel.de/sport/sonst/badminton-wm-die-randsportart-soll-populaerer-werden-a-987092.html', 'info_dict': { 'id': '1516455', 'ext': 'mp4', 'title': 'Faszination Badminton: Nennt es bloß nicht Federball', 'description': 're:^Patrick Kämnitz gehört.{100,}', 'upload_date': '20140825', }, }, { 'url': 
'http://www.spiegel.de/wissenschaft/weltall/astronaut-alexander-gerst-antwortet-spiegel-online-lesern-a-989876.html', 'info_dict': { }, 'playlist_count': 6, }, { # Nexx iFrame embed 'url': 'http://www.spiegel.de/sptv/spiegeltv/spiegel-tv-ueber-schnellste-katapult-achterbahn-der-welt-taron-a-1137884.html', 'info_dict': { 'id': '161464', 'ext': 'mp4', 'title': 'Nervenkitzel Achterbahn', 'alt_title': 'Karussellbauer in Deutschland', 'description': 'md5:ffe7b1cc59a01f585e0569949aef73cc', 'release_year': 2005, 'creator': 'SPIEGEL TV', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 2761, 'timestamp': 1394021479, 'upload_date': '20140305', }, 'params': { 'format': 'bestvideo', 'skip_download': True, }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) # Single video on top of the page video_link = self._search_regex( r'<a href="([^"]+)" onclick="return spOpenVideo\(this,', webpage, 'video page URL', default=None) if video_link: video_url = compat_urlparse.urljoin( self.http_scheme() + '//spiegel.de/', video_link) return self.url_result(video_url) # Multiple embedded videos embeds = re.findall( r'<div class="vid_holder[0-9]+.*?</div>\s*.*?url\s*=\s*"([^"]+)"', webpage) entries = [ self.url_result(compat_urlparse.urljoin( self.http_scheme() + '//spiegel.de/', embed_path)) for embed_path in embeds] if embeds: return self.playlist_result(entries) return self.playlist_from_matches( NexxEmbedIE._extract_urls(webpage), ie=NexxEmbedIE.ie_key())
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/spiegeltv.py
from __future__ import unicode_literals from .common import InfoExtractor from .nexx import NexxIE class SpiegeltvIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?spiegel\.tv/videos/(?P<id>\d+)' _TEST = { 'url': 'http://www.spiegel.tv/videos/161681-flug-mh370/', 'only_matching': True, } def _real_extract(self, url): return self.url_result( 'https://api.nexx.cloud/v3/748/videos/byid/%s' % self._match_id(url), ie=NexxIE.ie_key())
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/spike.py
from __future__ import unicode_literals

from .mtv import MTVServicesInfoExtractor


class BellatorIE(MTVServicesInfoExtractor):
    # bellator.com is an MTV Services site; all heavy lifting happens in the
    # base class, this subclass only supplies the URL pattern and feed URL.
    _VALID_URL = r'https?://(?:www\.)?bellator\.com/[^/]+/[\da-z]{6}(?:[/?#&]|$)'
    _TESTS = [{
        'url': 'http://www.bellator.com/fight/atwr7k/bellator-158-michael-page-vs-evangelista-cyborg',
        'info_dict': {
            'id': 'b55e434e-fde1-4a98-b7cc-92003a034de4',
            'ext': 'mp4',
            'title': 'Douglas Lima vs. Paul Daley - Round 1',
            'description': 'md5:805a8dd29310fd611d32baba2f767885',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'http://www.bellator.com/video-clips/bw6k7n/bellator-158-foundations-michael-venom-page',
        'only_matching': True,
    }]

    _FEED_URL = 'http://www.bellator.com/feeds/mrss/'
    _GEO_COUNTRIES = ['US']


class ParamountNetworkIE(MTVServicesInfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?paramountnetwork\.com/[^/]+/[\da-z]{6}(?:[/?#&]|$)'
    _TESTS = [{
        'url': 'http://www.paramountnetwork.com/episodes/j830qm/lip-sync-battle-joel-mchale-vs-jim-rash-season-2-ep-13',
        'info_dict': {
            'id': '37ace3a8-1df6-48be-85b8-38df8229e241',
            'ext': 'mp4',
            'title': 'Lip Sync Battle|April 28, 2016|2|209|Joel McHale Vs. Jim Rash|Act 1',
            'description': 'md5:a739ca8f978a7802f67f8016d27ce114',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }]

    _FEED_URL = 'http://www.paramountnetwork.com/feeds/mrss/'
    _GEO_COUNTRIES = ['US']

    def _extract_mgid(self, webpage):
        """Pull the media URI out of the page's window.__DATA__ JSON blob."""
        root_data = self._parse_json(self._search_regex(
            r'window\.__DATA__\s*=\s*({.+})',
            webpage, 'data'), None)

        def find_sub_data(data, data_type):
            # First child of the given component type; raises StopIteration
            # (surfaced as an extraction error) when the layout changes.
            return next(c for c in data['children'] if c.get('type') == data_type)

        c = find_sub_data(find_sub_data(root_data, 'MainContainer'), 'VideoPlayer')
        return c['props']['media']['video']['config']['uri']
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/sport5.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ExtractorError class Sport5IE(InfoExtractor): _VALID_URL = r'https?://(?:www|vod)?\.sport5\.co\.il/.*\b(?:Vi|docID)=(?P<id>\d+)' _TESTS = [ { 'url': 'http://vod.sport5.co.il/?Vc=147&Vi=176331&Page=1', 'info_dict': { 'id': 's5-Y59xx1-GUh2', 'ext': 'mp4', 'title': 'ולנסיה-קורדובה 0:3', 'description': 'אלקאסר, גאייה ופגולי סידרו לקבוצה של נונו ניצחון על קורדובה ואת המקום הראשון בליגה', 'duration': 228, 'categories': list, }, 'skip': 'Blocked outside of Israel', }, { 'url': 'http://www.sport5.co.il/articles.aspx?FolderID=3075&docID=176372&lang=HE', 'info_dict': { 'id': 's5-SiXxx1-hKh2', 'ext': 'mp4', 'title': 'GOALS_CELTIC_270914.mp4', 'description': '', 'duration': 87, 'categories': list, }, 'skip': 'Blocked outside of Israel', } ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) media_id = mobj.group('id') webpage = self._download_webpage(url, media_id) video_id = self._html_search_regex(r'clipId=([\w-]+)', webpage, 'video id') metadata = self._download_xml( 'http://sport5-metadata-rr-d.nsacdn.com/vod/vod/%s/HDS/metadata.xml' % video_id, video_id) error = metadata.find('./Error') if error is not None: raise ExtractorError( '%s returned error: %s - %s' % ( self.IE_NAME, error.find('./Name').text, error.find('./Description').text), expected=True) title = metadata.find('./Title').text description = metadata.find('./Description').text duration = int(metadata.find('./Duration').text) posters_el = metadata.find('./PosterLinks') thumbnails = [{ 'url': thumbnail.text, 'width': int(thumbnail.get('width')), 'height': int(thumbnail.get('height')), } for thumbnail in posters_el.findall('./PosterIMG')] if posters_el is not None else [] categories_el = metadata.find('./Categories') categories = [ cat.get('name') for cat in categories_el.findall('./Category') ] if categories_el is not None else [] formats = [{ 'url': fmt.text, 'ext': 'mp4', 
'vbr': int(fmt.get('bitrate')), 'width': int(fmt.get('width')), 'height': int(fmt.get('height')), } for fmt in metadata.findall('./PlaybackLinks/FileURL')] self._sort_formats(formats) return { 'id': video_id, 'title': title, 'description': description, 'thumbnails': thumbnails, 'duration': duration, 'categories': categories, 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/sportbox.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( determine_ext, int_or_none, js_to_json, merge_dicts, ) class SportBoxIE(InfoExtractor): _VALID_URL = r'https?://(?:news\.sportbox|matchtv)\.ru/vdl/player(?:/[^/]+/|\?.*?\bn?id=)(?P<id>\d+)' _TESTS = [{ 'url': 'http://news.sportbox.ru/vdl/player/ci/211355', 'info_dict': { 'id': '109158', 'ext': 'mp4', 'title': 'В Новороссийске прошел детский турнир «Поле славы боевой»', 'description': 'В Новороссийске прошел детский турнир «Поле славы боевой»', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 292, 'view_count': int, 'timestamp': 1426237001, 'upload_date': '20150313', }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://news.sportbox.ru/vdl/player?nid=370908&only_player=1&autostart=false&playeri=2&height=340&width=580', 'only_matching': True, }, { 'url': 'https://news.sportbox.ru/vdl/player/media/193095', 'only_matching': True, }, { 'url': 'https://news.sportbox.ru/vdl/player/media/109158', 'only_matching': True, }, { 'url': 'https://matchtv.ru/vdl/player/media/109158', 'only_matching': True, }] @staticmethod def _extract_urls(webpage): return re.findall( r'<iframe[^>]+src="(https?://(?:news\.sportbox|matchtv)\.ru/vdl/player[^"]+)"', webpage) def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) sources = self._parse_json( self._search_regex( r'(?s)playerOptions\.sources(?:WithRes)?\s*=\s*(\[.+?\])\s*;\s*\n', webpage, 'sources'), video_id, transform_source=js_to_json) formats = [] for source in sources: src = source.get('src') if not src: continue if determine_ext(src) == 'm3u8': formats.extend(self._extract_m3u8_formats( src, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) else: formats.append({ 'url': src, }) self._sort_formats(formats) player = self._parse_json( self._search_regex( r'(?s)playerOptions\s*=\s*({.+?})\s*;\s*\n', webpage, 
'player options', default='{}'), video_id, transform_source=js_to_json) media_id = player['mediaId'] info = self._search_json_ld(webpage, media_id, default={}) view_count = int_or_none(self._search_regex( r'Просмотров\s*:\s*(\d+)', webpage, 'view count', default=None)) return merge_dicts(info, { 'id': media_id, 'title': self._og_search_title(webpage, default=None) or media_id, 'thumbnail': player.get('poster'), 'duration': int_or_none(player.get('duration')), 'view_count': view_count, 'formats': formats, })
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/sportdeutschland.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( parse_iso8601, sanitized_Request, ) class SportDeutschlandIE(InfoExtractor): _VALID_URL = r'https?://sportdeutschland\.tv/(?P<sport>[^/?#]+)/(?P<id>[^?#/]+)(?:$|[?#])' _TESTS = [{ 'url': 'http://sportdeutschland.tv/badminton/live-li-ning-badminton-weltmeisterschaft-2014-kopenhagen', 'info_dict': { 'id': 'live-li-ning-badminton-weltmeisterschaft-2014-kopenhagen', 'ext': 'mp4', 'title': 're:Li-Ning Badminton Weltmeisterschaft 2014 Kopenhagen', 'categories': ['Badminton'], 'view_count': int, 'thumbnail': r're:^https?://.*\.jpg$', 'description': r're:Die Badminton-WM 2014 aus Kopenhagen bei Sportdeutschland\.TV', 'timestamp': int, 'upload_date': 're:^201408[23][0-9]$', }, 'params': { 'skip_download': 'Live stream', }, }, { 'url': 'http://sportdeutschland.tv/li-ning-badminton-wm-2014/lee-li-ning-badminton-weltmeisterschaft-2014-kopenhagen-herren-einzel-wei-vs', 'info_dict': { 'id': 'lee-li-ning-badminton-weltmeisterschaft-2014-kopenhagen-herren-einzel-wei-vs', 'ext': 'mp4', 'upload_date': '20140825', 'description': 'md5:60a20536b57cee7d9a4ec005e8687504', 'timestamp': 1408976060, 'duration': 2732, 'title': 'Li-Ning Badminton Weltmeisterschaft 2014 Kopenhagen: Herren Einzel, Wei Lee vs. 
Keun Lee', 'thumbnail': r're:^https?://.*\.jpg$', 'view_count': int, 'categories': ['Li-Ning Badminton WM 2014'], } }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') sport_id = mobj.group('sport') api_url = 'http://proxy.vidibusdynamic.net/sportdeutschland.tv/api/permalinks/%s/%s?access_token=true' % ( sport_id, video_id) req = sanitized_Request(api_url, headers={ 'Accept': 'application/vnd.vidibus.v2.html+json', 'Referer': url, }) data = self._download_json(req, video_id) asset = data['asset'] categories = [data['section']['title']] formats = [] smil_url = asset['video'] if '.smil' in smil_url: m3u8_url = smil_url.replace('.smil', '.m3u8') formats.extend( self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4')) smil_doc = self._download_xml( smil_url, video_id, note='Downloading SMIL metadata') base_url_el = smil_doc.find('./head/meta') if base_url_el: base_url = base_url_el.attrib['base'] formats.extend([{ 'format_id': 'rmtp', 'url': base_url if base_url_el else n.attrib['src'], 'play_path': n.attrib['src'], 'ext': 'flv', 'preference': -100, 'format_note': 'Seems to fail at example stream', } for n in smil_doc.findall('./body/video')]) else: formats.append({'url': smil_url}) self._sort_formats(formats) return { 'id': video_id, 'formats': formats, 'title': asset['title'], 'thumbnail': asset.get('image'), 'description': asset.get('teaser'), 'duration': asset.get('duration'), 'categories': categories, 'view_count': asset.get('views'), 'rtmp_live': asset.get('live'), 'timestamp': parse_iso8601(asset.get('date')), }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/springboardplatform.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( ExtractorError, int_or_none, xpath_attr, xpath_text, xpath_element, unescapeHTML, unified_timestamp, ) class SpringboardPlatformIE(InfoExtractor): _VALID_URL = r'''(?x) https?:// cms\.springboardplatform\.com/ (?: (?:previews|embed_iframe)/(?P<index>\d+)/video/(?P<id>\d+)| xml_feeds_advanced/index/(?P<index_2>\d+)/rss3/(?P<id_2>\d+) ) ''' _TESTS = [{ 'url': 'http://cms.springboardplatform.com/previews/159/video/981017/0/0/1', 'md5': '5c3cb7b5c55740d482561099e920f192', 'info_dict': { 'id': '981017', 'ext': 'mp4', 'title': 'Redman "BUD like YOU" "Usher Good Kisser" REMIX', 'description': 'Redman "BUD like YOU" "Usher Good Kisser" REMIX', 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1409132328, 'upload_date': '20140827', 'duration': 193, }, }, { 'url': 'http://cms.springboardplatform.com/embed_iframe/159/video/981017/rab007/rapbasement.com/1/1', 'only_matching': True, }, { 'url': 'http://cms.springboardplatform.com/embed_iframe/20/video/1731611/ki055/kidzworld.com/10', 'only_matching': True, }, { 'url': 'http://cms.springboardplatform.com/xml_feeds_advanced/index/159/rss3/981017/0/0/1/', 'only_matching': True, }] @staticmethod def _extract_urls(webpage): return [ mobj.group('url') for mobj in re.finditer( r'<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//cms\.springboardplatform\.com/embed_iframe/\d+/video/\d+.*?)\1', webpage)] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') or mobj.group('id_2') index = mobj.group('index') or mobj.group('index_2') video = self._download_xml( 'http://cms.springboardplatform.com/xml_feeds_advanced/index/%s/rss3/%s' % (index, video_id), video_id) item = xpath_element(video, './/item', 'item', fatal=True) content = xpath_element( item, './{http://search.yahoo.com/mrss/}content', 'content', fatal=True) title = unescapeHTML(xpath_text(item, './title', 'title', 
fatal=True)) video_url = content.attrib['url'] if 'error_video.mp4' in video_url: raise ExtractorError( 'Video %s no longer exists' % video_id, expected=True) duration = int_or_none(content.get('duration')) tbr = int_or_none(content.get('bitrate')) filesize = int_or_none(content.get('fileSize')) width = int_or_none(content.get('width')) height = int_or_none(content.get('height')) description = unescapeHTML(xpath_text( item, './description', 'description')) thumbnail = xpath_attr( item, './{http://search.yahoo.com/mrss/}thumbnail', 'url', 'thumbnail') timestamp = unified_timestamp(xpath_text( item, './{http://cms.springboardplatform.com/namespaces.html}created', 'timestamp')) formats = [{ 'url': video_url, 'format_id': 'http', 'tbr': tbr, 'filesize': filesize, 'width': width, 'height': height, }] m3u8_format = formats[0].copy() m3u8_format.update({ 'url': re.sub(r'(https?://)cdn\.', r'\1hls.', video_url) + '.m3u8', 'ext': 'mp4', 'format_id': 'hls', 'protocol': 'm3u8_native', }) formats.append(m3u8_format) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'timestamp': timestamp, 'duration': duration, 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/sprout.py
# coding: utf-8
from __future__ import unicode_literals

from .adobepass import AdobePassIE
from ..utils import (
    extract_attributes,
    update_url_query,
    smuggle_url,
)


class SproutIE(AdobePassIE):
    # sproutonline.com pages embed ThePlatform videos, optionally behind
    # Adobe Pass (TV-provider) authentication.
    _VALID_URL = r'https?://(?:www\.)?sproutonline\.com/watch/(?P<id>[^/?#]+)'
    _TEST = {
        'url': 'http://www.sproutonline.com/watch/cowboy-adventure',
        'md5': '74bf14128578d1e040c3ebc82088f45f',
        'info_dict': {
            'id': '9dexnwtmh8_X',
            'ext': 'mp4',
            'title': 'A Cowboy Adventure',
            'description': 'Ruff-Ruff, Tweet and Dave get to be cowboys for the day at Six Cow Corral.',
            'timestamp': 1437758640,
            'upload_date': '20150724',
            'uploader': 'NBCU-SPROUT-NEW',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        video_component = self._search_regex(
            r'(?s)(<div[^>]+data-component="video"[^>]*?>)',
            webpage, 'video component', default=None)
        if video_component:
            options = self._parse_json(extract_attributes(
                video_component)['data-options'], video_id)
            theplatform_url = options['video']
            query = {
                'mbr': 'true',
                'manifest': 'm3u',
            }
            if options.get('protected'):
                # Protected videos need an Adobe Pass auth token.
                query['auth'] = self._extract_mvpd_auth(
                    url, options['pid'], 'sprout', 'sprout')
            theplatform_url = smuggle_url(update_url_query(
                theplatform_url, query), {'force_smil_url': True})
        else:
            # Older pages embed the player through a plain iframe instead.
            iframe = self._search_regex(
                r'(<iframe[^>]+id="sproutVideoIframe"[^>]*?>)',
                webpage, 'iframe')
            theplatform_url = extract_attributes(iframe)['src']

        return self.url_result(theplatform_url, 'ThePlatform')
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/srgssr.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_urllib_parse_urlparse from ..utils import ( ExtractorError, parse_iso8601, qualities, ) class SRGSSRIE(InfoExtractor): _VALID_URL = r'(?:https?://tp\.srgssr\.ch/p(?:/[^/]+)+\?urn=urn|srgssr):(?P<bu>srf|rts|rsi|rtr|swi):(?:[^:]+:)?(?P<type>video|audio):(?P<id>[0-9a-f\-]{36}|\d+)' _GEO_BYPASS = False _GEO_COUNTRIES = ['CH'] _ERRORS = { 'AGERATING12': 'To protect children under the age of 12, this video is only available between 8 p.m. and 6 a.m.', 'AGERATING18': 'To protect children under the age of 18, this video is only available between 11 p.m. and 5 a.m.', # 'ENDDATE': 'For legal reasons, this video was only available for a specified period of time.', 'GEOBLOCK': 'For legal reasons, this video is only available in Switzerland.', 'LEGAL': 'The video cannot be transmitted for legal reasons.', 'STARTDATE': 'This video is not yet available. Please try again later.', } def _get_tokenized_src(self, url, video_id, format_id): sp = compat_urllib_parse_urlparse(url).path.split('/') token = self._download_json( 'http://tp.srgssr.ch/akahd/token?acl=/%s/%s/*' % (sp[1], sp[2]), video_id, 'Downloading %s token' % format_id, fatal=False) or {} auth_params = token.get('token', {}).get('authparams') if auth_params: url += '?' 
+ auth_params return url def get_media_data(self, bu, media_type, media_id): media_data = self._download_json( 'http://il.srgssr.ch/integrationlayer/1.0/ue/%s/%s/play/%s.json' % (bu, media_type, media_id), media_id)[media_type.capitalize()] if media_data.get('block') and media_data['block'] in self._ERRORS: message = self._ERRORS[media_data['block']] if media_data['block'] == 'GEOBLOCK': self.raise_geo_restricted( msg=message, countries=self._GEO_COUNTRIES) raise ExtractorError( '%s said: %s' % (self.IE_NAME, message), expected=True) return media_data def _real_extract(self, url): bu, media_type, media_id = re.match(self._VALID_URL, url).groups() media_data = self.get_media_data(bu, media_type, media_id) metadata = media_data['AssetMetadatas']['AssetMetadata'][0] title = metadata['title'] description = metadata.get('description') created_date = media_data.get('createdDate') or metadata.get('createdDate') timestamp = parse_iso8601(created_date) thumbnails = [{ 'id': image.get('id'), 'url': image['url'], } for image in media_data.get('Image', {}).get('ImageRepresentations', {}).get('ImageRepresentation', [])] preference = qualities(['LQ', 'MQ', 'SD', 'HQ', 'HD']) formats = [] for source in media_data.get('Playlists', {}).get('Playlist', []) + media_data.get('Downloads', {}).get('Download', []): protocol = source.get('@protocol') for asset in source['url']: asset_url = asset['text'] quality = asset['@quality'] format_id = '%s-%s' % (protocol, quality) if protocol.startswith('HTTP-HDS') or protocol.startswith('HTTP-HLS'): asset_url = self._get_tokenized_src(asset_url, media_id, format_id) if protocol.startswith('HTTP-HDS'): formats.extend(self._extract_f4m_formats( asset_url + ('?' if '?' 
not in asset_url else '&') + 'hdcore=3.4.0', media_id, f4m_id=format_id, fatal=False)) elif protocol.startswith('HTTP-HLS'): formats.extend(self._extract_m3u8_formats( asset_url, media_id, 'mp4', 'm3u8_native', m3u8_id=format_id, fatal=False)) else: formats.append({ 'format_id': format_id, 'url': asset_url, 'preference': preference(quality), 'ext': 'flv' if protocol == 'RTMP' else None, }) self._sort_formats(formats) return { 'id': media_id, 'title': title, 'description': description, 'timestamp': timestamp, 'thumbnails': thumbnails, 'formats': formats, } class SRGSSRPlayIE(InfoExtractor): IE_DESC = 'srf.ch, rts.ch, rsi.ch, rtr.ch and swissinfo.ch play sites' _VALID_URL = r'''(?x) https?:// (?:(?:www|play)\.)? (?P<bu>srf|rts|rsi|rtr|swissinfo)\.ch/play/(?:tv|radio)/ (?: [^/]+/(?P<type>video|audio)/[^?]+| popup(?P<type_2>video|audio)player ) \?id=(?P<id>[0-9a-f\-]{36}|\d+) ''' _TESTS = [{ 'url': 'http://www.srf.ch/play/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5', 'md5': 'da6b5b3ac9fa4761a942331cef20fcb3', 'info_dict': { 'id': '28e1a57d-5b76-4399-8ab3-9097f071e6c5', 'ext': 'mp4', 'upload_date': '20130701', 'title': 'Snowden beantragt Asyl in Russland', 'timestamp': 1372713995, } }, { # No Speichern (Save) button 'url': 'http://www.srf.ch/play/tv/top-gear/video/jaguar-xk120-shadow-und-tornado-dampflokomotive?id=677f5829-e473-4823-ac83-a1087fe97faa', 'md5': '0a274ce38fda48c53c01890651985bc6', 'info_dict': { 'id': '677f5829-e473-4823-ac83-a1087fe97faa', 'ext': 'flv', 'upload_date': '20130710', 'title': 'Jaguar XK120, Shadow und Tornado-Dampflokomotive', 'description': 'md5:88604432b60d5a38787f152dec89cd56', 'timestamp': 1373493600, }, }, { 'url': 'http://www.rtr.ch/play/radio/actualitad/audio/saira-tujetsch-tuttina-cuntinuar-cun-sedrun-muster-turissem?id=63cb0778-27f8-49af-9284-8c7a8c6d15fc', 'info_dict': { 'id': '63cb0778-27f8-49af-9284-8c7a8c6d15fc', 'ext': 'mp3', 'upload_date': '20151013', 'title': 'Saira: Tujetsch - 
tuttina cuntinuar cun Sedrun Mustér Turissem', 'timestamp': 1444750398, }, 'params': { # rtmp download 'skip_download': True, }, }, { 'url': 'http://www.rts.ch/play/tv/-/video/le-19h30?id=6348260', 'md5': '67a2a9ae4e8e62a68d0e9820cc9782df', 'info_dict': { 'id': '6348260', 'display_id': '6348260', 'ext': 'mp4', 'duration': 1796, 'title': 'Le 19h30', 'description': '', 'uploader': '19h30', 'upload_date': '20141201', 'timestamp': 1417458600, 'thumbnail': r're:^https?://.*\.image', 'view_count': int, }, 'params': { # m3u8 download 'skip_download': True, } }, { 'url': 'https://www.srf.ch/play/tv/popupvideoplayer?id=c4dba0ca-e75b-43b2-a34f-f708a4932e01', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) bu = mobj.group('bu') media_type = mobj.group('type') or mobj.group('type_2') media_id = mobj.group('id') # other info can be extracted from url + '&layout=json' return self.url_result('srgssr:%s:%s:%s' % (bu[:3], media_type, media_id), 'SRGSSR')
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/srmediathek.py
# coding: utf-8
from __future__ import unicode_literals

from .ard import ARDMediathekIE
from ..utils import (
    ExtractorError,
    get_element_by_attribute,
)


class SRMediathekIE(ARDMediathekIE):
    # sr-mediathek.de reuses the ARD player infrastructure, so this class
    # inherits media extraction from ARDMediathekIE.
    IE_NAME = 'sr:mediathek'
    IE_DESC = 'Saarländischer Rundfunk'
    _VALID_URL = r'https?://sr-mediathek(?:\.sr-online)?\.de/index\.php\?.*?&id=(?P<id>[0-9]+)'

    _TESTS = [{
        'url': 'http://sr-mediathek.sr-online.de/index.php?seite=7&id=28455',
        'info_dict': {
            'id': '28455',
            'ext': 'mp4',
            'title': 'sportarena (26.10.2014)',
            'description': 'Ringen: KSV Köllerbach gegen Aachen-Walheim; Frauen-Fußball: 1. FC Saarbrücken gegen Sindelfingen; Motorsport: Rallye in Losheim; dazu: Interview mit Timo Bernhard; Turnen: TG Saar; Reitsport: Deutscher Voltigier-Pokal; Badminton: Interview mit Michael Fuchs ',
            'thumbnail': r're:^https?://.*\.jpg$',
        },
        'skip': 'no longer available',
    }, {
        'url': 'http://sr-mediathek.sr-online.de/index.php?seite=7&id=37682',
        'info_dict': {
            'id': '37682',
            'ext': 'mp4',
            'title': 'Love, Cakes and Rock\'n\'Roll',
            'description': 'md5:18bf9763631c7d326c22603681e1123d',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'http://sr-mediathek.de/index.php?seite=7&id=7480',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        if '>Der gew&uuml;nschte Beitrag ist leider nicht mehr verf&uuml;gbar.<' in webpage:
            raise ExtractorError('Video %s is no longer available' % video_id, expected=True)

        media_collection_url = self._search_regex(
            r'data-mediacollection-ardplayer="([^"]+)"', webpage, 'media collection url')
        info = self._extract_media_info(media_collection_url, webpage, video_id)
        info.update({
            'id': video_id,
            'title': get_element_by_attribute('class', 'ardplayer-title', webpage),
            'description': self._og_search_description(webpage),
            'thumbnail': self._og_search_thumbnail(webpage),
        })
        return info
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/stanfordoc.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( ExtractorError, orderedSet, unescapeHTML, ) class StanfordOpenClassroomIE(InfoExtractor): IE_NAME = 'stanfordoc' IE_DESC = 'Stanford Open ClassRoom' _VALID_URL = r'https?://openclassroom\.stanford\.edu(?P<path>/?|(/MainFolder/(?:HomePage|CoursePage|VideoPage)\.php([?]course=(?P<course>[^&]+)(&video=(?P<video>[^&]+))?(&.*)?)?))$' _TEST = { 'url': 'http://openclassroom.stanford.edu/MainFolder/VideoPage.php?course=PracticalUnix&video=intro-environment&speed=100', 'md5': '544a9468546059d4e80d76265b0443b8', 'info_dict': { 'id': 'PracticalUnix_intro-environment', 'ext': 'mp4', 'title': 'Intro Environment', } } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) if mobj.group('course') and mobj.group('video'): # A specific video course = mobj.group('course') video = mobj.group('video') info = { 'id': course + '_' + video, 'uploader': None, 'upload_date': None, } baseUrl = 'http://openclassroom.stanford.edu/MainFolder/courses/' + course + '/videos/' xmlUrl = baseUrl + video + '.xml' mdoc = self._download_xml(xmlUrl, info['id']) try: info['title'] = mdoc.findall('./title')[0].text info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text except IndexError: raise ExtractorError('Invalid metadata XML file') return info elif mobj.group('course'): # A course page course = mobj.group('course') info = { 'id': course, '_type': 'playlist', 'uploader': None, 'upload_date': None, } coursepage = self._download_webpage( url, info['id'], note='Downloading course info page', errnote='Unable to download course info page') info['title'] = self._html_search_regex( r'<h1>([^<]+)</h1>', coursepage, 'title', default=info['id']) info['description'] = self._html_search_regex( r'(?s)<description>([^<]+)</description>', coursepage, 'description', fatal=False) links = orderedSet(re.findall(r'<a href="(VideoPage\.php\?[^"]+)">', coursepage)) info['entries'] = [self.url_result( 
'http://openclassroom.stanford.edu/MainFolder/%s' % unescapeHTML(l) ) for l in links] return info else: # Root page info = { 'id': 'Stanford OpenClassroom', '_type': 'playlist', 'uploader': None, 'upload_date': None, } info['title'] = info['id'] rootURL = 'http://openclassroom.stanford.edu/MainFolder/HomePage.php' rootpage = self._download_webpage(rootURL, info['id'], errnote='Unable to download course info page') links = orderedSet(re.findall(r'<a href="(CoursePage\.php\?[^"]+)">', rootpage)) info['entries'] = [self.url_result( 'http://openclassroom.stanford.edu/MainFolder/%s' % unescapeHTML(l) ) for l in links] return info
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/steam.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( extract_attributes, ExtractorError, get_element_by_class, js_to_json, ) class SteamIE(InfoExtractor): _VALID_URL = r"""(?x) https?://store\.steampowered\.com/ (agecheck/)? (?P<urltype>video|app)/ #If the page is only for videos or for a game (?P<gameID>\d+)/? (?P<videoID>\d*)(?P<extra>\??) # For urltype == video we sometimes get the videoID | https?://(?:www\.)?steamcommunity\.com/sharedfiles/filedetails/\?id=(?P<fileID>[0-9]+) """ _VIDEO_PAGE_TEMPLATE = 'http://store.steampowered.com/video/%s/' _AGECHECK_TEMPLATE = 'http://store.steampowered.com/agecheck/video/%s/?snr=1_agecheck_agecheck__age-gate&ageDay=1&ageMonth=January&ageYear=1970' _TESTS = [{ 'url': 'http://store.steampowered.com/video/105600/', 'playlist': [ { 'md5': '6a294ee0c4b1f47f5bb76a65e31e3592', 'info_dict': { 'id': '2040428', 'ext': 'mp4', 'title': 'Terraria 1.3 Trailer', 'playlist_index': 1, } }, { 'md5': '911672b20064ca3263fa89650ba5a7aa', 'info_dict': { 'id': '2029566', 'ext': 'mp4', 'title': 'Terraria 1.2 Trailer', 'playlist_index': 2, } } ], 'info_dict': { 'id': '105600', 'title': 'Terraria', }, 'params': { 'playlistend': 2, } }, { 'url': 'http://steamcommunity.com/sharedfiles/filedetails/?id=242472205', 'info_dict': { 'id': 'X8kpJBlzD2E', 'ext': 'mp4', 'upload_date': '20140617', 'title': 'FRONTIERS - Trapping', 'description': 'md5:bf6f7f773def614054089e5769c12a6e', 'uploader': 'AAD Productions', 'uploader_id': 'AtomicAgeDogGames', } }] def _real_extract(self, url): m = re.match(self._VALID_URL, url) fileID = m.group('fileID') if fileID: videourl = url playlist_id = fileID else: gameID = m.group('gameID') playlist_id = gameID videourl = self._VIDEO_PAGE_TEMPLATE % playlist_id self._set_cookie('steampowered.com', 'mature_content', '1') webpage = self._download_webpage(videourl, playlist_id) if re.search('<h2>Please enter your birth date to continue:</h2>', webpage) is not None: videourl = 
self._AGECHECK_TEMPLATE % playlist_id self.report_age_confirmation() webpage = self._download_webpage(videourl, playlist_id) flash_vars = self._parse_json(self._search_regex( r'(?s)rgMovieFlashvars\s*=\s*({.+?});', webpage, 'flash vars'), playlist_id, js_to_json) playlist_title = None entries = [] if fileID: playlist_title = get_element_by_class('workshopItemTitle', webpage) for movie in flash_vars.values(): if not movie: continue youtube_id = movie.get('YOUTUBE_VIDEO_ID') if not youtube_id: continue entries.append({ '_type': 'url', 'url': youtube_id, 'ie_key': 'Youtube', }) else: playlist_title = get_element_by_class('apphub_AppName', webpage) for movie_id, movie in flash_vars.items(): if not movie: continue video_id = self._search_regex(r'movie_(\d+)', movie_id, 'video id', fatal=False) title = movie.get('MOVIE_NAME') if not title or not video_id: continue entry = { 'id': video_id, 'title': title.replace('+', ' '), } formats = [] flv_url = movie.get('FILENAME') if flv_url: formats.append({ 'format_id': 'flv', 'url': flv_url, }) highlight_element = self._search_regex( r'(<div[^>]+id="highlight_movie_%s"[^>]+>)' % video_id, webpage, 'highlight element', fatal=False) if highlight_element: highlight_attribs = extract_attributes(highlight_element) if highlight_attribs: entry['thumbnail'] = highlight_attribs.get('data-poster') for quality in ('', '-hd'): for ext in ('webm', 'mp4'): video_url = highlight_attribs.get('data-%s%s-source' % (ext, quality)) if video_url: formats.append({ 'format_id': ext + quality, 'url': video_url, }) if not formats: continue entry['formats'] = formats entries.append(entry) if not entries: raise ExtractorError('Could not find any videos') return self.playlist_result(entries, playlist_id, playlist_title)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/stitcher.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( determine_ext, int_or_none, js_to_json, unescapeHTML, ) class StitcherIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?stitcher\.com/podcast/(?:[^/]+/)+e/(?:(?P<display_id>[^/#?&]+?)-)?(?P<id>\d+)(?:[/#?&]|$)' _TESTS = [{ 'url': 'http://www.stitcher.com/podcast/the-talking-machines/e/40789481?autoplay=true', 'md5': '391dd4e021e6edeb7b8e68fbf2e9e940', 'info_dict': { 'id': '40789481', 'ext': 'mp3', 'title': 'Machine Learning Mastery and Cancer Clusters', 'description': 'md5:55163197a44e915a14a1ac3a1de0f2d3', 'duration': 1604, 'thumbnail': r're:^https?://.*\.jpg', }, }, { 'url': 'http://www.stitcher.com/podcast/panoply/vulture-tv/e/the-rare-hourlong-comedy-plus-40846275?autoplay=true', 'info_dict': { 'id': '40846275', 'display_id': 'the-rare-hourlong-comedy-plus', 'ext': 'mp3', 'title': "The CW's 'Crazy Ex-Girlfriend'", 'description': 'md5:04f1e2f98eb3f5cbb094cea0f9e19b17', 'duration': 2235, 'thumbnail': r're:^https?://.*\.jpg', }, 'params': { 'skip_download': True, }, }, { # escaped title 'url': 'http://www.stitcher.com/podcast/marketplace-on-stitcher/e/40910226?autoplay=true', 'only_matching': True, }, { 'url': 'http://www.stitcher.com/podcast/panoply/getting-in/e/episode-2a-how-many-extracurriculars-should-i-have-40876278?autoplay=true', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) audio_id = mobj.group('id') display_id = mobj.group('display_id') or audio_id webpage = self._download_webpage(url, display_id) episode = self._parse_json( js_to_json(self._search_regex( r'(?s)var\s+stitcher(?:Config)?\s*=\s*({.+?});\n', webpage, 'episode config')), display_id)['config']['episode'] title = unescapeHTML(episode['title']) formats = [{ 'url': episode[episode_key], 'ext': determine_ext(episode[episode_key]) or 'mp3', 'vcodec': 'none', } for episode_key in ('episodeURL',) if episode.get(episode_key)] description = 
self._search_regex( r'Episode Info:\s*</span>([^<]+)<', webpage, 'description', fatal=False) duration = int_or_none(episode.get('duration')) thumbnail = episode.get('episodeImage') return { 'id': audio_id, 'display_id': display_id, 'title': title, 'description': description, 'duration': duration, 'thumbnail': thumbnail, 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/streamable.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( ExtractorError, float_or_none, int_or_none, ) class StreamableIE(InfoExtractor): _VALID_URL = r'https?://streamable\.com/(?:[es]/)?(?P<id>\w+)' _TESTS = [ { 'url': 'https://streamable.com/dnd1', 'md5': '3e3bc5ca088b48c2d436529b64397fef', 'info_dict': { 'id': 'dnd1', 'ext': 'mp4', 'title': 'Mikel Oiarzabal scores to make it 0-3 for La Real against Espanyol', 'thumbnail': r're:https?://.*\.jpg$', 'uploader': 'teabaker', 'timestamp': 1454964157.35115, 'upload_date': '20160208', 'duration': 61.516, 'view_count': int, } }, # older video without bitrate, width/height, etc. info { 'url': 'https://streamable.com/moo', 'md5': '2cf6923639b87fba3279ad0df3a64e73', 'info_dict': { 'id': 'moo', 'ext': 'mp4', 'title': '"Please don\'t eat me!"', 'thumbnail': r're:https?://.*\.jpg$', 'timestamp': 1426115495, 'upload_date': '20150311', 'duration': 12, 'view_count': int, } }, { 'url': 'https://streamable.com/e/dnd1', 'only_matching': True, }, { 'url': 'https://streamable.com/s/okkqk/drxjds', 'only_matching': True, } ] @staticmethod def _extract_url(webpage): mobj = re.search( r'<iframe[^>]+src=(?P<q1>[\'"])(?P<src>(?:https?:)?//streamable\.com/(?:(?!\1).+))(?P=q1)', webpage) if mobj: return mobj.group('src') def _real_extract(self, url): video_id = self._match_id(url) # Note: Using the ajax API, as the public Streamable API doesn't seem # to return video info like the title properly sometimes, and doesn't # include info like the video duration video = self._download_json( 'https://ajax.streamable.com/videos/%s' % video_id, video_id) # Format IDs: # 0 The video is being uploaded # 1 The video is being processed # 2 The video has at least one file ready # 3 The video is unavailable due to an error status = video.get('status') if status != 2: raise ExtractorError( 'This video is currently unavailable. 
It may still be uploading or processing.', expected=True) title = video.get('reddit_title') or video['title'] formats = [] for key, info in video['files'].items(): if not info.get('url'): continue formats.append({ 'format_id': key, 'url': self._proto_relative_url(info['url']), 'width': int_or_none(info.get('width')), 'height': int_or_none(info.get('height')), 'filesize': int_or_none(info.get('size')), 'fps': int_or_none(info.get('framerate')), 'vbr': float_or_none(info.get('bitrate'), 1000) }) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'description': video.get('description'), 'thumbnail': self._proto_relative_url(video.get('thumbnail_url')), 'uploader': video.get('owner', {}).get('user_name'), 'timestamp': float_or_none(video.get('date_added')), 'duration': float_or_none(video.get('duration')), 'view_count': int_or_none(video.get('plays')), 'formats': formats }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/streamango.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_chr from ..utils import ( determine_ext, ExtractorError, int_or_none, js_to_json, ) class StreamangoIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?(?:streamango\.com|fruithosts\.net|streamcherry\.com)/(?:f|embed)/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://streamango.com/f/clapasobsptpkdfe/20170315_150006_mp4', 'md5': 'e992787515a182f55e38fc97588d802a', 'info_dict': { 'id': 'clapasobsptpkdfe', 'ext': 'mp4', 'title': '20170315_150006.mp4', } }, { # no og:title 'url': 'https://streamango.com/embed/foqebrpftarclpob/asdf_asd_2_mp4', 'info_dict': { 'id': 'foqebrpftarclpob', 'ext': 'mp4', 'title': 'foqebrpftarclpob', }, 'params': { 'skip_download': True, }, 'skip': 'gone', }, { 'url': 'https://streamango.com/embed/clapasobsptpkdfe/20170315_150006_mp4', 'only_matching': True, }, { 'url': 'https://fruithosts.net/f/mreodparcdcmspsm/w1f1_r4lph_2018_brrs_720p_latino_mp4', 'only_matching': True, }, { 'url': 'https://streamcherry.com/f/clapasobsptpkdfe/', 'only_matching': True, }] def _real_extract(self, url): def decrypt_src(encoded, val): ALPHABET = '=/+9876543210zyxwvutsrqponmlkjihgfedcbaZYXWVUTSRQPONMLKJIHGFEDCBA' encoded = re.sub(r'[^A-Za-z0-9+/=]', '', encoded) decoded = '' sm = [None] * 4 i = 0 str_len = len(encoded) while i < str_len: for j in range(4): sm[j % 4] = ALPHABET.index(encoded[i]) i += 1 char_code = ((sm[0] << 0x2) | (sm[1] >> 0x4)) ^ val decoded += compat_chr(char_code) if sm[2] != 0x40: char_code = ((sm[1] & 0xf) << 0x4) | (sm[2] >> 0x2) decoded += compat_chr(char_code) if sm[3] != 0x40: char_code = ((sm[2] & 0x3) << 0x6) | sm[3] decoded += compat_chr(char_code) return decoded video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._og_search_title(webpage, default=video_id) formats = [] for format_ in re.findall(r'({[^}]*\bsrc\s*:\s*[^}]*})', webpage): mobj = 
re.search(r'(src\s*:\s*[^(]+\(([^)]*)\)[\s,]*)', format_) if mobj is None: continue format_ = format_.replace(mobj.group(0), '') video = self._parse_json( format_, video_id, transform_source=js_to_json, fatal=False) or {} mobj = re.search( r'([\'"])(?P<src>(?:(?!\1).)+)\1\s*,\s*(?P<val>\d+)', mobj.group(1)) if mobj is None: continue src = decrypt_src(mobj.group('src'), int_or_none(mobj.group('val'))) if not src: continue ext = determine_ext(src, default_ext=None) if video.get('type') == 'application/dash+xml' or ext == 'mpd': formats.extend(self._extract_mpd_formats( src, video_id, mpd_id='dash', fatal=False)) else: formats.append({ 'url': src, 'ext': ext or 'mp4', 'width': int_or_none(video.get('width')), 'height': int_or_none(video.get('height')), 'tbr': int_or_none(video.get('bitrate')), }) if not formats: error = self._search_regex( r'<p[^>]+\bclass=["\']lead[^>]+>(.+?)</p>', webpage, 'error', default=None) if not error and '>Sorry' in webpage: error = 'Video %s is not available' % video_id if error: raise ExtractorError(error, expected=True) self._sort_formats(formats) return { 'id': video_id, 'url': url, 'title': title, 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/streamcloud.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( ExtractorError, urlencode_postdata, ) class StreamcloudIE(InfoExtractor): IE_NAME = 'streamcloud.eu' _VALID_URL = r'https?://streamcloud\.eu/(?P<id>[a-zA-Z0-9_-]+)(?:/(?P<fname>[^#?]*)\.html)?' _TESTS = [{ 'url': 'http://streamcloud.eu/skp9j99s4bpz/youtube-dl_test_video_____________-BaW_jenozKc.mp4.html', 'md5': '6bea4c7fa5daaacc2a946b7146286686', 'info_dict': { 'id': 'skp9j99s4bpz', 'ext': 'mp4', 'title': 'youtube-dl test video \'/\\ ä ↭', }, 'skip': 'Only available from the EU' }, { 'url': 'http://streamcloud.eu/ua8cmfh1nbe6/NSHIP-148--KUC-NG--H264-.mp4.html', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) url = 'http://streamcloud.eu/%s' % video_id orig_webpage = self._download_webpage(url, video_id) if '>File Not Found<' in orig_webpage: raise ExtractorError( 'Video %s does not exist' % video_id, expected=True) fields = re.findall(r'''(?x)<input\s+ type="(?:hidden|submit)"\s+ name="([^"]+)"\s+ (?:id="[^"]+"\s+)? value="([^"]*)" ''', orig_webpage) self._sleep(6, video_id) webpage = self._download_webpage( url, video_id, data=urlencode_postdata(fields), headers={ b'Content-Type': b'application/x-www-form-urlencoded', }) try: title = self._html_search_regex( r'<h1[^>]*>([^<]+)<', webpage, 'title') video_url = self._search_regex( r'file:\s*"([^"]+)"', webpage, 'video URL') except ExtractorError: message = self._html_search_regex( r'(?s)<div[^>]+class=(["\']).*?msgboxinfo.*?\1[^>]*>(?P<message>.+?)</div>', webpage, 'message', default=None, group='message') if message: raise ExtractorError('%s said: %s' % (self.IE_NAME, message), expected=True) raise thumbnail = self._search_regex( r'image:\s*"([^"]+)"', webpage, 'thumbnail URL', fatal=False) return { 'id': video_id, 'title': title, 'url': video_url, 'thumbnail': thumbnail, 'http_headers': { 'Referer': url, }, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/streamcz.py
# coding: utf-8 from __future__ import unicode_literals import hashlib import time from .common import InfoExtractor from ..utils import ( int_or_none, sanitized_Request, ) def _get_api_key(api_path): if api_path.endswith('?'): api_path = api_path[:-1] api_key = 'fb5f58a820353bd7095de526253c14fd' a = '{0:}{1:}{2:}'.format(api_key, api_path, int(round(time.time() / 24 / 3600))) return hashlib.md5(a.encode('ascii')).hexdigest() class StreamCZIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?stream\.cz/.+/(?P<id>[0-9]+)' _API_URL = 'http://www.stream.cz/API' _TESTS = [{ 'url': 'http://www.stream.cz/peklonataliri/765767-ecka-pro-deti', 'md5': '934bb6a6d220d99c010783c9719960d5', 'info_dict': { 'id': '765767', 'ext': 'mp4', 'title': 'Peklo na talíři: Éčka pro děti', 'description': 'Taška s grónskou pomazánkou a další pekelnosti ZDE', 'thumbnail': 're:^http://im.stream.cz/episode/52961d7e19d423f8f06f0100', 'duration': 256, }, }, { 'url': 'http://www.stream.cz/blanik/10002447-tri-roky-pro-mazanka', 'md5': '849a88c1e1ca47d41403c2ba5e59e261', 'info_dict': { 'id': '10002447', 'ext': 'mp4', 'title': 'Kancelář Blaník: Tři roky pro Mazánka', 'description': 'md5:3862a00ba7bf0b3e44806b544032c859', 'thumbnail': 're:^http://im.stream.cz/episode/537f838c50c11f8d21320000', 'duration': 368, }, }] def _real_extract(self, url): video_id = self._match_id(url) api_path = '/episode/%s' % video_id req = sanitized_Request(self._API_URL + api_path) req.add_header('Api-Password', _get_api_key(api_path)) data = self._download_json(req, video_id) formats = [] for quality, video in enumerate(data['video_qualities']): for f in video['formats']: typ = f['type'].partition('/')[2] qlabel = video.get('quality_label') formats.append({ 'format_note': '%s-%s' % (qlabel, typ) if qlabel else typ, 'format_id': '%s-%s' % (typ, f['quality']), 'url': f['source'], 'height': int_or_none(f['quality'].rstrip('p')), 'quality': quality, }) self._sort_formats(formats) image = data.get('image') if image: thumbnail = 
self._proto_relative_url( image.replace('{width}', '1240').replace('{height}', '697'), scheme='http:', ) else: thumbnail = None stream = data.get('_embedded', {}).get('stream:show', {}).get('name') if stream: title = '%s: %s' % (stream, data['name']) else: title = data['name'] subtitles = {} srt_url = data.get('subtitles_srt') if srt_url: subtitles['cs'] = [{ 'ext': 'srt', 'url': srt_url, }] return { 'id': video_id, 'title': title, 'thumbnail': thumbnail, 'formats': formats, 'description': data.get('web_site_text'), 'duration': int_or_none(data.get('duration')), 'view_count': int_or_none(data.get('views')), 'subtitles': subtitles, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/streetvoice.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_str from ..utils import unified_strdate class StreetVoiceIE(InfoExtractor): _VALID_URL = r'https?://(?:.+?\.)?streetvoice\.com/[^/]+/songs/(?P<id>[0-9]+)' _TESTS = [{ 'url': 'http://streetvoice.com/skippylu/songs/94440/', 'md5': '15974627fc01a29e492c98593c2fd472', 'info_dict': { 'id': '94440', 'ext': 'mp3', 'title': '輸', 'description': 'Crispy脆樂團 - 輸', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 260, 'upload_date': '20091018', 'uploader': 'Crispy脆樂團', 'uploader_id': '627810', } }, { 'url': 'http://tw.streetvoice.com/skippylu/songs/94440/', 'only_matching': True, }] def _real_extract(self, url): song_id = self._match_id(url) song = self._download_json( 'https://streetvoice.com/api/v1/public/song/%s/' % song_id, song_id, data=b'') title = song['name'] author = song['user']['nickname'] return { 'id': song_id, 'url': song['file'], 'title': title, 'description': '%s - %s' % (author, title), 'thumbnail': self._proto_relative_url(song.get('image'), 'http:'), 'duration': song.get('length'), 'upload_date': unified_strdate(song.get('created_at')), 'uploader': author, 'uploader_id': compat_str(song['user']['id']), }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/stretchinternet.py
from __future__ import unicode_literals from .common import InfoExtractor from ..utils import int_or_none class StretchInternetIE(InfoExtractor): _VALID_URL = r'https?://portal\.stretchinternet\.com/[^/]+/portal\.htm\?.*?\beventId=(?P<id>\d+)' _TEST = { 'url': 'https://portal.stretchinternet.com/umary/portal.htm?eventId=313900&streamType=video', 'info_dict': { 'id': '313900', 'ext': 'mp4', 'title': 'Augustana (S.D.) Baseball vs University of Mary', 'description': 'md5:7578478614aae3bdd4a90f578f787438', 'timestamp': 1490468400, 'upload_date': '20170325', } } def _real_extract(self, url): video_id = self._match_id(url) stream = self._download_json( 'https://neo-client.stretchinternet.com/streamservice/v1/media/stream/v%s' % video_id, video_id) video_url = 'https://%s' % stream['source'] event = self._download_json( 'https://neo-client.stretchinternet.com/portal-ws/getEvent.json', video_id, query={ 'clientID': 99997, 'eventID': video_id, 'token': 'asdf', })['event'] title = event.get('title') or event['mobileTitle'] description = event.get('customText') timestamp = int_or_none(event.get('longtime')) return { 'id': video_id, 'title': title, 'description': description, 'timestamp': timestamp, 'url': video_url, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/stv.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( compat_str, float_or_none, int_or_none, ) class STVPlayerIE(InfoExtractor): IE_NAME = 'stv:player' _VALID_URL = r'https?://player\.stv\.tv/(?P<type>episode|video)/(?P<id>[a-z0-9]{4})' _TEST = { 'url': 'https://player.stv.tv/video/4gwd/emmerdale/60-seconds-on-set-with-laura-norton/', 'md5': '5adf9439c31d554f8be0707c7abe7e0a', 'info_dict': { 'id': '5333973339001', 'ext': 'mp4', 'upload_date': '20170301', 'title': '60 seconds on set with Laura Norton', 'description': "How many questions can Laura - a.k.a Kerry Wyatt - answer in 60 seconds? Let\'s find out!", 'timestamp': 1488388054, 'uploader_id': '1486976045', }, 'skip': 'this resource is unavailable outside of the UK', } BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1486976045/default_default/index.html?videoId=%s' _PTYPE_MAP = { 'episode': 'episodes', 'video': 'shortform', } def _real_extract(self, url): ptype, video_id = re.match(self._VALID_URL, url).groups() resp = self._download_json( 'https://player.api.stv.tv/v1/%s/%s' % (self._PTYPE_MAP[ptype], video_id), video_id) result = resp['results'] video = result['video'] video_id = compat_str(video['id']) subtitles = {} _subtitles = result.get('_subtitles') or {} for ext, sub_url in _subtitles.items(): subtitles.setdefault('en', []).append({ 'ext': 'vtt' if ext == 'webvtt' else ext, 'url': sub_url, }) programme = result.get('programme') or {} return { '_type': 'url_transparent', 'id': video_id, 'url': self.BRIGHTCOVE_URL_TEMPLATE % video_id, 'description': result.get('summary'), 'duration': float_or_none(video.get('length'), 1000), 'subtitles': subtitles, 'view_count': int_or_none(result.get('views')), 'series': programme.get('name') or programme.get('shortName'), 'ie_key': 'BrightcoveNew', }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/sunporno.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( parse_duration, int_or_none, qualities, determine_ext, ) class SunPornoIE(InfoExtractor): _VALID_URL = r'https?://(?:(?:www\.)?sunporno\.com/videos|embeds\.sunporno\.com/embed)/(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.sunporno.com/videos/807778/', 'md5': '507887e29033502f29dba69affeebfc9', 'info_dict': { 'id': '807778', 'ext': 'mp4', 'title': 'md5:0a400058e8105d39e35c35e7c5184164', 'description': 'md5:a31241990e1bd3a64e72ae99afb325fb', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 302, 'age_limit': 18, } }, { 'url': 'http://embeds.sunporno.com/embed/807778', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage( 'http://www.sunporno.com/videos/%s' % video_id, video_id) title = self._html_search_regex( r'<title>([^<]+)</title>', webpage, 'title') description = self._html_search_meta( 'description', webpage, 'description') thumbnail = self._html_search_regex( r'poster="([^"]+)"', webpage, 'thumbnail', fatal=False) duration = parse_duration(self._search_regex( (r'itemprop="duration"[^>]*>\s*(\d+:\d+)\s*<', r'>Duration:\s*<span[^>]+>\s*(\d+:\d+)\s*<'), webpage, 'duration', fatal=False)) view_count = int_or_none(self._html_search_regex( r'class="views">(?:<noscript>)?\s*(\d+)\s*<', webpage, 'view count', fatal=False)) comment_count = int_or_none(self._html_search_regex( r'(\d+)</b> Comments?', webpage, 'comment count', fatal=False, default=None)) formats = [] quality = qualities(['mp4', 'flv']) for video_url in re.findall(r'<(?:source|video) src="([^"]+)"', webpage): video_ext = determine_ext(video_url) formats.append({ 'url': video_url, 'format_id': video_ext, 'quality': quality(video_ext), }) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'view_count': view_count, 'comment_count': comment_count, 
'formats': formats, 'age_limit': 18, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/sverigesradio.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( determine_ext, int_or_none, str_or_none, ) class SverigesRadioBaseIE(InfoExtractor): _BASE_URL = 'https://sverigesradio.se/sida/playerajax/' _QUALITIES = ['low', 'medium', 'high'] _EXT_TO_CODEC_MAP = { 'mp3': 'mp3', 'm4a': 'aac', } _CODING_FORMAT_TO_ABR_MAP = { 5: 128, 11: 192, 12: 32, 13: 96, } def _real_extract(self, url): audio_id = self._match_id(url) query = { 'id': audio_id, 'type': self._AUDIO_TYPE, } item = self._download_json( self._BASE_URL + 'audiometadata', audio_id, 'Downloading audio JSON metadata', query=query)['items'][0] title = item['subtitle'] query['format'] = 'iis' urls = [] formats = [] for quality in self._QUALITIES: query['quality'] = quality audio_url_data = self._download_json( self._BASE_URL + 'getaudiourl', audio_id, 'Downloading %s format JSON metadata' % quality, fatal=False, query=query) or {} audio_url = audio_url_data.get('audioUrl') if not audio_url or audio_url in urls: continue urls.append(audio_url) ext = determine_ext(audio_url) coding_format = audio_url_data.get('codingFormat') abr = int_or_none(self._search_regex( r'_a(\d+)\.m4a', audio_url, 'audio bitrate', default=None)) or self._CODING_FORMAT_TO_ABR_MAP.get(coding_format) formats.append({ 'abr': abr, 'acodec': self._EXT_TO_CODEC_MAP.get(ext), 'ext': ext, 'format_id': str_or_none(coding_format), 'vcodec': 'none', 'url': audio_url, }) self._sort_formats(formats) return { 'id': audio_id, 'title': title, 'formats': formats, 'series': item.get('title'), 'duration': int_or_none(item.get('duration')), 'thumbnail': item.get('displayimageurl'), 'description': item.get('description'), } class SverigesRadioPublicationIE(SverigesRadioBaseIE): IE_NAME = 'sverigesradio:publication' _VALID_URL = r'https?://(?:www\.)?sverigesradio\.se/sida/(?:artikel|gruppsida)\.aspx\?.*?\bartikel=(?P<id>[0-9]+)' _TESTS = [{ 'url': 
'https://sverigesradio.se/sida/artikel.aspx?programid=83&artikel=7038546', 'md5': '6a4917e1923fccb080e5a206a5afa542', 'info_dict': { 'id': '7038546', 'ext': 'm4a', 'duration': 132, 'series': 'Nyheter (Ekot)', 'title': 'Esa Teittinen: Sanningen har inte kommit fram', 'description': 'md5:daf7ce66a8f0a53d5465a5984d3839df', 'thumbnail': r're:^https?://.*\.jpg', }, }, { 'url': 'https://sverigesradio.se/sida/gruppsida.aspx?programid=3304&grupp=6247&artikel=7146887', 'only_matching': True, }] _AUDIO_TYPE = 'publication' class SverigesRadioEpisodeIE(SverigesRadioBaseIE): IE_NAME = 'sverigesradio:episode' _VALID_URL = r'https?://(?:www\.)?sverigesradio\.se/(?:sida/)?avsnitt/(?P<id>[0-9]+)' _TEST = { 'url': 'https://sverigesradio.se/avsnitt/1140922?programid=1300', 'md5': '20dc4d8db24228f846be390b0c59a07c', 'info_dict': { 'id': '1140922', 'ext': 'mp3', 'duration': 3307, 'series': 'Konflikt', 'title': 'Metoo och valen', 'description': 'md5:fcb5c1f667f00badcc702b196f10a27e', 'thumbnail': r're:^https?://.*\.jpg', } } _AUDIO_TYPE = 'episode'
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/svt.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_parse_qs, compat_urllib_parse_urlparse, ) from ..utils import ( determine_ext, dict_get, int_or_none, orderedSet, strip_or_none, try_get, urljoin, compat_str, ) class SVTBaseIE(InfoExtractor): _GEO_COUNTRIES = ['SE'] def _extract_video(self, video_info, video_id): is_live = dict_get(video_info, ('live', 'simulcast'), default=False) m3u8_protocol = 'm3u8' if is_live else 'm3u8_native' formats = [] for vr in video_info['videoReferences']: player_type = vr.get('playerType') or vr.get('format') vurl = vr['url'] ext = determine_ext(vurl) if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( vurl, video_id, ext='mp4', entry_protocol=m3u8_protocol, m3u8_id=player_type, fatal=False)) elif ext == 'f4m': formats.extend(self._extract_f4m_formats( vurl + '?hdcore=3.3.0', video_id, f4m_id=player_type, fatal=False)) elif ext == 'mpd': if player_type == 'dashhbbtv': formats.extend(self._extract_mpd_formats( vurl, video_id, mpd_id=player_type, fatal=False)) else: formats.append({ 'format_id': player_type, 'url': vurl, }) if not formats and video_info.get('rights', {}).get('geoBlockedSweden'): self.raise_geo_restricted( 'This video is only available in Sweden', countries=self._GEO_COUNTRIES) self._sort_formats(formats) subtitles = {} subtitle_references = dict_get(video_info, ('subtitles', 'subtitleReferences')) if isinstance(subtitle_references, list): for sr in subtitle_references: subtitle_url = sr.get('url') subtitle_lang = sr.get('language', 'sv') if subtitle_url: if determine_ext(subtitle_url) == 'm3u8': # TODO(yan12125): handle WebVTT in m3u8 manifests continue subtitles.setdefault(subtitle_lang, []).append({'url': subtitle_url}) title = video_info.get('title') series = video_info.get('programTitle') season_number = int_or_none(video_info.get('season')) episode = video_info.get('episodeTitle') episode_number = 
int_or_none(video_info.get('episodeNumber')) duration = int_or_none(dict_get(video_info, ('materialLength', 'contentDuration'))) age_limit = None adult = dict_get( video_info, ('inappropriateForChildren', 'blockedForChildren'), skip_false_values=False) if adult is not None: age_limit = 18 if adult else 0 return { 'id': video_id, 'title': title, 'formats': formats, 'subtitles': subtitles, 'duration': duration, 'age_limit': age_limit, 'series': series, 'season_number': season_number, 'episode': episode, 'episode_number': episode_number, 'is_live': is_live, } class SVTIE(SVTBaseIE): _VALID_URL = r'https?://(?:www\.)?svt\.se/wd\?(?:.*?&)?widgetId=(?P<widget_id>\d+)&.*?\barticleId=(?P<id>\d+)' _TEST = { 'url': 'http://www.svt.se/wd?widgetId=23991&sectionId=541&articleId=2900353&type=embed&contextSectionId=123&autostart=false', 'md5': '33e9a5d8f646523ce0868ecfb0eed77d', 'info_dict': { 'id': '2900353', 'ext': 'mp4', 'title': 'Stjärnorna skojar till det - under SVT-intervjun', 'duration': 27, 'age_limit': 0, }, } @staticmethod def _extract_url(webpage): mobj = re.search( r'(?:<iframe src|href)="(?P<url>%s[^"]*)"' % SVTIE._VALID_URL, webpage) if mobj: return mobj.group('url') def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) widget_id = mobj.group('widget_id') article_id = mobj.group('id') info = self._download_json( 'http://www.svt.se/wd?widgetId=%s&articleId=%s&format=json&type=embed&output=json' % (widget_id, article_id), article_id) info_dict = self._extract_video(info['video'], article_id) info_dict['title'] = info['context']['title'] return info_dict class SVTPlayBaseIE(SVTBaseIE): _SVTPLAY_RE = r'root\s*\[\s*(["\'])_*svtplay\1\s*\]\s*=\s*(?P<json>{.+?})\s*;\s*\n' class SVTPlayIE(SVTPlayBaseIE): IE_DESC = 'SVT Play and Öppet arkiv' _VALID_URL = r'''(?x) (?: svt:(?P<svt_id>[^/?#&]+)| https?://(?:www\.)?(?:svtplay|oppetarkiv)\.se/(?:video|klipp|kanaler)/(?P<id>[^/?#&]+) ) ''' _TESTS = [{ 'url': 
'http://www.svtplay.se/video/5996901/flygplan-till-haile-selassie/flygplan-till-haile-selassie-2', 'md5': '2b6704fe4a28801e1a098bbf3c5ac611', 'info_dict': { 'id': '5996901', 'ext': 'mp4', 'title': 'Flygplan till Haile Selassie', 'duration': 3527, 'thumbnail': r're:^https?://.*[\.-]jpg$', 'age_limit': 0, 'subtitles': { 'sv': [{ 'ext': 'wsrt', }] }, }, }, { # geo restricted to Sweden 'url': 'http://www.oppetarkiv.se/video/5219710/trollflojten', 'only_matching': True, }, { 'url': 'http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg', 'only_matching': True, }, { 'url': 'https://www.svtplay.se/kanaler/svt1', 'only_matching': True, }, { 'url': 'svt:1376446-003A', 'only_matching': True, }, { 'url': 'svt:14278044', 'only_matching': True, }] def _adjust_title(self, info): if info['is_live']: info['title'] = self._live_title(info['title']) def _extract_by_video_id(self, video_id, webpage=None): data = self._download_json( 'https://api.svt.se/videoplayer-api/video/%s' % video_id, video_id, headers=self.geo_verification_headers()) info_dict = self._extract_video(data, video_id) if not info_dict.get('title'): title = dict_get(info_dict, ('episode', 'series')) if not title and webpage: title = re.sub( r'\s*\|\s*.+?$', '', self._og_search_title(webpage)) if not title: title = video_id info_dict['title'] = title self._adjust_title(info_dict) return info_dict def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id, svt_id = mobj.group('id', 'svt_id') if svt_id: return self._extract_by_video_id(svt_id) webpage = self._download_webpage(url, video_id) data = self._parse_json( self._search_regex( self._SVTPLAY_RE, webpage, 'embedded data', default='{}', group='json'), video_id, fatal=False) thumbnail = self._og_search_thumbnail(webpage) if data: video_info = try_get( data, lambda x: x['context']['dispatcher']['stores']['VideoTitlePageStore']['data']['video'], dict) if video_info: info_dict = self._extract_video(video_info, video_id) info_dict.update({ 
'title': data['context']['dispatcher']['stores']['MetaStore']['title'], 'thumbnail': thumbnail, }) self._adjust_title(info_dict) return info_dict svt_id = self._search_regex( r'<video[^>]+data-video-id=["\']([\da-zA-Z-]+)', webpage, 'video id') return self._extract_by_video_id(svt_id, webpage) class SVTSeriesIE(SVTPlayBaseIE): _VALID_URL = r'https?://(?:www\.)?svtplay\.se/(?P<id>[^/?&#]+)' _TESTS = [{ 'url': 'https://www.svtplay.se/rederiet', 'info_dict': { 'id': 'rederiet', 'title': 'Rederiet', 'description': 'md5:505d491a58f4fcf6eb418ecab947e69e', }, 'playlist_mincount': 318, }, { 'url': 'https://www.svtplay.se/rederiet?tab=sasong2', 'info_dict': { 'id': 'rederiet-sasong2', 'title': 'Rederiet - Säsong 2', 'description': 'md5:505d491a58f4fcf6eb418ecab947e69e', }, 'playlist_count': 12, }] @classmethod def suitable(cls, url): return False if SVTIE.suitable(url) or SVTPlayIE.suitable(url) else super(SVTSeriesIE, cls).suitable(url) def _real_extract(self, url): series_id = self._match_id(url) qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query) season_slug = qs.get('tab', [None])[0] if season_slug: series_id += '-%s' % season_slug webpage = self._download_webpage( url, series_id, 'Downloading series page') root = self._parse_json( self._search_regex( self._SVTPLAY_RE, webpage, 'content', group='json'), series_id) season_name = None entries = [] for season in root['relatedVideoContent']['relatedVideosAccordion']: if not isinstance(season, dict): continue if season_slug: if season.get('slug') != season_slug: continue season_name = season.get('name') videos = season.get('videos') if not isinstance(videos, list): continue for video in videos: content_url = video.get('contentUrl') if not content_url or not isinstance(content_url, compat_str): continue entries.append( self.url_result( urljoin(url, content_url), ie=SVTPlayIE.ie_key(), video_title=video.get('title') )) metadata = root.get('metaData') if not isinstance(metadata, dict): metadata = {} title = 
metadata.get('title') season_name = season_name or season_slug if title and season_name: title = '%s - %s' % (title, season_name) elif season_slug: title = season_slug return self.playlist_result( entries, series_id, title, metadata.get('description')) class SVTPageIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?svt\.se/(?:[^/]+/)*(?P<id>[^/?&#]+)' _TESTS = [{ 'url': 'https://www.svt.se/sport/oseedat/guide-sommartraningen-du-kan-gora-var-och-nar-du-vill', 'info_dict': { 'id': 'guide-sommartraningen-du-kan-gora-var-och-nar-du-vill', 'title': 'GUIDE: Sommarträning du kan göra var och när du vill', }, 'playlist_count': 7, }, { 'url': 'https://www.svt.se/nyheter/inrikes/ebba-busch-thor-kd-har-delvis-ratt-om-no-go-zoner', 'info_dict': { 'id': 'ebba-busch-thor-kd-har-delvis-ratt-om-no-go-zoner', 'title': 'Ebba Busch Thor har bara delvis rätt om ”no-go-zoner”', }, 'playlist_count': 1, }, { # only programTitle 'url': 'http://www.svt.se/sport/ishockey/jagr-tacklar-giroux-under-intervjun', 'info_dict': { 'id': '2900353', 'ext': 'mp4', 'title': 'Stjärnorna skojar till det - under SVT-intervjun', 'duration': 27, 'age_limit': 0, }, }, { 'url': 'https://www.svt.se/nyheter/lokalt/vast/svt-testar-tar-nagon-upp-skrapet-1', 'only_matching': True, }, { 'url': 'https://www.svt.se/vader/manadskronikor/maj2018', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if SVTIE.suitable(url) else super(SVTPageIE, cls).suitable(url) def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) entries = [ self.url_result( 'svt:%s' % video_id, ie=SVTPlayIE.ie_key(), video_id=video_id) for video_id in orderedSet(re.findall( r'data-video-id=["\'](\d+)', webpage))] title = strip_or_none(self._og_search_title(webpage, default=None)) return self.playlist_result(entries, playlist_id, title)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/swrmediathek.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    parse_duration,
    int_or_none,
    determine_protocol,
)


class SWRMediathekIE(InfoExtractor):
    """Extractor for videos and audio hosted on swrmediathek.de.

    Metadata and media URLs come from the site's AjaxEntry JSON endpoint,
    keyed by a UUID taken from the player URL's ``show`` parameter.
    """
    _VALID_URL = r'https?://(?:www\.)?swrmediathek\.de/(?:content/)?player\.htm\?show=(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'

    _TESTS = [{
        'url': 'http://swrmediathek.de/player.htm?show=849790d0-dab8-11e3-a953-0026b975f2e6',
        'md5': '8c5f6f0172753368547ca8413a7768ac',
        'info_dict': {
            'id': '849790d0-dab8-11e3-a953-0026b975f2e6',
            'ext': 'mp4',
            'title': 'SWR odysso',
            'description': 'md5:2012e31baad36162e97ce9eb3f157b8a',
            'thumbnail': r're:^http:.*\.jpg$',
            'duration': 2602,
            'upload_date': '20140515',
            'uploader': 'SWR Fernsehen',
            'uploader_id': '990030',
        },
    }, {
        'url': 'http://swrmediathek.de/player.htm?show=0e1a8510-ddf2-11e3-9be3-0026b975f2e6',
        'md5': 'b10ab854f912eecc5a6b55cd6fc1f545',
        'info_dict': {
            'id': '0e1a8510-ddf2-11e3-9be3-0026b975f2e6',
            'ext': 'mp4',
            'title': 'Nachtcafé - Alltagsdroge Alkohol - zwischen Sektempfang und Komasaufen',
            'description': 'md5:e0a3adc17e47db2c23aab9ebc36dbee2',
            'thumbnail': r're:http://.*\.jpg',
            'duration': 5305,
            'upload_date': '20140516',
            'uploader': 'SWR Fernsehen',
            'uploader_id': '990030',
        },
        'skip': 'redirect to http://swrmediathek.de/index.htm?hinweis=swrlink',
    }, {
        'url': 'http://swrmediathek.de/player.htm?show=bba23e10-cb93-11e3-bf7f-0026b975f2e6',
        'md5': '4382e4ef2c9d7ce6852535fa867a0dd3',
        'info_dict': {
            'id': 'bba23e10-cb93-11e3-bf7f-0026b975f2e6',
            'ext': 'mp3',
            'title': 'Saša Stanišic: Vor dem Fest',
            'description': 'md5:5b792387dc3fbb171eb709060654e8c9',
            'thumbnail': r're:http://.*\.jpg',
            'duration': 3366,
            'upload_date': '20140520',
            'uploader': 'SWR 2',
            'uploader_id': '284670',
        },
        'skip': 'redirect to http://swrmediathek.de/index.htm?hinweis=swrlink',
    }]

    def _real_extract(self, url):
        """Fetch the AjaxEntry JSON for the id and build the info dict."""
        video_id = self._match_id(url)

        video = self._download_json(
            'http://swrmediathek.de/AjaxEntry?ekey=%s' % video_id,
            video_id, 'Downloading video JSON')

        attr = video['attr']
        title = attr['entry_title']
        # 'Video' or 'Audio'; decides which codec field is filled in below.
        media_type = attr.get('entry_etype')

        formats = []
        # Media variants live in the 'sub' list; each entry's 'attr' packs
        # codec (val0), quality (val1) and URL (val2) into generic keys.
        for entry in video.get('sub', []):
            if entry.get('name') != 'entry_media':
                continue

            entry_attr = entry.get('attr', {})
            f_url = entry_attr.get('val2')
            if not f_url:
                continue

            codec = entry_attr.get('val0')
            if codec == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    f_url, video_id, 'mp4', 'm3u8_native',
                    m3u8_id='hls', fatal=False))
            elif codec == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    f_url + '?hdcore=3.7.0', video_id,
                    f4m_id='hds', fatal=False))
            else:
                formats.append({
                    'format_id': determine_protocol({'url': f_url}),
                    'url': f_url,
                    'quality': int_or_none(entry_attr.get('val1')),
                    'vcodec': codec if media_type == 'Video' else 'none',
                    'acodec': codec if media_type == 'Audio' else None,
                })
        self._sort_formats(formats)

        upload_date = None
        entry_pdatet = attr.get('entry_pdatet')
        if entry_pdatet:
            # Drops the last 4 characters of entry_pdatet to get the date;
            # assumes the field ends with a 4-char time suffix — TODO confirm.
            upload_date = entry_pdatet[:-4]

        return {
            'id': video_id,
            'title': title,
            'description': attr.get('entry_descl'),
            'thumbnail': attr.get('entry_image_16_9'),
            'duration': parse_duration(attr.get('entry_durat')),
            'upload_date': upload_date,
            'uploader': attr.get('channel_title'),
            'uploader_id': attr.get('channel_idkey'),
            'formats': formats,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/syfy.py
from __future__ import unicode_literals

from .adobepass import AdobePassIE
from ..utils import (
    update_url_query,
    smuggle_url,
)


class SyfyIE(AdobePassIE):
    """Extractor for syfy.com video pages.

    The media itself is served by ThePlatform; this extractor resolves the
    page to a smuggled ThePlatform URL, attaching an Adobe Pass auth token
    when the video is entitlement-protected.
    """
    _VALID_URL = r'https?://(?:www\.)?syfy\.com/(?:[^/]+/)?videos/(?P<id>[^/?#]+)'
    _TESTS = [{
        'url': 'http://www.syfy.com/theinternetruinedmylife/videos/the-internet-ruined-my-life-season-1-trailer',
        'info_dict': {
            'id': '2968097',
            'ext': 'mp4',
            'title': 'The Internet Ruined My Life: Season 1 Trailer',
            'description': 'One tweet, one post, one click, can destroy everything.',
            'uploader': 'NBCU-MPAT',
            'upload_date': '20170113',
            'timestamp': 1484345640,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
        'add_ie': ['ThePlatform'],
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # The page embeds Drupal settings JSON; 'syfy_mpx' maps ids to the
        # ThePlatform (MPX) metadata — take the single entry's value.
        syfy_mpx = list(self._parse_json(self._search_regex(
            r'jQuery\.extend\(Drupal\.settings\s*,\s*({.+?})\);', webpage, 'drupal settings'),
            display_id)['syfy']['syfy_mpx'].values())[0]
        video_id = syfy_mpx['mpxGUID']
        title = syfy_mpx['episodeTitle']
        query = {
            'mbr': 'true',
            'manifest': 'm3u',
        }
        # 'auth' entitlement requires an Adobe Pass (MVPD) token on the
        # release URL; default rating TV-14 when none is given.
        if syfy_mpx.get('entitlement') == 'auth':
            resource = self._get_mvpd_resource(
                'syfy', title, video_id,
                syfy_mpx.get('mpxRating', 'TV-14'))
            query['auth'] = self._extract_mvpd_auth(
                url, video_id, 'syfy', resource)

        return {
            '_type': 'url_transparent',
            'ie_key': 'ThePlatform',
            'url': smuggle_url(update_url_query(
                self._proto_relative_url(syfy_mpx['releaseURL']), query),
                {'force_smil_url': True}),
            'title': title,
            'id': video_id,
            'display_id': display_id,
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/sztvhu.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor


class SztvHuIE(InfoExtractor):
    """Extractor for news articles on sztv.hu / tvszombathely.hu.

    The article page embeds a player config whose file path is mapped to a
    direct media URL under http://media.sztv.hu/vod/.
    """
    _VALID_URL = r'https?://(?:(?:www\.)?sztv\.hu|www\.tvszombathely\.hu)/(?:[^/]+)/.+-(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://sztv.hu/hirek/cserkeszek-nepszerusitettek-a-kornyezettudatos-eletmodot-a-savaria-teren-20130909',
        'md5': 'a6df607b11fb07d0e9f2ad94613375cb',
        'info_dict': {
            'id': '20130909',
            'ext': 'mp4',
            'title': 'Cserkészek népszerűsítették a környezettudatos életmódot a Savaria téren',
            'description': 'A zöld nap játékos ismeretterjesztő programjait a Magyar Cserkész Szövetség szervezte, akik az ország nyolc városában adják át tudásukat az érdeklődőknek. A PET...',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        page = self._download_webpage(url, video_id)

        # Relative media path from the embedded player configuration.
        media_path = self._search_regex(
            r'file: "...:(.*?)",', page, 'video file')

        return {
            'id': video_id,
            'url': 'http://media.sztv.hu/vod/' + media_path,
            'title': self._html_search_regex(
                r'<meta name="title" content="([^"]*?) - [^-]*? - [^-]*?"',
                page, 'video title'),
            'description': self._html_search_regex(
                r'<meta name="description" content="([^"]*)"/>',
                page, 'video description', fatal=False),
            'thumbnail': self._og_search_thumbnail(page),
        }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tagesschau.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( determine_ext, js_to_json, parse_iso8601, parse_filesize, ) class TagesschauPlayerIE(InfoExtractor): IE_NAME = 'tagesschau:player' _VALID_URL = r'https?://(?:www\.)?tagesschau\.de/multimedia/(?P<kind>audio|video)/(?P=kind)-(?P<id>\d+)~player(?:_[^/?#&]+)?\.html' _TESTS = [{ 'url': 'http://www.tagesschau.de/multimedia/video/video-179517~player.html', 'md5': '8d09548d5c15debad38bee3a4d15ca21', 'info_dict': { 'id': '179517', 'ext': 'mp4', 'title': 'Marie Kristin Boese, ARD Berlin, über den zukünftigen Kurs der AfD', 'thumbnail': r're:^https?:.*\.jpg$', 'formats': 'mincount:6', }, }, { 'url': 'https://www.tagesschau.de/multimedia/audio/audio-29417~player.html', 'md5': '76e6eec6ebd40740671cf0a2c88617e5', 'info_dict': { 'id': '29417', 'ext': 'mp3', 'title': 'Trabi - Bye, bye Rennpappe', 'thumbnail': r're:^https?:.*\.jpg$', 'formats': 'mincount:2', }, }, { 'url': 'http://www.tagesschau.de/multimedia/audio/audio-29417~player_autoplay-true.html', 'only_matching': True, }] _FORMATS = { 'xs': {'quality': 0}, 's': {'width': 320, 'height': 180, 'quality': 1}, 'm': {'width': 512, 'height': 288, 'quality': 2}, 'l': {'width': 960, 'height': 540, 'quality': 3}, 'xl': {'width': 1280, 'height': 720, 'quality': 4}, 'xxl': {'quality': 5}, } def _extract_via_api(self, kind, video_id): info = self._download_json( 'https://www.tagesschau.de/api/multimedia/{0}/{0}-{1}.json'.format(kind, video_id), video_id) title = info['headline'] formats = [] for media in info['mediadata']: for format_id, format_url in media.items(): if determine_ext(format_url) == 'm3u8': formats.extend(self._extract_m3u8_formats( format_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls')) else: formats.append({ 'url': format_url, 'format_id': format_id, 'vcodec': 'none' if kind == 'audio' else None, }) self._sort_formats(formats) timestamp = parse_iso8601(info.get('date')) return { 
'id': video_id, 'title': title, 'timestamp': timestamp, 'formats': formats, } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') # kind = mobj.group('kind').lower() # if kind == 'video': # return self._extract_via_api(kind, video_id) # JSON api does not provide some audio formats (e.g. ogg) thus # extractiong audio via webpage webpage = self._download_webpage(url, video_id) title = self._og_search_title(webpage).strip() formats = [] for media_json in re.findall(r'({src\s*:\s*["\']http[^}]+type\s*:[^}]+})', webpage): media = self._parse_json(js_to_json(media_json), video_id, fatal=False) if not media: continue src = media.get('src') if not src: return quality = media.get('quality') kind = media.get('type', '').split('/')[0] ext = determine_ext(src) f = { 'url': src, 'format_id': '%s_%s' % (quality, ext) if quality else ext, 'ext': ext, 'vcodec': 'none' if kind == 'audio' else None, } f.update(self._FORMATS.get(quality, {})) formats.append(f) self._sort_formats(formats) thumbnail = self._og_search_thumbnail(webpage) return { 'id': video_id, 'title': title, 'thumbnail': thumbnail, 'formats': formats, } class TagesschauIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tagesschau\.de/(?P<path>[^/]+/(?:[^/]+/)*?(?P<id>[^/#?]+?(?:-?[0-9]+)?))(?:~_?[^/#?]+?)?\.html' _TESTS = [{ 'url': 'http://www.tagesschau.de/multimedia/video/video-102143.html', 'md5': 'f7c27a0eff3bfe8c7727e65f8fe1b1e6', 'info_dict': { 'id': 'video-102143', 'ext': 'mp4', 'title': 'Regierungsumbildung in Athen: Neue Minister in Griechenland vereidigt', 'description': '18.07.2015 20:10 Uhr', 'thumbnail': r're:^https?:.*\.jpg$', }, }, { 'url': 'http://www.tagesschau.de/multimedia/sendung/ts-5727.html', 'md5': '3c54c1f6243d279b706bde660ceec633', 'info_dict': { 'id': 'ts-5727', 'ext': 'mp4', 'title': 'Sendung: tagesschau \t04.12.2014 20:00 Uhr', 'description': 'md5:695c01bfd98b7e313c501386327aea59', 'thumbnail': r're:^https?:.*\.jpg$', }, }, { # exclusive audio 
'url': 'http://www.tagesschau.de/multimedia/audio/audio-29417.html', 'md5': '76e6eec6ebd40740671cf0a2c88617e5', 'info_dict': { 'id': 'audio-29417', 'ext': 'mp3', 'title': 'Trabi - Bye, bye Rennpappe', 'description': 'md5:8687dda862cbbe2cfb2df09b56341317', 'thumbnail': r're:^https?:.*\.jpg$', }, }, { # audio in article 'url': 'http://www.tagesschau.de/inland/bnd-303.html', 'md5': 'e0916c623e85fc1d2b26b78f299d3958', 'info_dict': { 'id': 'bnd-303', 'ext': 'mp3', 'title': 'Viele Baustellen für neuen BND-Chef', 'description': 'md5:1e69a54be3e1255b2b07cdbce5bcd8b4', 'thumbnail': r're:^https?:.*\.jpg$', }, }, { 'url': 'http://www.tagesschau.de/inland/afd-parteitag-135.html', 'info_dict': { 'id': 'afd-parteitag-135', 'title': 'Möchtegern-Underdog mit Machtanspruch', }, 'playlist_count': 2, }, { 'url': 'http://www.tagesschau.de/multimedia/sendung/tsg-3771.html', 'only_matching': True, }, { 'url': 'http://www.tagesschau.de/multimedia/sendung/tt-3827.html', 'only_matching': True, }, { 'url': 'http://www.tagesschau.de/multimedia/sendung/nm-3475.html', 'only_matching': True, }, { 'url': 'http://www.tagesschau.de/multimedia/sendung/weltspiegel-3167.html', 'only_matching': True, }, { 'url': 'http://www.tagesschau.de/multimedia/tsvorzwanzig-959.html', 'only_matching': True, }, { 'url': 'http://www.tagesschau.de/multimedia/sendung/bab/bab-3299~_bab-sendung-209.html', 'only_matching': True, }, { 'url': 'http://www.tagesschau.de/multimedia/video/video-102303~_bab-sendung-211.html', 'only_matching': True, }, { 'url': 'http://www.tagesschau.de/100sekunden/index.html', 'only_matching': True, }, { # playlist article with collapsing sections 'url': 'http://www.tagesschau.de/wirtschaft/faq-freihandelszone-eu-usa-101.html', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if TagesschauPlayerIE.suitable(url) else super(TagesschauIE, cls).suitable(url) def _extract_formats(self, download_text, media_kind): links = re.finditer( r'<div class="button" 
title="(?P<title>[^"]*)"><a href="(?P<url>[^"]+)">(?P<name>.+?)</a></div>', download_text) formats = [] for l in links: link_url = l.group('url') if not link_url: continue format_id = self._search_regex( r'.*/[^/.]+\.([^/]+)\.[^/.]+$', link_url, 'format ID', default=determine_ext(link_url)) format = { 'format_id': format_id, 'url': l.group('url'), 'format_name': l.group('name'), } title = l.group('title') if title: if media_kind.lower() == 'video': m = re.match( r'''(?x) Video:\s*(?P<vcodec>[a-zA-Z0-9/._-]+)\s*&\#10; (?P<width>[0-9]+)x(?P<height>[0-9]+)px&\#10; (?P<vbr>[0-9]+)kbps&\#10; Audio:\s*(?P<abr>[0-9]+)kbps,\s*(?P<audio_desc>[A-Za-z\.0-9]+)&\#10; Gr&ouml;&szlig;e:\s*(?P<filesize_approx>[0-9.,]+\s+[a-zA-Z]*B)''', title) if m: format.update({ 'format_note': m.group('audio_desc'), 'vcodec': m.group('vcodec'), 'width': int(m.group('width')), 'height': int(m.group('height')), 'abr': int(m.group('abr')), 'vbr': int(m.group('vbr')), 'filesize_approx': parse_filesize(m.group('filesize_approx')), }) else: m = re.match( r'(?P<format>.+?)-Format\s*:\s*(?P<abr>\d+)kbps\s*,\s*(?P<note>.+)', title) if m: format.update({ 'format_note': '%s, %s' % (m.group('format'), m.group('note')), 'vcodec': 'none', 'abr': int(m.group('abr')), }) formats.append(format) self._sort_formats(formats) return formats def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') or mobj.group('path') display_id = video_id.lstrip('-') webpage = self._download_webpage(url, display_id) title = self._html_search_regex( r'<span[^>]*class="headline"[^>]*>(.+?)</span>', webpage, 'title', default=None) or self._og_search_title(webpage) DOWNLOAD_REGEX = r'(?s)<p>Wir bieten dieses (?P<kind>Video|Audio) in folgenden Formaten zum Download an:</p>\s*<div class="controls">(?P<links>.*?)</div>\s*<p>' webpage_type = self._og_search_property('type', webpage, default=None) if webpage_type == 'website': # Article entries = [] for num, (entry_title, media_kind, download_text) in 
enumerate(re.findall( r'(?s)<p[^>]+class="infotext"[^>]*>\s*(?:<a[^>]+>)?\s*<strong>(.+?)</strong>.*?</p>.*?%s' % DOWNLOAD_REGEX, webpage), 1): entries.append({ 'id': '%s-%d' % (display_id, num), 'title': '%s' % entry_title, 'formats': self._extract_formats(download_text, media_kind), }) if len(entries) > 1: return self.playlist_result(entries, display_id, title) formats = entries[0]['formats'] else: # Assume single video download_text = self._search_regex( DOWNLOAD_REGEX, webpage, 'download links', group='links') media_kind = self._search_regex( DOWNLOAD_REGEX, webpage, 'media kind', default='Video', group='kind') formats = self._extract_formats(download_text, media_kind) thumbnail = self._og_search_thumbnail(webpage) description = self._html_search_regex( r'(?s)<p class="teasertext">(.*?)</p>', webpage, 'description', default=None) self._sort_formats(formats) return { 'id': display_id, 'title': title, 'thumbnail': thumbnail, 'formats': formats, 'description': description, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tass.py
# coding: utf-8 from __future__ import unicode_literals import json from .common import InfoExtractor from ..utils import ( js_to_json, qualities, ) class TassIE(InfoExtractor): _VALID_URL = r'https?://(?:tass\.ru|itar-tass\.com)/[^/]+/(?P<id>\d+)' _TESTS = [ { 'url': 'http://tass.ru/obschestvo/1586870', 'md5': '3b4cdd011bc59174596b6145cda474a4', 'info_dict': { 'id': '1586870', 'ext': 'mp4', 'title': 'Посетителям московского зоопарка показали красную панду', 'description': 'Приехавшую из Дублина Зейну можно увидеть в павильоне "Кошки тропиков"', 'thumbnail': r're:^https?://.*\.jpg$', }, }, { 'url': 'http://itar-tass.com/obschestvo/1600009', 'only_matching': True, }, ] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) sources = json.loads(js_to_json(self._search_regex( r'(?s)sources\s*:\s*(\[.+?\])', webpage, 'sources'))) quality = qualities(['sd', 'hd']) formats = [] for source in sources: video_url = source.get('file') if not video_url or not video_url.startswith('http') or not video_url.endswith('.mp4'): continue label = source.get('label') formats.append({ 'url': video_url, 'format_id': label, 'quality': quality(label), }) self._sort_formats(formats) return { 'id': video_id, 'title': self._og_search_title(webpage), 'description': self._og_search_description(webpage), 'thumbnail': self._og_search_thumbnail(webpage), 'formats': formats, }
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tastytrade.py
from __future__ import unicode_literals

from .common import InfoExtractor
from .ooyala import OoyalaIE


class TastyTradeIE(InfoExtractor):
    """Extractor for tastytrade.com show episodes (Ooyala-hosted)."""
    _VALID_URL = r'https?://(?:www\.)?tastytrade\.com/tt/shows/[^/]+/episodes/(?P<id>[^/?#&]+)'

    _TESTS = [{
        'url': 'https://www.tastytrade.com/tt/shows/market-measures/episodes/correlation-in-short-volatility-06-28-2017',
        'info_dict': {
            'id': 'F3bnlzbToeI6pLEfRyrlfooIILUjz4nM',
            'ext': 'mp4',
            'title': 'A History of Teaming',
            'description': 'md5:2a9033db8da81f2edffa4c99888140b3',
            'duration': 422.255,
        },
        'params': {
            'skip_download': True,
        },
        'add_ie': ['Ooyala'],
    }, {
        'url': 'https://www.tastytrade.com/tt/shows/daily-dose/episodes/daily-dose-06-30-2017',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        page = self._download_webpage(url, display_id)

        embed_code = self._search_regex(
            r'data-media-id=(["\'])(?P<code>(?:(?!\1).)+)\1',
            page, 'ooyala code', group='code')

        # Seed the result with JSON-LD metadata from the page, then defer
        # the actual media extraction to the Ooyala extractor.
        info = self._search_json_ld(page, display_id, fatal=False)
        info.update({
            '_type': 'url_transparent',
            'ie_key': OoyalaIE.ie_key(),
            'url': 'ooyala:%s' % embed_code,
            'display_id': display_id,
        })
        return info
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tbs.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .turner import TurnerBaseIE
from ..compat import (
    compat_urllib_parse_urlparse,
    compat_parse_qs,
)
from ..utils import (
    float_or_none,
    int_or_none,
    strip_or_none,
)


class TBSIE(TurnerBaseIE):
    """Extractor for tbs.com and tntdrama.com movie/show pages.

    Video metadata is read from the page's embedded Drupal settings; the
    actual streams are resolved through the shared Turner ngtv pipeline.
    """
    _VALID_URL = r'https?://(?:www\.)?(?P<site>tbs|tntdrama)\.com(?P<path>/(?:movies|shows/[^/]+/(?:clips|season-\d+/episode-\d+))/(?P<id>[^/?#]+))'
    _TESTS = [{
        'url': 'http://www.tntdrama.com/shows/the-alienist/clips/monster',
        'info_dict': {
            'id': '8d384cde33b89f3a43ce5329de42903ed5099887',
            'ext': 'mp4',
            'title': 'Monster',
            'description': 'Get a first look at the theatrical trailer for TNT’s highly anticipated new psychological thriller The Alienist, which premieres January 22 on TNT.',
            'timestamp': 1508175329,
            'upload_date': '20171016',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        }
    }, {
        'url': 'http://www.tbs.com/shows/search-party/season-1/episode-1/explicit-the-mysterious-disappearance-of-the-girl-no-one-knew',
        'only_matching': True,
    }, {
        'url': 'http://www.tntdrama.com/movies/star-wars-a-new-hope',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        site, path, display_id = re.match(self._VALID_URL, url).groups()
        webpage = self._download_webpage(url, display_id)
        drupal_settings = self._parse_json(self._search_regex(
            r'<script[^>]+?data-drupal-selector="drupal-settings-json"[^>]*?>({.+?})</script>',
            webpage, 'drupal setting'), display_id)
        # The playlist holds every video on the page; pick the entry whose
        # URL matches this page's path.
        video_data = next(v for v in drupal_settings['turner_playlist'] if v.get('url') == path)

        media_id = video_data['mediaID']
        title = video_data['title']
        # The ngtv token endpoint's query string carries the tokenizer
        # parameters needed by the Turner base extractor.
        tokenizer_query = compat_parse_qs(compat_urllib_parse_urlparse(
            drupal_settings['ngtv_token_url']).query)

        info = self._extract_ngtv_info(
            media_id, tokenizer_query, {
                'url': url,
                'site_name': site[:3].upper(),
                'auth_required': video_data.get('authRequired') == '1',
            })

        thumbnails = []
        for image_id, image in video_data.get('images', {}).items():
            image_url = image.get('url')
            # Only 'video' images are thumbnails for this item.
            if not image_url or image.get('type') != 'video':
                continue
            i = {
                'id': image_id,
                'url': image_url,
            }
            # Dimensions, when present, are encoded in the URL as WxH.
            mobj = re.search(r'(\d+)x(\d+)', image_url)
            if mobj:
                i.update({
                    'width': int(mobj.group(1)),
                    'height': int(mobj.group(2)),
                })
            thumbnails.append(i)

        info.update({
            'id': media_id,
            'title': title,
            'description': strip_or_none(video_data.get('descriptionNoTags') or video_data.get('shortDescriptionNoTags')),
            'duration': float_or_none(video_data.get('duration')) or info.get('duration'),
            'timestamp': int_or_none(video_data.get('created')),
            'season_number': int_or_none(video_data.get('season')),
            'episode_number': int_or_none(video_data.get('episode')),
            'thumbnails': thumbnails,
        })
        return info
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/tdslifeway.py
from __future__ import unicode_literals

from .common import InfoExtractor


class TDSLifewayIE(InfoExtractor):
    """Redirect extractor for tds.lifeway.com training courses.

    The numeric course id is also the Brightcove video id, so extraction is
    delegated to the Brightcove extractor via a templated player URL.
    """
    _VALID_URL = r'https?://tds\.lifeway\.com/v1/trainingdeliverysystem/courses/(?P<id>\d+)/index\.html'

    _TEST = {
        # From http://www.ministrygrid.com/training-viewer/-/training/t4g-2014-conference/the-gospel-by-numbers-4/the-gospel-by-numbers
        'url': 'http://tds.lifeway.com/v1/trainingdeliverysystem/courses/3453494717001/index.html?externalRegistration=AssetId%7C34F466F1-78F3-4619-B2AB-A8EFFA55E9E9%21InstanceId%7C0%21UserId%7Caaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa&grouping=http%3A%2F%2Flifeway.com%2Fvideo%2F3453494717001&activity_id=http%3A%2F%2Flifeway.com%2Fvideo%2F3453494717001&content_endpoint=http%3A%2F%2Ftds.lifeway.com%2Fv1%2Ftrainingdeliverysystem%2FScormEngineInterface%2FTCAPI%2Fcontent%2F&actor=%7B%22name%22%3A%5B%22Guest%20Guest%22%5D%2C%22account%22%3A%5B%7B%22accountServiceHomePage%22%3A%22http%3A%2F%2Fscorm.lifeway.com%2F%22%2C%22accountName%22%3A%22aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa%22%7D%5D%2C%22objectType%22%3A%22Agent%22%7D&content_token=462a50b2-b6f9-4970-99b1-930882c499fb&registration=93d6ec8e-7f7b-4ed3-bbc8-a857913c0b2a&externalConfiguration=access%7CFREE%21adLength%7C-1%21assignOrgId%7C4AE36F78-299A-425D-91EF-E14A899B725F%21assignOrgParentId%7C%21courseId%7C%21isAnonymous%7Cfalse%21previewAsset%7Cfalse%21previewLength%7C-1%21previewMode%7Cfalse%21royalty%7CFREE%21sessionId%7C671422F9-8E79-48D4-9C2C-4EE6111EA1CD%21trackId%7C&auth=Basic%20OjhmZjk5MDBmLTBlYTMtNDJhYS04YjFlLWE4MWQ3NGNkOGRjYw%3D%3D&endpoint=http%3A%2F%2Ftds.lifeway.com%2Fv1%2Ftrainingdeliverysystem%2FScormEngineInterface%2FTCAPI%2F',
        'info_dict': {
            'id': '3453494717001',
            'ext': 'mp4',
            'title': 'The Gospel by Numbers',
            'thumbnail': r're:^https?://.*\.jpg',
            'upload_date': '20140410',
            'description': 'Coming soon from T4G 2014!',
            'uploader_id': '2034960640001',
            'timestamp': 1397145591,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
        'add_ie': ['BrightcoveNew'],
    }
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/2034960640001/default_default/index.html?videoId=%s'

    def _real_extract(self, url):
        video_id = self._match_id(url)
        brightcove_url = self.BRIGHTCOVE_URL_TEMPLATE % video_id
        return self.url_result(brightcove_url, 'BrightcoveNew', video_id)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/teachable.py
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from .wistia import WistiaIE
from ..compat import compat_str
from ..utils import (
    clean_html,
    ExtractorError,
    get_element_by_class,
    urlencode_postdata,
    urljoin,
)


class TeachableBaseIE(InfoExtractor):
    """Shared login/session handling for Teachable-powered course sites."""
    _NETRC_MACHINE = 'teachable'
    _URL_PREFIX = 'teachable:'

    # Map of known Teachable sites to their .netrc machine names.
    _SITES = {
        # Only notable ones here
        'upskillcourses.com': 'upskill',
        'academy.gns3.com': 'gns3',
        'academyhacker.com': 'academyhacker',
        'stackskills.com': 'stackskills',
        'market.saleshacker.com': 'saleshacker',
        'learnability.org': 'learnability',
        'edurila.com': 'edurila',
        'courses.workitdaily.com': 'workitdaily',
    }

    # (prefix, site-alternation) pair interpolated into subclass _VALID_URLs.
    _VALID_URL_SUB_TUPLE = (_URL_PREFIX, '|'.join(re.escape(site) for site in _SITES.keys()))

    def _real_initialize(self):
        # Tracks whether a login has already been performed this session.
        self._logged_in = False

    def _login(self, site):
        """Log in to the given site with netrc/CLI credentials, if any.

        Silently returns when no credentials are configured or a session is
        already active; raises ExtractorError on a failed login attempt.
        """
        if self._logged_in:
            return

        username, password = self._get_login_info(
            netrc_machine=self._SITES.get(site, site))
        if username is None:
            return

        login_page, urlh = self._download_webpage_handle(
            'https://%s/sign_in' % site, None,
            'Downloading %s login page' % site)

        def is_logged(webpage):
            # Heuristics: any sign-out marker means an active session.
            return any(re.search(p, webpage) for p in (
                r'class=["\']user-signout',
                r'<a[^>]+\bhref=["\']/sign_out',
                r'Log\s+[Oo]ut\s*<'))

        if is_logged(login_page):
            self._logged_in = True
            return

        # Use the post-redirect URL as base for the form action.
        login_url = compat_str(urlh.geturl())

        login_form = self._hidden_inputs(login_page)

        login_form.update({
            'user[email]': username,
            'user[password]': password,
        })

        post_url = self._search_regex(
            r'<form[^>]+action=(["\'])(?P<url>(?:(?!\1).)+)\1', login_page,
            'post url', default=login_url, group='url')

        if not post_url.startswith('http'):
            post_url = urljoin(login_url, post_url)

        response = self._download_webpage(
            post_url, None, 'Logging in to %s' % site,
            data=urlencode_postdata(login_form),
            headers={
                'Content-Type': 'application/x-www-form-urlencoded',
                'Referer': login_url,
            })

        if '>I accept the new Privacy Policy<' in response:
            raise ExtractorError(
                'Unable to login: %s asks you to accept new Privacy Policy. '
                'Go to https://%s/ and accept.' % (site, site), expected=True)

        # Successful login
        if is_logged(response):
            self._logged_in = True
            return

        message = get_element_by_class('alert', response)
        if message is not None:
            raise ExtractorError(
                'Unable to login: %s' % clean_html(message), expected=True)

        raise ExtractorError('Unable to log in')


class TeachableIE(TeachableBaseIE):
    """Extractor for single Teachable lectures (Wistia-hosted)."""
    _VALID_URL = r'''(?x)
                    (?:
                        %shttps?://(?P<site_t>[^/]+)|
                        https?://(?:www\.)?(?P<site>%s)
                    )
                    /courses/[^/]+/lectures/(?P<id>\d+)
                    ''' % TeachableBaseIE._VALID_URL_SUB_TUPLE

    _TESTS = [{
        'url': 'http://upskillcourses.com/courses/essential-web-developer-course/lectures/1747100',
        'info_dict': {
            'id': 'uzw6zw58or',
            'ext': 'mp4',
            'title': 'Welcome to the Course!',
            'description': 'md5:65edb0affa582974de4625b9cdea1107',
            'duration': 138.763,
            'timestamp': 1479846621,
            'upload_date': '20161122',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://upskillcourses.com/courses/119763/lectures/1747100',
        'only_matching': True,
    }, {
        'url': 'https://academy.gns3.com/courses/423415/lectures/6885939',
        'only_matching': True,
    }, {
        'url': 'teachable:https://upskillcourses.com/courses/essential-web-developer-course/lectures/1747100',
        'only_matching': True,
    }]

    @staticmethod
    def _is_teachable(webpage):
        # A page is Teachable-powered if it carries both the tracker hook
        # and a teachablecdn asset link.
        return 'teachableTracker.linker:autoLink' in webpage and re.search(
            r'<link[^>]+href=["\']https?://process\.fs\.teachablecdn\.com',
            webpage)

    @staticmethod
    def _extract_url(webpage, source_url):
        """Return a prefixed teachable: URL for embedding pages, or None."""
        if not TeachableIE._is_teachable(webpage):
            return
        if re.match(r'https?://[^/]+/(?:courses|p)', source_url):
            return '%s%s' % (TeachableBaseIE._URL_PREFIX, source_url)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        site = mobj.group('site') or mobj.group('site_t')
        video_id = mobj.group('id')

        self._login(site)

        # Strip the internal 'teachable:' prefix before fetching.
        prefixed = url.startswith(self._URL_PREFIX)
        if prefixed:
            url = url[len(self._URL_PREFIX):]

        webpage = self._download_webpage(url, video_id)

        wistia_url = WistiaIE._extract_url(webpage)
        if not wistia_url:
            # No embed found: distinguish "locked, login required" from a
            # genuinely missing video.
            if any(re.search(p, webpage) for p in (
                    r'class=["\']lecture-contents-locked',
                    r'>\s*Lecture contents locked',
                    r'id=["\']lecture-locked')):
                self.raise_login_required('Lecture contents locked')

        title = self._og_search_title(webpage, default=None)

        return {
            '_type': 'url_transparent',
            'url': wistia_url,
            'ie_key': WistiaIE.ie_key(),
            'title': title,
        }


class TeachableCourseIE(TeachableBaseIE):
    """Extractor for whole Teachable courses, yielding a lecture playlist."""
    _VALID_URL = r'''(?x)
                        (?:
                            %shttps?://(?P<site_t>[^/]+)|
                            https?://(?:www\.)?(?P<site>%s)
                        )
                        /(?:courses|p)/(?:enrolled/)?(?P<id>[^/?#&]+)
                    ''' % TeachableBaseIE._VALID_URL_SUB_TUPLE
    _TESTS = [{
        'url': 'http://upskillcourses.com/courses/essential-web-developer-course/',
        'info_dict': {
            'id': 'essential-web-developer-course',
            'title': 'The Essential Web Developer Course (Free)',
        },
        'playlist_count': 192,
    }, {
        'url': 'http://upskillcourses.com/courses/119763/',
        'only_matching': True,
    }, {
        'url': 'http://upskillcourses.com/courses/enrolled/119763',
        'only_matching': True,
    }, {
        'url': 'https://academy.gns3.com/courses/enrolled/423415',
        'only_matching': True,
    }, {
        'url': 'teachable:https://learn.vrdev.school/p/gear-vr-developer-mini',
        'only_matching': True,
    }, {
        'url': 'teachable:https://filmsimplified.com/p/davinci-resolve-15-crash-course',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Single-lecture URLs are handled by TeachableIE.
        return False if TeachableIE.suitable(url) else super(
            TeachableCourseIE, cls).suitable(url)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        site = mobj.group('site') or mobj.group('site_t')
        course_id = mobj.group('id')

        self._login(site)

        prefixed = url.startswith(self._URL_PREFIX)
        if prefixed:
            prefix = self._URL_PREFIX
            url = url[len(prefix):]

        webpage = self._download_webpage(url, course_id)

        url_base = 'https://%s/' % site

        entries = []

        # Each curriculum section item is an <li class="...section-item...">.
        for mobj in re.finditer(
                r'(?s)(?P<li><li[^>]+class=(["\'])(?:(?!\2).)*?section-item[^>]+>.+?</li>)',
                webpage):
            li = mobj.group('li')
            # Only items with a play icon are actual video lectures.
            if 'fa-youtube-play' not in li:
                continue
            lecture_url = self._search_regex(
                r'<a[^>]+href=(["\'])(?P<url>(?:(?!\1).)+)\1', li,
                'lecture url', default=None, group='url')
            if not lecture_url:
                continue
            lecture_id = self._search_regex(
                r'/lectures/(\d+)', lecture_url, 'lecture id', default=None)
            title = self._html_search_regex(
                r'<span[^>]+class=["\']lecture-name[^>]+>([^<]+)', li,
                'title', default=None)
            entry_url = urljoin(url_base, lecture_url)
            # Keep the teachable: prefix so TeachableIE handles the entry.
            if prefixed:
                entry_url = self._URL_PREFIX + entry_url
            entries.append(
                self.url_result(
                    entry_url,
                    ie=TeachableIE.ie_key(), video_id=lecture_id,
                    video_title=clean_html(title)))

        course_title = self._html_search_regex(
            (r'(?s)<img[^>]+class=["\']course-image[^>]+>\s*<h\d>(.+?)</h',
             r'(?s)<h\d[^>]+class=["\']course-title[^>]+>(.+?)</h'),
            webpage, 'course title', fatal=False)

        return self.playlist_result(entries, course_id, course_title)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/teachertube.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    ExtractorError,
    qualities,
)


class TeacherTubeIE(InfoExtractor):
    """Extractor for individual teachertube.com videos and audio."""
    IE_NAME = 'teachertube'
    IE_DESC = 'teachertube.com videos'

    _VALID_URL = r'https?://(?:www\.)?teachertube\.com/(viewVideo\.php\?video_id=|music\.php\?music_id=|video/(?:[\da-z-]+-)?|audio/)(?P<id>\d+)'

    _TESTS = [{
        # flowplayer
        'url': 'http://www.teachertube.com/viewVideo.php?video_id=339997',
        'md5': 'f9434ef992fd65936d72999951ee254c',
        'info_dict': {
            'id': '339997',
            'ext': 'mp4',
            'title': 'Measures of dispersion from a frequency table',
            'description': 'Measures of dispersion from a frequency table',
            'thumbnail': r're:https?://.*\.(?:jpg|png)',
        },
    }, {
        # jwplayer
        'url': 'http://www.teachertube.com/music.php?music_id=8805',
        'md5': '01e8352006c65757caf7b961f6050e21',
        'info_dict': {
            'id': '8805',
            'ext': 'mp3',
            'title': 'PER ASPERA AD ASTRA',
            'description': 'RADIJSKA EMISIJA ZRAKOPLOVNE TEHNI?KE ?KOLE P',
        },
    }, {
        # unavailable video
        'url': 'http://www.teachertube.com/video/intro-video-schleicher-297790',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # Surface the site's own error message (e.g. unavailable video).
        error = self._search_regex(
            r'<div\b[^>]+\bclass=["\']msgBox error[^>]+>([^<]+)', webpage,
            'error', default=None)
        if error:
            raise ExtractorError('%s said: %s' % (self.IE_NAME, error), expected=True)

        title = self._html_search_meta('title', webpage, 'title', fatal=True)
        # The meta title carries a site-name suffix; strip it if present.
        TITLE_SUFFIX = ' - TeacherTube'
        if title.endswith(TITLE_SUFFIX):
            title = title[:-len(TITLE_SUFFIX)].strip()

        description = self._html_search_meta('description', webpage, 'description')
        if description:
            description = description.strip()

        quality = qualities(['mp3', 'flv', 'mp4'])

        # Collect media URLs from flowplayer and jwplayer embed variants.
        media_urls = re.findall(r'data-contenturl="([^"]+)"', webpage)
        media_urls.extend(re.findall(r'var\s+filePath\s*=\s*"([^"]+)"', webpage))
        media_urls.extend(re.findall(r'\'file\'\s*:\s*["\']([^"\']+)["\'],', webpage))

        formats = [
            {
                'url': media_url,
                'quality': quality(determine_ext(media_url))
            } for media_url in set(media_urls)
        ]

        self._sort_formats(formats)

        thumbnail = self._og_search_thumbnail(
            webpage, default=None) or self._html_search_meta(
            'thumbnail', webpage)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'formats': formats,
        }


class TeacherTubeUserIE(InfoExtractor):
    """Playlist extractor for teachertube.com user profiles/collections."""
    IE_NAME = 'teachertube:user:collection'
    IE_DESC = 'teachertube.com user and collection videos'

    _VALID_URL = r'https?://(?:www\.)?teachertube\.com/(user/profile|collection)/(?P<user>[0-9a-zA-Z]+)/?'

    # Matches the sidebar thumbnail entries and captures the media page URL.
    _MEDIA_RE = r'''(?sx)
        class="?sidebar_thumb_time"?>[0-9:]+</div>
        \s*
        <a\s+href="(https?://(?:www\.)?teachertube\.com/(?:video|audio)/[^"]+)"
    '''
    _TEST = {
        'url': 'http://www.teachertube.com/user/profile/rbhagwati2',
        'info_dict': {
            'id': 'rbhagwati2'
        },
        'playlist_mincount': 179,
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        user_id = mobj.group('user')

        urls = []
        webpage = self._download_webpage(url, user_id)
        urls.extend(re.findall(self._MEDIA_RE, webpage))

        # Additional pages are fetched via the ajax endpoint; the last
        # pagination link is dropped — presumably it duplicates a page
        # already collected (TODO confirm against the live site).
        pages = re.findall(r'/ajax-user/user-videos/%s\?page=([0-9]+)' % user_id, webpage)[:-1]
        for p in pages:
            more = 'http://www.teachertube.com/ajax-user/user-videos/%s?page=%s' % (user_id, p)
            webpage = self._download_webpage(more, user_id, 'Downloading page %s/%s' % (p, len(pages)))
            video_urls = re.findall(self._MEDIA_RE, webpage)
            urls.extend(video_urls)

        entries = [self.url_result(vurl, 'TeacherTube') for vurl in urls]
        return self.playlist_result(entries, user_id)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/teachingchannel.py
from __future__ import unicode_literals import re from .common import InfoExtractor from .ooyala import OoyalaIE class TeachingChannelIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?teachingchannel\.org/videos/(?P<title>.+)' _TEST = { 'url': 'https://www.teachingchannel.org/videos/teacher-teaming-evolution', 'md5': '3d6361864d7cac20b57c8784da17166f', 'info_dict': { 'id': 'F3bnlzbToeI6pLEfRyrlfooIILUjz4nM', 'ext': 'mp4', 'title': 'A History of Teaming', 'description': 'md5:2a9033db8da81f2edffa4c99888140b3', 'duration': 422.255, }, 'params': { 'skip_download': True, }, 'add_ie': ['Ooyala'], } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) title = mobj.group('title') webpage = self._download_webpage(url, title) ooyala_code = self._search_regex( r'data-embed-code=\'(.+?)\'', webpage, 'ooyala code') return OoyalaIE._build_url_result(ooyala_code)
[]
[]
[]
archives/zwelinhtet129_PythonSampleB.zip
youtube-dl-master/youtube_dl/extractor/teamcoco.py
# coding: utf-8
from __future__ import unicode_literals

import json

from .turner import TurnerBaseIE
from ..utils import (
    determine_ext,
    ExtractorError,
    int_or_none,
    mimetype2ext,
    parse_duration,
    parse_iso8601,
    qualities,
)


class TeamcocoIE(TurnerBaseIE):
    """Extractor for teamcoco.com (Conan) videos via the site's GraphQL API."""
    _VALID_URL = r'https?://(?:\w+\.)?teamcoco\.com/(?P<id>([^/]+/)*[^/?#]+)'
    _TESTS = [
        {
            'url': 'http://teamcoco.com/video/mary-kay-remote',
            'md5': '55d532f81992f5c92046ad02fec34d7d',
            'info_dict': {
                'id': '80187',
                'ext': 'mp4',
                'title': 'Conan Becomes A Mary Kay Beauty Consultant',
                'description': 'Mary Kay is perhaps the most trusted name in female beauty, so of course Conan is a natural choice to sell their products.',
                'duration': 495.0,
                'upload_date': '20140402',
                'timestamp': 1396407600,
            }
        }, {
            'url': 'http://teamcoco.com/video/louis-ck-interview-george-w-bush',
            'md5': 'cde9ba0fa3506f5f017ce11ead928f9a',
            'info_dict': {
                'id': '19705',
                'ext': 'mp4',
                'description': 'Louis C.K. got starstruck by George W. Bush, so what? Part one.',
                'title': 'Louis C.K. Interview Pt. 1 11/3/11',
                'duration': 288,
                'upload_date': '20111104',
                'timestamp': 1320405840,
            }
        }, {
            'url': 'http://teamcoco.com/video/timothy-olyphant-drinking-whiskey',
            'info_dict': {
                'id': '88748',
                'ext': 'mp4',
                'title': 'Timothy Olyphant Raises A Toast To “Justified”',
                'description': 'md5:15501f23f020e793aeca761205e42c24',
                'upload_date': '20150415',
                'timestamp': 1429088400,
            },
            'params': {
                'skip_download': True,  # m3u8 downloads
            }
        }, {
            'url': 'http://teamcoco.com/video/full-episode-mon-6-1-joel-mchale-jake-tapper-and-musical-guest-courtney-barnett?playlist=x;eyJ0eXBlIjoidGFnIiwiaWQiOjl9',
            'info_dict': {
                'id': '89341',
                'ext': 'mp4',
                'title': 'Full Episode - Mon. 6/1 - Joel McHale, Jake Tapper, And Musical Guest Courtney Barnett',
                'description': 'Guests: Joel McHale, Jake Tapper, And Musical Guest Courtney Barnett',
            },
            'params': {
                'skip_download': True,  # m3u8 downloads
            },
            'skip': 'This video is no longer available.',
        }, {
            'url': 'http://teamcoco.com/video/the-conan-audiencey-awards-for-04/25/18',
            'only_matching': True,
        }, {
            'url': 'http://teamcoco.com/italy/conan-jordan-schlansky-hit-the-streets-of-florence',
            'only_matching': True,
        }, {
            'url': 'http://teamcoco.com/haiti/conan-s-haitian-history-lesson',
            'only_matching': True,
        }, {
            'url': 'http://teamcoco.com/israel/conan-hits-the-streets-beaches-of-tel-aviv',
            'only_matching': True,
        }, {
            'url': 'https://conan25.teamcoco.com/video/ice-cube-kevin-hart-conan-share-lyft',
            'only_matching': True,
        }
    ]

    def _graphql_call(self, query_template, object_type, object_id):
        """POST a GraphQL query and return data for 'find<object_type>'.

        query_template has two %s slots: the find* field name and object_id.
        """
        find_object = 'find' + object_type
        return self._download_json(
            'https://teamcoco.com/graphql', object_id, data=json.dumps({
                'query': query_template % (find_object, object_id)
            }).encode(), headers={
                'Content-Type': 'application/json',
            })['data'][find_object]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        # Resolve the URL slug to a record; NotFoundSlug carries a status
        # field, which is how unavailable videos are signalled.
        response = self._graphql_call('''{ %s(slug: "%s") { ... on RecordSlug { record { id title teaser publishOn thumb { preview } file { url } tags { name } duration turnerMediaId turnerMediaAuthToken } } ... on NotFoundSlug { status } } }''', 'Slug', display_id)
        if response.get('status'):
            raise ExtractorError('This video is no longer available.', expected=True)

        record = response['record']
        video_id = record['id']

        info = {
            'id': video_id,
            'display_id': display_id,
            'title': record['title'],
            'thumbnail': record.get('thumb', {}).get('preview'),
            'description': record.get('teaser'),
            'duration': parse_duration(record.get('duration')),
            'timestamp': parse_iso8601(record.get('publishOn')),
        }

        media_id = record.get('turnerMediaId')
        if media_id:
            # Records with a Turner media id are served through Turner's NGTV
            # platform (handled by TurnerBaseIE); geo-bypass to US first.
            self._initialize_geo_bypass({
                'countries': ['US'],
            })
            info.update(self._extract_ngtv_info(media_id, {
                'accessToken': record['turnerMediaAuthToken'],
                'accessTokenType': 'jws',
            }))
        else:
            # Otherwise try the "_truman" endpoint first, then fall back to a
            # second GraphQL call for the raw sources.
            d = self._download_json(
                'https://teamcoco.com/_truman/d/' + video_id,
                video_id, fatal=False) or {}
            video_sources = d.get('meta') or {}
            if not video_sources:
                video_sources = self._graphql_call('''{ %s(id: "%s") { src } }''', 'RecordVideoSource', video_id) or {}

            formats = []
            get_quality = qualities(['low', 'sd', 'hd', 'uhd'])
            for format_id, src in video_sources.get('src', {}).items():
                if not isinstance(src, dict):
                    continue
                src_url = src.get('src')
                if not src_url:
                    continue
                ext = determine_ext(src_url, mimetype2ext(src.get('type')))
                if format_id == 'hls' or ext == 'm3u8':
                    # compat_urllib_parse.urljoin does not work here
                    if src_url.startswith('/'):
                        src_url = 'http://ht.cdn.turner.com/tbs/big/teamcoco' + src_url
                    formats.extend(self._extract_m3u8_formats(
                        src_url, video_id, 'mp4',
                        m3u8_id=format_id, fatal=False))
                else:
                    if src_url.startswith('/mp4:protected/'):
                        # TODO Correct extraction for these files
                        continue
                    # Bitrate is encoded in the file name, e.g. ...-1500k.mp4.
                    tbr = int_or_none(self._search_regex(
                        r'(\d+)k\.mp4', src_url, 'tbr', default=None))

                    formats.append({
                        'url': src_url,
                        'ext': ext,
                        'tbr': tbr,
                        'format_id': format_id,
                        'quality': get_quality(format_id),
                    })
            if not formats:
                # Last resort: the record's own file URL as an HLS manifest.
                formats = self._extract_m3u8_formats(
                    record['file']['url'], video_id, 'mp4', fatal=False)
            self._sort_formats(formats)
            info['formats'] = formats

        return info
[]
[]
[]