)
__license__ = 'Public Domain'
-__version__ = '2012.01.08b'
+__version__ = '2012.02.27'
UPDATE_URL = 'https://raw.github.com/rg3/youtube-dl/master/youtube-dl'
+
import cookielib
import datetime
+import getpass
import gzip
import htmlentitydefs
import HTMLParser
import locale
import math
import netrc
+import optparse
import os
import os.path
import re
+import shlex
import socket
import string
import subprocess
"""
assert type(s) == type(u'')
- return s.encode(sys.getfilesystemencoding(), 'ignore')
+
+ if sys.platform == 'win32' and sys.getwindowsversion().major >= 5:
+ # Pass u'' directly to use Unicode APIs on Windows 2000 and up
+ # (Detecting Windows NT 4 is tricky because 'major >= 4' would
+ # match Windows 9x series as well. Besides, NT 4 is obsolete.)
+ return s
+ else:
+ return s.encode(sys.getfilesystemencoding(), 'ignore')
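
# A hedged sketch, not part of the patch: the branch above, using a
# stand-in name encode_filename for the function being edited here.
import sys

def encode_filename(s):
    assert type(s) == type(u'')
    if sys.platform == 'win32' and sys.getwindowsversion().major >= 5:
        # NT 5+ (Windows 2000 and later): keep unicode so the wide file
        # APIs are used and no characters are lost.
        return s
    # Elsewhere: narrow to the filesystem encoding, dropping what won't fit.
    return s.encode(sys.getfilesystemencoding(), 'ignore')

print encode_filename(u'caf\xe9.mp4')
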
class DownloadError(Exception):
"""Download Error exception.
raise MaxDownloadsReached()
filename = self.prepare_filename(info_dict)
-
+
# Forced printings
if self.params.get('forcetitle', False):
print info_dict['title'].encode(preferredencoding(), 'xmlcharrefreplace')
except (ContentTooShortError, ), err:
self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
return
-
+
if success:
try:
self.post_process(filename, info_dict)
# the connection was interrupted and resuming appears to be
# possible. This is part of rtmpdump's normal usage, AFAIK.
basic_args = ['rtmpdump', '-q'] + [[], ['-W', player_url]][player_url is not None] + ['-r', url, '-o', tmpfilename]
- retval = subprocess.call(basic_args + [[], ['-e', '-k', '1']][self.params.get('continuedl', False)])
+ args = basic_args + [[], ['-e', '-k', '1']][self.params.get('continuedl', False)]
+ if self.params.get('verbose', False):
+ try:
+ import pipes
+ shell_quote = lambda args: ' '.join(map(pipes.quote, args))
+ except ImportError:
+ shell_quote = repr
+ self.to_screen(u'[debug] rtmpdump command line: ' + shell_quote(args))
+ retval = subprocess.call(args)
while retval == 2 or retval == 1:
prevsize = os.path.getsize(_encodeFilename(tmpfilename))
self.to_screen(u'\r[rtmpdump] %s bytes' % prevsize, skip_eol=True)
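
# A hedged sketch of what the shell_quote fallback above prints. pipes.quote
# is in the Python 2 standard library; repr is only a rough stand-in when the
# pipes module is unavailable. The rtmp URL below is made up.
import pipes
args = ['rtmpdump', '-q', '-r', 'rtmp://example.com/app', '-o', 'my file.flv']
print ' '.join(map(pipes.quote, args))
# rtmpdump -q -r rtmp://example.com/app -o 'my file.flv'
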
'43': '360x640',
'44': '480x854',
'45': '720x1280',
- }
+ }
IE_NAME = u'youtube'
def report_lang(self):
lxml.etree
except NameError:
video_description = u'No description available.'
- if self._downloader.params.get('forcedescription', False) or self._downloader.params.get('writedescription', False):
- mobj = re.search(r'<meta name="description" content="(.*)"(?:\s*/)?>', video_webpage)
- if mobj is not None:
- video_description = mobj.group(1).decode('utf-8')
+ mobj = re.search(r'<meta name="description" content="(.*?)">', video_webpage)
+ if mobj is not None:
+ video_description = mobj.group(1).decode('utf-8')
else:
html_parser = lxml.etree.HTMLParser(encoding='utf-8')
vwebpage_doc = lxml.etree.parse(StringIO.StringIO(video_webpage), html_parser)
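
# A hedged sketch of why the description regex above became non-greedy: a
# greedy (.*) can run past the closing quote into later markup on the same
# line. The page snippet below is made up.
import re
page = '<meta name="description" content="A video"><meta name="keywords" content="a,b">'
print re.search(r'<meta name="description" content="(.*)">', page).group(1)
# A video"><meta name="keywords" content="a,b
print re.search(r'<meta name="description" content="(.*?)">', page).group(1)
# A video
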
"""Information extractor for vimeo.com."""
# _VALID_URL matches Vimeo URLs
- _VALID_URL = r'(?:https?://)?(?:(?:www|player).)?vimeo\.com/(?:groups/[^/]+/)?(?:videos?/)?(?:moogaloop.swf\?clip_id=)?([0-9]+)'
+	_VALID_URL = r'(?:https?://)?(?:(?:www|player)\.)?vimeo\.com/(?:groups/[^/]+/)?(?:videos?/)?([0-9]+)'
IE_NAME = u'vimeo'
def __init__(self, downloader=None):
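
# A hedged sketch: sample URLs the tightened Vimeo _VALID_URL above still
# matches (the moogaloop.swf?clip_id= form was dropped). The URLs are
# made-up examples.
import re
valid_url = r'(?:https?://)?(?:(?:www|player)\.)?vimeo\.com/(?:groups/[^/]+/)?(?:videos?/)?([0-9]+)'
for url in ('http://vimeo.com/12345',
            'https://player.vimeo.com/video/12345',
            'http://vimeo.com/groups/cats/videos/12345'):
    print re.match(valid_url, url).group(1)  # 12345 each time
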
_VALID_URL = r'(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL)?([0-9A-Za-z-_]+)(?:/.*?/([0-9A-Za-z_-]+))?.*'
_TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
- _VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
+ _VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&list=PL%s&'
_MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
_youtube_ie = None
IE_NAME = u'youtube:playlist'
# Extract video identifiers
ids_in_page = []
- for mobj in re.finditer(self._VIDEO_INDICATOR, page):
+ video_indicator = self._VIDEO_INDICATOR_TEMPLATE % playlist_id
+ for mobj in re.finditer(video_indicator, page):
if mobj.group(1) not in ids_in_page:
ids_in_page.append(mobj.group(1))
video_ids.extend(ids_in_page)
playliststart = self._downloader.params.get('playliststart', 1) - 1
playlistend = self._downloader.params.get('playlistend', -1)
- video_ids = video_ids[playliststart:playlistend]
+
+ if playlistend == -1:
+ video_ids = video_ids[playliststart:]
+ else:
+ video_ids = video_ids[playliststart:playlistend]
for id in video_ids:
self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
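
# A hedged sketch of why the playlistend == -1 special case above is needed:
# slicing with an end index of -1 silently drops the final entry.
ids = ['a', 'b', 'c']
print ids[0:-1]  # ['a', 'b'] -- the old one-liner lost the last video
print ids[0:]    # ['a', 'b', 'c'] -- keep everything when no end is given
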
data = json_data['Post']
else:
data = json_data
-
+
upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
video_url = data['media']['url']
umobj = re.match(self._URL_EXT, video_url)
if umobj is None:
raise ValueError('Cannot determine filename extension')
ext = umobj.group(1)
-
+
info = {
'id': data['item_id'],
'url': video_url,
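
# A hedged sketch of the datestamp parsing above, on a made-up input that
# fits the '%m-%d-%y %H:%M%p' shape.
import datetime
print datetime.datetime.strptime('02-27-12 11:05AM', '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
# 20120227
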
def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader)
-
+
def report_download_webpage(self, video_id):
"""Report webpage download."""
self._downloader.to_screen(u'[myvideo] %s: Downloading webpage' % video_id)
def report_extraction(self, episode_id):
self._downloader.to_screen(u'[comedycentral] %s: Extracting information' % episode_id)
-
+
def report_config_download(self, episode_id):
self._downloader.to_screen(u'[comedycentral] %s: Downloading configuration' % episode_id)
mobj = re.search('track-description-value"><p>(.*?)</p>', webpage)
if mobj:
description = mobj.group(1)
-
+
# upload date
upload_date = None
mobj = re.search("pretty-date'>on ([\w]+ [\d]+, [\d]+ \d+:\d+)</abbr></h2>", webpage)
url_list = jsonData[fmt][bitrate]
except TypeError: # we have no bitrate info.
url_list = jsonData[fmt]
-
+
return url_list
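
# A hedged sketch of the two JSON shapes the try/except above accepts: a
# per-bitrate mapping, or a flat list (which raises TypeError when indexed
# with a bitrate string). The data below is made up.
json_data = {'flv': {'700': ['http://example.com/700.flv']},
             'mp4': ['http://example.com/only.mp4']}

def get_url_list(json_data, fmt, bitrate):
    try:
        return json_data[fmt][bitrate]
    except TypeError:  # no bitrate info, json_data[fmt] is already the list
        return json_data[fmt]

print get_url_list(json_data, 'flv', '700')  # ['http://example.com/700.flv']
print get_url_list(json_data, 'mp4', '700')  # ['http://example.com/only.mp4']
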
def check_urls(self, url_list):
info = {
'id': _simplify_title(course + '_' + video),
}
-
+
self.report_extraction(info['id'])
baseUrl = 'http://openclassroom.stanford.edu/MainFolder/courses/' + course + '/videos/'
xmlUrl = baseUrl + video + '.xml'
self._downloader.trouble(u'ERROR: unable to extract performer')
return
performer = _unescapeHTML(mobj.group(1).decode('iso-8859-1'))
- video_title = performer + ' - ' + song_name
+ video_title = performer + ' - ' + song_name
mobj = re.search(r'<meta name="mtvn_uri" content="([^"]+)"/>', webpage)
if mobj is None:
try:
urlh = urllib.urlopen(UPDATE_URL)
newcontent = urlh.read()
-
+
vmatch = re.search("__version__ = '([^']+)'", newcontent)
if vmatch is not None and vmatch.group(1) == __version__:
downloader.to_screen(u'youtube-dl is up-to-date (' + __version__ + ')')
downloader.to_screen(u'Updated youtube-dl. Restart youtube-dl to use the new version.')
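
# A hedged sketch: the update check above scrapes __version__ out of the
# freshly downloaded script text and compares it to the local value.
import re
newcontent = "__version__ = '2012.02.27'\n"
vmatch = re.search("__version__ = '([^']+)'", newcontent)
print vmatch.group(1)  # 2012.02.27 -> equal to local __version__ means up-to-date
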
def parseOpts():
- # Deferred imports
- import getpass
- import optparse
- import shlex
-
def _readOptions(filename_bytes):
try:
optionf = open(filename_bytes)
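
# A hedged sketch, assuming _readOptions tokenizes the configuration file
# with shlex.split (shlex is now a module-level import above); the rest of
# the function body is outside this hunk.
import shlex
print shlex.split('--continue -o "%(title)s.%(ext)s"')
# ['--continue', '-o', '%(title)s.%(ext)s']
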
verbosity.add_option('--console-title',
action='store_true', dest='consoletitle',
help='display progress in console titlebar', default=False)
+ verbosity.add_option('-v', '--verbose',
+ action='store_true', dest='verbose', help='print various debugging information', default=False)
filesystem.add_option('-t', '--title',
filesystem.add_option('-w', '--no-overwrites',
action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
filesystem.add_option('-c', '--continue',
- action='store_true', dest='continue_dl', help='resume partially downloaded files', default=False)
+ action='store_true', dest='continue_dl', help='resume partially downloaded files', default=True)
filesystem.add_option('--no-continue',
action='store_false', dest='continue_dl',
help='do not resume partially downloaded files (restart from beginning)')
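
# A hedged sketch of the paired store_true/store_false options above, where
# resuming is now on by default and --no-continue turns it back off.
import optparse
parser = optparse.OptionParser()
parser.add_option('-c', '--continue', action='store_true', dest='continue_dl', default=True)
parser.add_option('--no-continue', action='store_false', dest='continue_dl')
print parser.parse_args([])[0].continue_dl                 # True
print parser.parse_args(['--no-continue'])[0].continue_dl  # False
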
# General configuration
cookie_processor = urllib2.HTTPCookieProcessor(jar)
- opener = urllib2.build_opener(urllib2.ProxyHandler(), cookie_processor, YoutubeDLHandler())
+ proxy_handler = urllib2.ProxyHandler()
+ opener = urllib2.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
urllib2.install_opener(opener)
socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
+ if opts.verbose:
+ print(u'[debug] Proxy map: ' + str(proxy_handler.proxies))
+
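
# A hedged sketch: with no arguments urllib2.ProxyHandler() picks up the
# *_proxy environment variables, and its .proxies dict is what the verbose
# line above prints. The proxy address is made up.
import os, urllib2
os.environ['http_proxy'] = 'http://localhost:3128'
print urllib2.ProxyHandler().proxies  # {'http': 'http://localhost:3128'}
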
extractors = gen_extractors()
if opts.list_extractors:
'rejecttitle': opts.rejecttitle,
'max_downloads': opts.max_downloads,
'prefer_free_formats': opts.prefer_free_formats,
+ 'verbose': opts.verbose,
})
for extractor in extractors:
fd.add_info_extractor(extractor)
parser.error(u'you must provide at least one URL')
else:
sys.exit()
-
+
try:
retcode = fd.download(all_urls)
except MaxDownloadsReached: