1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 # Author: Ricardo Garcia Gonzalez
4 # Author: Danny Colligan
5 # License: Public domain code
6 import htmlentitydefs
7 import httplib
8 import locale
9 import math
10 import netrc
11 import os
12 import os.path
13 import re
14 import socket
15 import string
16 import sys
17 import time
18 import urllib
19 import urllib2
20
21 std_headers = {
22         'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.0.8) Gecko/2009032609 Firefox/3.0.8',
23         'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
24         'Accept': 'text/xml,application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5',
25         'Accept-Language': 'en-us,en;q=0.5',
26 }
27
28 simple_title_chars = string.ascii_letters.decode('ascii') + string.digits.decode('ascii')
29
30 class DownloadError(Exception):
31         """Download Error exception.
32         
33         This exception may be thrown by FileDownloader objects if they are not
34         configured to continue on errors. They will contain the appropriate
35         error message.
36         """
37         pass
38
39 class SameFileError(Exception):
40         """Same File exception.
41
42         This exception will be thrown by FileDownloader objects if they detect
43         multiple files would have to be downloaded to the same file on disk.
44         """
45         pass
46
47 class PostProcessingError(Exception):
48         """Post Processing exception.
49
50         This exception may be raised by PostProcessor's .run() method to
51         indicate an error in the postprocessing task.
52         """
53         pass
54
55 class UnavailableFormatError(Exception):
56         """Unavailable Format exception.
57
58         This exception will be thrown when a video is requested
59         in a format that is not available for that video.
60         """
61         pass
62
63 class ContentTooShortError(Exception):
64         """Content Too Short exception.
65
66         This exception may be raised by FileDownloader objects when a file they
67         download is smaller than what the server announced, indicating
68         the connection was probably interrupted.
69         """
70         # Both in bytes
71         downloaded = None
72         expected = None
73
74         def __init__(self, downloaded, expected):
75                 self.downloaded = downloaded
76                 self.expected = expected
77
78 class FileDownloader(object):
79         """File Downloader class.
80
81         File downloader objects are the ones responsible for downloading the
82         actual video file and writing it to disk if the user has requested
83         it, among some other tasks. In most cases there should be one per
84         program. Given a video URL, the downloader doesn't know how to
85         extract all the needed information by itself; that is the task of the
86         InfoExtractors, so it has to pass the URL to one of them.
87
88         For this, file downloader objects have a method that allows
89         InfoExtractors to be registered in a given order. When it is passed
90         a URL, the file downloader hands it over to the first InfoExtractor it
91         finds that reports being able to handle it. The InfoExtractor extracts
92         all the information about the video or videos the URL refers to, and
93         asks the FileDownloader to process the video information, possibly
94         downloading the video.
95
96         File downloaders accept a lot of parameters. In order not to saturate
97         the object constructor with arguments, it receives a dictionary of
98         options instead. These options are available through the params
99         attribute for the InfoExtractors to use. The FileDownloader also
100         registers itself as the downloader in charge of the InfoExtractors
101         that are added to it, so this is a "mutual registration".
102
103         Available options:
104
105         username:       Username for authentication purposes.
106         password:       Password for authentication purposes.
107         usenetrc:       Use netrc for authentication instead.
108         quiet:          Do not print messages to stdout.
109         forceurl:       Force printing final URL.
110         forcetitle:     Force printing title.
111         simulate:       Do not download the video files.
112         format:         Video format code.
113         outtmpl:        Template for output names.
114         ignoreerrors:   Do not stop on download errors.
115         ratelimit:      Download speed limit, in bytes/sec.
116         nooverwrites:   Prevent overwriting files.
117         continuedl:     Try to continue downloads if possible.
118         """
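        # Typical wiring (illustrative sketch; the main program below passes every
        # documented option):
        #   fd = FileDownloader({'outtmpl': u'%(id)s.%(ext)s', 'quiet': False, ...})
        #   fd.add_info_extractor(YoutubeIE())
        #   retcode = fd.download(['http://www.youtube.com/watch?v=...'])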
119
120         params = None
121         _ies = []
122         _pps = []
123         _download_retcode = None
124
125         def __init__(self, params):
126                 """Create a FileDownloader object with the given options."""
127                 self._ies = []
128                 self._pps = []
129                 self._download_retcode = 0
130                 self.params = params
131         
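        # For example, pmkdir('foo/bar/baz.flv') creates 'foo/' and 'foo/bar/' if
        # missing; the last path component is treated as the file name.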
132         @staticmethod
133         def pmkdir(filename):
134                 """Create directory components in filename. Similar to Unix "mkdir -p"."""
135                 components = filename.split(os.sep)
136                 aggregate = [os.sep.join(components[0:x]) for x in xrange(1, len(components))]
137                 aggregate = ['%s%s' % (x, os.sep) for x in aggregate] # Finish names with separator
138                 for dir in aggregate:
139                         if not os.path.exists(dir):
140                                 os.mkdir(dir)
141         
142         @staticmethod
143         def format_bytes(bytes):
144                 if bytes is None:
145                         return 'N/A'
146                 if bytes == 0:
147                         exponent = 0
148                 else:
149                         exponent = long(math.log(float(bytes), 1024.0))
150                 suffix = 'bkMGTPEZY'[exponent]
151                 converted = float(bytes) / float(1024**exponent)
152                 return '%.2f%s' % (converted, suffix)
153
154         @staticmethod
155         def calc_percent(byte_counter, data_len):
156                 if data_len is None:
157                         return '---.-%'
158                 return '%6s' % ('%3.1f%%' % (float(byte_counter) / float(data_len) * 100.0))
159
160         @staticmethod
161         def calc_eta(start, now, total, current):
162                 if total is None:
163                         return '--:--'
164                 dif = now - start
165                 if current == 0 or dif < 0.001: # One millisecond
166                         return '--:--'
167                 rate = float(current) / dif
168                 eta = long((float(total) - float(current)) / rate)
169                 (eta_mins, eta_secs) = divmod(eta, 60)
170                 if eta_mins > 99:
171                         return '--:--'
172                 return '%02d:%02d' % (eta_mins, eta_secs)
173
174         @staticmethod
175         def calc_speed(start, now, bytes):
176                 dif = now - start
177                 if bytes == 0 or dif < 0.001: # One millisecond
178                         return '%10s' % '---b/s'
179                 return '%10s' % ('%s/s' % FileDownloader.format_bytes(float(bytes) / dif))
180
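        # Adaptive block sizing: the next read roughly matches the measured rate
        # (bytes/elapsed_time), clamped between half and twice the previous block
        # and never above 4 MiB.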
181         @staticmethod
182         def best_block_size(elapsed_time, bytes):
183                 new_min = max(bytes / 2.0, 1.0)
184                 new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
185                 if elapsed_time < 0.001:
186                         return long(new_max)
187                 rate = bytes / elapsed_time
188                 if rate > new_max:
189                         return long(new_max)
190                 if rate < new_min:
191                         return long(new_min)
192                 return long(rate)
193
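        # For example, parse_bytes('50k') == 51200 and parse_bytes('44.6m') == 46766490;
        # a bare number or a 'b' suffix means plain bytes.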
194         @staticmethod
195         def parse_bytes(bytestr):
196                 """Parse a string indicating a byte quantity into a long integer."""
197                 matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
198                 if matchobj is None:
199                         return None
200                 number = float(matchobj.group(1))
201                 multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
202                 return long(round(number * multiplier))
203
204         @staticmethod
205         def verify_url(url):
206                 """Verify a URL is valid and data could be downloaded."""
207                 request = urllib2.Request(url, None, std_headers)
208                 data = urllib2.urlopen(request)
209                 data.read(1)
210                 data.close()
211
212         def add_info_extractor(self, ie):
213                 """Add an InfoExtractor object to the end of the list."""
214                 self._ies.append(ie)
215                 ie.set_downloader(self)
216         
217         def add_post_processor(self, pp):
218                 """Add a PostProcessor object to the end of the chain."""
219                 self._pps.append(pp)
220                 pp.set_downloader(self)
221         
222         def to_stdout(self, message, skip_eol=False):
223                 """Print message to stdout if not in quiet mode."""
224                 if not self.params.get('quiet', False):
225                         print (u'%s%s' % (message, [u'\n', u''][skip_eol])).encode(locale.getpreferredencoding()),
226                         sys.stdout.flush()
227         
228         def to_stderr(self, message):
229                 """Print message to stderr."""
230                 print >>sys.stderr, message
231         
232         def fixed_template(self):
233                 """Checks if the output template is fixed."""
234                 return (re.search(ur'(?u)%\(.+?\)s', self.params['outtmpl']) is None)
235
236         def trouble(self, message=None):
237                 """Determine action to take when a download problem appears.
238
239                 Depending on whether the downloader has been configured to
240                 ignore download errors or not, this method may throw an
241                 exception when errors are found, after printing the message.
242                 """
243                 if message is not None:
244                         self.to_stderr(message)
245                 if not self.params.get('ignoreerrors', False):
246                         raise DownloadError(message)
247                 self._download_retcode = 1
248
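        # The sleep time is chosen so that the average speed since start_time falls
        # back to the limit: (bytes_downloaded - limit * elapsed) / limit seconds.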
249         def slow_down(self, start_time, byte_counter):
250                 """Sleep if the download speed is over the rate limit."""
251                 rate_limit = self.params.get('ratelimit', None)
252                 if rate_limit is None or byte_counter == 0:
253                         return
254                 now = time.time()
255                 elapsed = now - start_time
256                 if elapsed <= 0.0:
257                         return
258                 speed = float(byte_counter) / elapsed
259                 if speed > rate_limit:
260                         time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit)
261
262         def report_destination(self, filename):
263                 """Report destination filename."""
264                 self.to_stdout(u'[download] Destination: %s' % filename)
265         
266         def report_progress(self, percent_str, data_len_str, speed_str, eta_str):
267                 """Report download progress."""
268                 self.to_stdout(u'\r[download] %s of %s at %s ETA %s' %
269                                 (percent_str, data_len_str, speed_str, eta_str), skip_eol=True)
270
271         def report_resuming_byte(self, resume_len):
272                 """Report attempt to resume at given byte."""
273                 self.to_stdout(u'[download] Resuming download at byte %s' % resume_len)
274         
275         def report_file_already_downloaded(self, file_name):
276                 """Report file has already been fully downloaded."""
277                 self.to_stdout(u'[download] %s has already been downloaded' % file_name)
278         
279         def report_unable_to_resume(self):
280                 """Report it was impossible to resume download."""
281                 self.to_stdout(u'[download] Unable to resume')
282         
283         def report_finish(self):
284                 """Report download finished."""
285                 self.to_stdout(u'')
286
287         def process_info(self, info_dict):
288                 """Process a single dictionary returned by an InfoExtractor."""
289                 # Do nothing else if in simulate mode
290                 if self.params.get('simulate', False):
291                         try:
292                                 self.verify_url(info_dict['url'])
293                         except (OSError, IOError, urllib2.URLError, httplib.HTTPException, socket.error), err:
294                                 raise UnavailableFormatError
295
296                         # Forced printings
297                         if self.params.get('forcetitle', False):
298                                 print info_dict['title'].encode(locale.getpreferredencoding())
299                         if self.params.get('forceurl', False):
300                                 print info_dict['url'].encode(locale.getpreferredencoding())
301
302                         return
303                         
304                 try:
305                         template_dict = dict(info_dict)
306                         template_dict['epoch'] = unicode(long(time.time()))
307                         filename = self.params['outtmpl'] % template_dict
308                 except (ValueError, KeyError), err:
309                         self.trouble('ERROR: invalid output template or system charset: %s' % str(err))
310                 if self.params['nooverwrites'] and os.path.exists(filename):
311                         self.to_stderr('WARNING: file exists: %s; skipping' % filename)
312                         return
313
314                 try:
315                         self.pmkdir(filename)
316                 except (OSError, IOError), err:
317                         self.trouble('ERROR: unable to create directories: %s' % str(err))
318                         return
319
320                 try:
321                         success = self._do_download(filename, info_dict['url'])
322                 except (OSError, IOError), err:
323                         raise UnavailableFormatError
324                 except (urllib2.URLError, httplib.HTTPException, socket.error), err:
325                         self.trouble('ERROR: unable to download video data: %s' % str(err))
326                         return
327                 except (ContentTooShortError, ), err:
328                         self.trouble('ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
329                         return
330
331                 if success:
332                         try:
333                                 self.post_process(filename, info_dict)
334                         except (PostProcessingError), err:
335                                 self.trouble('ERROR: postprocessing: %s' % str(err))
336                                 return
337
338         def download(self, url_list):
339                 """Download a given list of URLs."""
340                 if len(url_list) > 1 and self.fixed_template():
341                         raise SameFileError(self.params['outtmpl'])
342
343                 for url in url_list:
344                         suitable_found = False
345                         for ie in self._ies:
346                                 # Go to next InfoExtractor if not suitable
347                                 if not ie.suitable(url):
348                                         continue
349
350                                 # Suitable InfoExtractor found
351                                 suitable_found = True
352
353                                 # Extract information from URL and process it
354                                 ie.extract(url)
355
356                                 # Suitable InfoExtractor had been found; go to next URL
357                                 break
358
359                         if not suitable_found:
360                                 self.trouble('ERROR: no suitable InfoExtractor: %s' % url)
361
362                 return self._download_retcode
363
364         def post_process(self, filename, ie_info):
365                 """Run the postprocessing chain on the given file."""
366                 info = dict(ie_info)
367                 info['filepath'] = filename
368                 for pp in self._pps:
369                         info = pp.run(info)
370                         if info is None:
371                                 break
372         
373         def _do_download(self, filename, url):
374                 stream = None
375                 open_mode = 'ab'
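                # 'ab' lets a resumed transfer append to an existing partial file;
                # it is switched to 'wb' below if the server cannot honor the
                # Range request.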
376
377                 basic_request = urllib2.Request(url, None, std_headers)
378                 request = urllib2.Request(url, None, std_headers)
379
380                 # Attempt to resume download with "continuedl" option
381                 if os.path.isfile(filename):
382                         resume_len = os.path.getsize(filename)
383                 else:
384                         resume_len = 0
385                 if self.params['continuedl'] and resume_len != 0:
386                         self.report_resuming_byte(resume_len)
387                         request.add_header('Range','bytes=%d-' % resume_len)
388
389                 # Establish connection
390                 try:
391                         data = urllib2.urlopen(request)
392                 except (urllib2.HTTPError, ), err:
393                         if err.code != 416: #  416 is 'Requested range not satisfiable'
394                                 raise
395                         data = urllib2.urlopen(basic_request)
396                         content_length = data.info()['Content-Length']
397                         if content_length is not None and long(content_length) == resume_len:
398                                 self.report_file_already_downloaded(filename)
399                                 return True
400                         else:
401                                 self.report_unable_to_resume()
402                                 open_mode = 'wb'
403
404                 data_len = data.info().get('Content-length', None)
405                 data_len_str = self.format_bytes(data_len)
406                 byte_counter = 0
407                 block_size = 1024
408                 start = time.time()
409                 while True:
410                         # Download and write
411                         before = time.time()
412                         data_block = data.read(block_size)
413                         after = time.time()
414                         data_block_len = len(data_block)
415                         if data_block_len == 0:
416                                 break
417                         byte_counter += data_block_len
418
419                         # Open file just in time
420                         if stream is None:
421                                 try:
422                                         stream = open(filename, open_mode)
423                                         self.report_destination(filename)
424                                 except (OSError, IOError), err:
425                                         self.trouble('ERROR: unable to open for writing: %s' % str(err))
426                                         return False
427                         stream.write(data_block)
428                         block_size = self.best_block_size(after - before, data_block_len)
429
430                         # Progress message
431                         percent_str = self.calc_percent(byte_counter, data_len)
432                         eta_str = self.calc_eta(start, time.time(), data_len, byte_counter)
433                         speed_str = self.calc_speed(start, time.time(), byte_counter)
434                         self.report_progress(percent_str, data_len_str, speed_str, eta_str)
435
436                         # Apply rate limit
437                         self.slow_down(start, byte_counter)
438
439                 self.report_finish()
440                 if data_len is not None and str(byte_counter) != data_len:
441                         raise ContentTooShortError(byte_counter, long(data_len))
442                 return True
443
444 class InfoExtractor(object):
445         """Information Extractor class.
446
447         Information extractors are the classes that, given a URL, extract
448         information from the video (or videos) the URL refers to. This
449         information includes the real video URL, the video title and simplified
450         title, author and others. The information is stored in a dictionary
451         which is then passed to the FileDownloader. The FileDownloader
452         processes this information, possibly downloading the video to the file
453         system, among other possible outcomes. The dictionaries must include
454         the following fields:
455
456         id:             Video identifier.
457         url:            Final video URL.
458         uploader:       Nickname of the video uploader.
459         title:          Literal title.
460         stitle:         Simplified title.
461         ext:            Video filename extension.
462
463         Subclasses of this one should re-define the _real_initialize() and
464         _real_extract() methods, as well as the suitable() static method.
465         Probably, they should also be instantiated and added to the main
466         downloader.
467         """
468
469         _ready = False
470         _downloader = None
471
472         def __init__(self, downloader=None):
473                 """Constructor. Receives an optional downloader."""
474                 self._ready = False
475                 self.set_downloader(downloader)
476
477         @staticmethod
478         def suitable(url):
479                 """Receives a URL and returns True if suitable for this IE."""
480                 return False
481
482         def initialize(self):
483                 """Initializes an instance (authentication, etc)."""
484                 if not self._ready:
485                         self._real_initialize()
486                         self._ready = True
487
488         def extract(self, url):
489                 """Extracts URL information and returns it as a list of dicts."""
490                 self.initialize()
491                 return self._real_extract(url)
492
493         def set_downloader(self, downloader):
494                 """Sets the downloader for this IE."""
495                 self._downloader = downloader
496         
497         def _real_initialize(self):
498                 """Real initialization process. Redefine in subclasses."""
499                 pass
500
501         def _real_extract(self, url):
502                 """Real extraction process. Redefine in subclasses."""
503                 pass
504
505 class YoutubeIE(InfoExtractor):
506         """Information extractor for youtube.com."""
507
508         _VALID_URL = r'^((?:http://)?(?:\w+\.)?youtube\.com/(?:(?:v/)|(?:(?:watch(?:\.php)?)?\?(?:.+&)?v=)))?([0-9A-Za-z_-]+)(?(1).+)?$'
509         _LANG_URL = r'http://uk.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
510         _LOGIN_URL = 'http://www.youtube.com/signup?next=/&gl=US&hl=en'
511         _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
512         _NETRC_MACHINE = 'youtube'
513         _available_formats = ['22', '35', '18', '17', '13'] # listed in order of priority for -b flag
514         _video_extensions = {
515                 '13': '3gp',
516                 '17': 'mp4',
517                 '18': 'mp4',
518                 '22': 'mp4',
519         }
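        # With -b (format '0'), _real_extract() tries _available_formats from best
        # to worst, falling through to the next code on UnavailableFormatError.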
520
521         @staticmethod
522         def suitable(url):
523                 return (re.match(YoutubeIE._VALID_URL, url) is not None)
524
525         @staticmethod
526         def htmlentity_transform(matchobj):
527                 """Transforms an HTML entity to a Unicode character."""
528                 entity = matchobj.group(1)
529
530                 # Known non-numeric HTML entity
531                 if entity in htmlentitydefs.name2codepoint:
532                         return unichr(htmlentitydefs.name2codepoint[entity])
533
534                 # Unicode character
535                 mobj = re.match(ur'(?u)#(x?\d+)', entity)
536                 if mobj is not None:
537                         numstr = mobj.group(1)
538                         if numstr.startswith(u'x'):
539                                 base = 16
540                                 numstr = u'0%s' % numstr
541                         else:
542                                 base = 10
543                         return unichr(long(numstr, base))
544
545                 # Unknown entity in name, return its literal representation
546                 return (u'&%s;' % entity)
547
548         def report_lang(self):
549                 """Report attempt to set language."""
550                 self._downloader.to_stdout(u'[youtube] Setting language')
551
552         def report_login(self):
553                 """Report attempt to log in."""
554                 self._downloader.to_stdout(u'[youtube] Logging in')
555         
556         def report_age_confirmation(self):
557                 """Report attempt to confirm age."""
558                 self._downloader.to_stdout(u'[youtube] Confirming age')
559         
560         def report_webpage_download(self, video_id):
561                 """Report attempt to download webpage."""
562                 self._downloader.to_stdout(u'[youtube] %s: Downloading video webpage' % video_id)
563         
564         def report_information_extraction(self, video_id):
565                 """Report attempt to extract video information."""
566                 self._downloader.to_stdout(u'[youtube] %s: Extracting video information' % video_id)
567         
568         def report_video_url(self, video_id, video_real_url):
569                 """Report extracted video URL."""
570                 self._downloader.to_stdout(u'[youtube] %s: URL: %s' % (video_id, video_real_url))
571         
572         def report_unavailable_format(self, video_id, format):
573                 """Report unavailable video format."""
574                 self._downloader.to_stdout(u'[youtube] %s: Format %s not available' % (video_id, format))
575         
576         def _real_initialize(self):
577                 if self._downloader is None:
578                         return
579
580                 username = None
581                 password = None
582                 downloader_params = self._downloader.params
583
584                 # Attempt to use provided username and password or .netrc data
585                 if downloader_params.get('username', None) is not None:
586                         username = downloader_params['username']
587                         password = downloader_params['password']
588                 elif downloader_params.get('usenetrc', False):
589                         try:
590                                 info = netrc.netrc().authenticators(self._NETRC_MACHINE)
591                                 if info is not None:
592                                         username = info[0]
593                                         password = info[2]
594                                 else:
595                                         raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
596                         except (IOError, netrc.NetrcParseError), err:
597                                 self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % str(err))
598                                 return
599
600                 # Set language
601                 request = urllib2.Request(self._LANG_URL, None, std_headers)
602                 try:
603                         self.report_lang()
604                         urllib2.urlopen(request).read()
605                 except (urllib2.URLError, httplib.HTTPException, socket.error), err:
606                         self._downloader.to_stderr(u'WARNING: unable to set language: %s' % str(err))
607                         return
608
609                 # No authentication to be performed
610                 if username is None:
611                         return
612
613                 # Log in
614                 login_form = {
615                                 'current_form': 'loginForm',
616                                 'next':         '/',
617                                 'action_login': 'Log In',
618                                 'username':     username,
619                                 'password':     password,
620                                 }
621                 request = urllib2.Request(self._LOGIN_URL, urllib.urlencode(login_form), std_headers)
622                 try:
623                         self.report_login()
624                         login_results = urllib2.urlopen(request).read()
625                         if re.search(r'(?i)<form[^>]* name="loginForm"', login_results) is not None:
626                                 self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password')
627                                 return
628                 except (urllib2.URLError, httplib.HTTPException, socket.error), err:
629                         self._downloader.to_stderr(u'WARNING: unable to log in: %s' % str(err))
630                         return
631         
632                 # Confirm age
633                 age_form = {
634                                 'next_url':             '/',
635                                 'action_confirm':       'Confirm',
636                                 }
637                 request = urllib2.Request(self._AGE_URL, urllib.urlencode(age_form), std_headers)
638                 try:
639                         self.report_age_confirmation()
640                         age_results = urllib2.urlopen(request).read()
641                 except (urllib2.URLError, httplib.HTTPException, socket.error), err:
642                         self._downloader.trouble(u'ERROR: unable to confirm age: %s' % str(err))
643                         return
644
645         def _real_extract(self, url):
646                 # Extract video id from URL
647                 mobj = re.match(self._VALID_URL, url)
648                 if mobj is None:
649                         self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
650                         return
651                 video_id = mobj.group(2)
652
653                 # Downloader parameters
654                 best_quality = False
655                 format_param = None
656                 quality_index = 0
657                 if self._downloader is not None:
658                         params = self._downloader.params
659                         format_param = params.get('format', None)
660                         if format_param == '0':
661                                 format_param = self._available_formats[quality_index]
662                                 best_quality = True
663
664                 while True:
665                         # Extension
666                         video_extension = self._video_extensions.get(format_param, 'flv')
667
668                         # Normalize URL, including format
669                         normalized_url = 'http://www.youtube.com/watch?v=%s&gl=US&hl=en' % video_id
670                         if format_param is not None:
671                                 normalized_url = '%s&fmt=%s' % (normalized_url, format_param)
672                         request = urllib2.Request(normalized_url, None, std_headers)
673                         try:
674                                 self.report_webpage_download(video_id)
675                                 video_webpage = urllib2.urlopen(request).read()
676                         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
677                                 self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
678                                 return
679                         self.report_information_extraction(video_id)
680                         
681                         # "t" param
682                         mobj = re.search(r', "t": "([^"]+)"', video_webpage)
683                         if mobj is None:
684                                 self._downloader.trouble(u'ERROR: unable to extract "t" parameter')
685                                 return
686                         video_real_url = 'http://www.youtube.com/get_video?video_id=%s&t=%s&el=detailpage&ps=' % (video_id, mobj.group(1))
687                         if format_param is not None:
688                                 video_real_url = '%s&fmt=%s' % (video_real_url, format_param)
689                         self.report_video_url(video_id, video_real_url)
690
691                         # uploader
692                         mobj = re.search(r"var watchUsername = '([^']+)';", video_webpage)
693                         if mobj is None:
694                                 self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
695                                 return
696                         video_uploader = mobj.group(1)
697
698                         # title
699                         mobj = re.search(r'(?im)<title>YouTube - ([^<]*)</title>', video_webpage)
700                         if mobj is None:
701                                 self._downloader.trouble(u'ERROR: unable to extract video title')
702                                 return
703                         video_title = mobj.group(1).decode('utf-8')
704                         video_title = re.sub(ur'(?u)&(.+?);', self.htmlentity_transform, video_title)
705                         video_title = video_title.replace(os.sep, u'%')
706
707                         # simplified title
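                        # Runs of characters outside [A-Za-z0-9] collapse to single
                        # underscores; leading and trailing underscores are stripped.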
708                         simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
709                         simple_title = simple_title.strip(ur'_')
710
711                         try:
712                                 # Process video information
713                                 self._downloader.process_info({
714                                         'id':           video_id.decode('utf-8'),
715                                         'url':          video_real_url.decode('utf-8'),
716                                         'uploader':     video_uploader.decode('utf-8'),
717                                         'title':        video_title,
718                                         'stitle':       simple_title,
719                                         'ext':          video_extension.decode('utf-8'),
720                                 })
721
722                                 return
723
724                         except UnavailableFormatError, err:
725                                 if best_quality:
726                                         if quality_index == len(self._available_formats) - 1:
727                                                 # I don't ever expect this to happen
728                                                 self._downloader.trouble(u'ERROR: no known formats available for video')
729                                                 return
730                                         else:
731                                                 self.report_unavailable_format(video_id, format_param)
732                                                 quality_index += 1
733                                                 format_param = self._available_formats[quality_index]
734                                                 continue
735                                 else: 
736                                         self._downloader.trouble('ERROR: format not available for video')
737                                         return
738
739
740 class MetacafeIE(InfoExtractor):
741         """Information Extractor for metacafe.com."""
742
743         _VALID_URL = r'(?:http://)?(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*'
744         _DISCLAIMER = 'http://www.metacafe.com/family_filter/'
745         _FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
746         _youtube_ie = None
747
748         def __init__(self, youtube_ie, downloader=None):
749                 InfoExtractor.__init__(self, downloader)
750                 self._youtube_ie = youtube_ie
751
752         @staticmethod
753         def suitable(url):
754                 return (re.match(MetacafeIE._VALID_URL, url) is not None)
755
756         def report_disclaimer(self):
757                 """Report disclaimer retrieval."""
758                 self._downloader.to_stdout(u'[metacafe] Retrieving disclaimer')
759
760         def report_age_confirmation(self):
761                 """Report attempt to confirm age."""
762                 self._downloader.to_stdout(u'[metacafe] Confirming age')
763         
764         def report_download_webpage(self, video_id):
765                 """Report webpage download."""
766                 self._downloader.to_stdout(u'[metacafe] %s: Downloading webpage' % video_id)
767         
768         def report_extraction(self, video_id):
769                 """Report information extraction."""
770                 self._downloader.to_stdout(u'[metacafe] %s: Extracting information' % video_id)
771
772         def _real_initialize(self):
773                 # Retrieve disclaimer
774                 request = urllib2.Request(self._DISCLAIMER, None, std_headers)
775                 try:
776                         self.report_disclaimer()
777                         disclaimer = urllib2.urlopen(request).read()
778                 except (urllib2.URLError, httplib.HTTPException, socket.error), err:
779                         self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % str(err))
780                         return
781
782                 # Confirm age
783                 disclaimer_form = {
784                         'filters': '0',
785                         'submit': "Continue - I'm over 18",
786                         }
787                 request = urllib2.Request(self._FILTER_POST, urllib.urlencode(disclaimer_form), std_headers)
788                 try:
789                         self.report_age_confirmation()
790                         disclaimer = urllib2.urlopen(request).read()
791                 except (urllib2.URLError, httplib.HTTPException, socket.error), err:
792                         self._downloader.trouble(u'ERROR: unable to confirm age: %s' % str(err))
793                         return
794         
795         def _real_extract(self, url):
796                 # Extract id and simplified title from URL
797                 mobj = re.match(self._VALID_URL, url)
798                 if mobj is None:
799                         self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
800                         return
801
802                 video_id = mobj.group(1)
803
804                 # Check if video comes from YouTube
805                 mobj2 = re.match(r'^yt-(.*)$', video_id)
806                 if mobj2 is not None:
807                         self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % mobj2.group(1))
808                         return
809
810                 simple_title = mobj.group(2).decode('utf-8')
811                 video_extension = 'flv'
812
813                 # Retrieve video webpage to extract further information
814                 request = urllib2.Request('http://www.metacafe.com/watch/%s/' % video_id)
815                 try:
816                         self.report_download_webpage(video_id)
817                         webpage = urllib2.urlopen(request).read()
818                 except (urllib2.URLError, httplib.HTTPException, socket.error), err:
819                         self._downloader.trouble(u'ERROR: unable to retrieve video webpage: %s' % str(err))
820                         return
821
822                 # Extract URL, uploader and title from webpage
823                 self.report_extraction(video_id)
824                 mobj = re.search(r'(?m)&mediaURL=(http.*?\.flv)', webpage)
825                 if mobj is None:
826                         self._downloader.trouble(u'ERROR: unable to extract media URL')
827                         return
828                 mediaURL = urllib.unquote(mobj.group(1))
829
830                 mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
831                 if mobj is None:
832                         self._downloader.trouble(u'ERROR: unable to extract gdaKey')
833                         return
834                 gdaKey = mobj.group(1)
835
836                 video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
837
838                 mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
839                 if mobj is None:
840                         self._downloader.trouble(u'ERROR: unable to extract title')
841                         return
842                 video_title = mobj.group(1).decode('utf-8')
843
844                 mobj = re.search(r'(?ms)<li id="ChnlUsr">.*?Submitter:.*?<a .*?>(.*?)<', webpage)
845                 if mobj is None:
846                         self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
847                         return
848                 video_uploader = mobj.group(1)
849
850                 try:
851                         # Process video information
852                         self._downloader.process_info({
853                                 'id':           video_id.decode('utf-8'),
854                                 'url':          video_url.decode('utf-8'),
855                                 'uploader':     video_uploader.decode('utf-8'),
856                                 'title':        video_title,
857                                 'stitle':       simple_title,
858                                 'ext':          video_extension.decode('utf-8'),
859                         })
860                 except UnavailableFormatError:
861                         self._downloader.trouble(u'ERROR: format not available for video')
862
863
864 class YoutubeSearchIE(InfoExtractor):
865         """Information Extractor for YouTube search queries."""
866         _VALID_QUERY = r'ytsearch(\d+|all)?:[\s\S]+'
867         _TEMPLATE_URL = 'http://www.youtube.com/results?search_query=%s&page=%s&gl=US&hl=en'
868         _VIDEO_INDICATOR = r'href="/watch\?v=.+?"'
869         _MORE_PAGES_INDICATOR = r'>Next</a>'
870         _youtube_ie = None
871         _max_youtube_results = 1000
872
873         def __init__(self, youtube_ie, downloader=None):
874                 InfoExtractor.__init__(self, downloader)
875                 self._youtube_ie = youtube_ie
876         
877         @staticmethod
878         def suitable(url):
879                 return (re.match(YoutubeSearchIE._VALID_QUERY, url) is not None)
880
881         def report_download_page(self, query, pagenum):
882                 """Report attempt to download search results page with given number."""
883                 self._downloader.to_stdout(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))
884
885         def _real_initialize(self):
886                 self._youtube_ie.initialize()
887         
888         def _real_extract(self, query):
889                 mobj = re.match(self._VALID_QUERY, query)
890                 if mobj is None:
891                         self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
892                         return
893
894                 prefix, query = query.split(':')
895                 prefix = prefix[8:]
896                 if prefix == '':
897                         self._download_n_results(query, 1)
898                         return
899                 elif prefix == 'all':
900                         self._download_n_results(query, self._max_youtube_results)
901                         return
902                 else:
903                         try:
904                                 n = long(prefix)
905                                 if n <= 0:
906                                         self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
907                                         return
908                                 elif n > self._max_youtube_results:
909                                         self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)'  % (self._max_youtube_results, n))
910                                         n = self._max_youtube_results
911                                 self._download_n_results(query, n)
912                                 return
913                         except ValueError: # parsing prefix as integer fails
914                                 self._download_n_results(query, 1)
915                                 return
916
917         def _download_n_results(self, query, n):
918                 """Downloads a specified number of results for a query"""
919
920                 video_ids = []
921                 already_seen = set()
922                 pagenum = 1
923
924                 while True:
925                         self.report_download_page(query, pagenum)
926                         result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
927                         request = urllib2.Request(result_url, None, std_headers)
928                         try:
929                                 page = urllib2.urlopen(request).read()
930                         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
931                                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
932                                 return
933
934                         # Extract video identifiers
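                        # Each match has the form href="/watch?v=ID"; the text after
                        # the second '=' with the closing quote dropped is the ID.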
935                         for mobj in re.finditer(self._VIDEO_INDICATOR, page):
936                                 video_id = page[mobj.span()[0]:mobj.span()[1]].split('=')[2][:-1]
937                                 if video_id not in already_seen:
938                                         video_ids.append(video_id)
939                                         already_seen.add(video_id)
940                                         if len(video_ids) == n:
941                                                 # Specified n videos reached
942                                                 for id in video_ids:
943                                                         self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
944                                                 return
945
946                         if self._MORE_PAGES_INDICATOR not in page:
947                                 for id in video_ids:
948                                         self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
949                                 return
950
951                         pagenum = pagenum + 1
952
953 class YoutubePlaylistIE(InfoExtractor):
954         """Information Extractor for YouTube playlists."""
955
956         _VALID_URL = r'(?:http://)?(?:\w+\.)?youtube\.com/view_play_list\?p=(.+)'
957         _TEMPLATE_URL = 'http://www.youtube.com/view_play_list?p=%s&page=%s&gl=US&hl=en'
958         _VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
959         _MORE_PAGES_INDICATOR = r'/view_play_list?p=%s&page=%s'
960         _youtube_ie = None
961
962         def __init__(self, youtube_ie, downloader=None):
963                 InfoExtractor.__init__(self, downloader)
964                 self._youtube_ie = youtube_ie
965         
966         @staticmethod
967         def suitable(url):
968                 return (re.match(YoutubePlaylistIE._VALID_URL, url) is not None)
969
970         def report_download_page(self, playlist_id, pagenum):
971                 """Report attempt to download playlist page with given number."""
972                 self._downloader.to_stdout(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum))
973
974         def _real_initialize(self):
975                 self._youtube_ie.initialize()
976         
977         def _real_extract(self, url):
978                 # Extract playlist id
979                 mobj = re.match(self._VALID_URL, url)
980                 if mobj is None:
981                         self._downloader.trouble(u'ERROR: invalid url: %s' % url)
982                         return
983
984                 # Download playlist pages
985                 playlist_id = mobj.group(1)
986                 video_ids = []
987                 pagenum = 1
988
989                 while True:
990                         self.report_download_page(playlist_id, pagenum)
991                         request = urllib2.Request(self._TEMPLATE_URL % (playlist_id, pagenum), None, std_headers)
992                         try:
993                                 page = urllib2.urlopen(request).read()
994                         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
995                                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
996                                 return
997
998                         # Extract video identifiers
999                         ids_in_page = []
1000                         for mobj in re.finditer(self._VIDEO_INDICATOR, page):
1001                                 if mobj.group(1) not in ids_in_page:
1002                                         ids_in_page.append(mobj.group(1))
1003                         video_ids.extend(ids_in_page)
1004
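                        # Stop paging once the webpage no longer links to the next
                        # playlist page.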
1005                         if (self._MORE_PAGES_INDICATOR % (playlist_id.upper(), pagenum + 1)) not in page:
1006                                 break
1007                         pagenum = pagenum + 1
1008
1009                 for id in video_ids:
1010                         self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
1011                 return
1012
1013 class PostProcessor(object):
1014         """Post Processor class.
1015
1016         PostProcessor objects can be added to downloaders with their
1017         add_post_processor() method. When the downloader has finished a
1018         successful download, it will take its internal chain of PostProcessors
1019         and start calling the run() method on each one of them, first with
1020         an initial argument and then with the returned value of the previous
1021         PostProcessor.
1022
1023         The chain will be stopped if one of them ever returns None or the end
1024         of the chain is reached.
1025
1026         PostProcessor objects follow a "mutual registration" process similar
1027         to InfoExtractor objects.
1028         """
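        # Subclass sketch (hypothetical): override run(), work on
        # information['filepath'], and return the (possibly updated) dictionary,
        # or None to stop the rest of the chain.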
1029
1030         _downloader = None
1031
1032         def __init__(self, downloader=None):
1033                 self._downloader = downloader
1034
1035         def set_downloader(self, downloader):
1036                 """Sets the downloader for this PP."""
1037                 self._downloader = downloader
1038         
1039         def run(self, information):
1040                 """Run the PostProcessor.
1041
1042                 The "information" argument is a dictionary like the ones
1043                 composed by InfoExtractors. The only difference is that this
1044                 one has an extra field called "filepath" that points to the
1045                 downloaded file.
1046
1047                 When this method returns None, the postprocessing chain is
1048                 stopped. However, this method may return an information
1049                 dictionary that will be passed to the next postprocessing
1050                 object in the chain. It can be the one it received after
1051                 changing some fields.
1052
1053                 In addition, this method may raise a PostProcessingError
1054                 exception that will be taken into account by the downloader
1055                 it was called from.
1056                 """
1057                 return information # by default, do nothing
1058         
1059 ### MAIN PROGRAM ###
1060 if __name__ == '__main__':
1061         try:
1062                 # Modules needed only when running the main program
1063                 import getpass
1064                 import optparse
1065
1066                 # General configuration
1067                 urllib2.install_opener(urllib2.build_opener(urllib2.ProxyHandler()))
1068                 urllib2.install_opener(urllib2.build_opener(urllib2.HTTPCookieProcessor()))
1069                 socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
1070
1071                 # Parse command line
1072                 parser = optparse.OptionParser(
1073                         usage='Usage: %prog [options] url...',
1074                         version='2009.06.29',
1075                         conflict_handler='resolve',
1076                 )
1077
1078                 parser.add_option('-h', '--help',
1079                                 action='help', help='print this help text and exit')
1080                 parser.add_option('-v', '--version',
1081                                 action='version', help='print program version and exit')
1082                 parser.add_option('-i', '--ignore-errors',
1083                                 action='store_true', dest='ignoreerrors', help='continue on download errors', default=False)
1084                 parser.add_option('-r', '--rate-limit',
1085                                 dest='ratelimit', metavar='L', help='download rate limit (e.g. 50k or 44.6m)')
1086
1087                 authentication = optparse.OptionGroup(parser, 'Authentication Options')
1088                 authentication.add_option('-u', '--username',
1089                                 dest='username', metavar='UN', help='account username')
1090                 authentication.add_option('-p', '--password',
1091                                 dest='password', metavar='PW', help='account password')
1092                 authentication.add_option('-n', '--netrc',
1093                                 action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
1094                 parser.add_option_group(authentication)
1095
1096                 video_format = optparse.OptionGroup(parser, 'Video Format Options')
1097                 video_format.add_option('-f', '--format',
1098                                 action='store', dest='format', metavar='FMT', help='video format code')
1099                 video_format.add_option('-b', '--best-quality',
1100                                 action='store_const', dest='format', help='download the best quality video possible', const='0')
1101                 video_format.add_option('-m', '--mobile-version',
1102                                 action='store_const', dest='format', help='alias for -f 17', const='17')
1103                 video_format.add_option('-d', '--high-def',
1104                                 action='store_const', dest='format', help='alias for -f 22', const='22')
1105                 parser.add_option_group(video_format)
1106
1107                 verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
1108                 verbosity.add_option('-q', '--quiet',
1109                                 action='store_true', dest='quiet', help='activates quiet mode', default=False)
1110                 verbosity.add_option('-s', '--simulate',
1111                                 action='store_true', dest='simulate', help='do not download video', default=False)
1112                 verbosity.add_option('-g', '--get-url',
1113                                 action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
1114                 verbosity.add_option('-e', '--get-title',
1115                                 action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
1116                 parser.add_option_group(verbosity)
1117
1118                 filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
1119                 filesystem.add_option('-t', '--title',
1120                                 action='store_true', dest='usetitle', help='use title in file name', default=False)
1121                 filesystem.add_option('-l', '--literal',
1122                                 action='store_true', dest='useliteral', help='use literal title in file name', default=False)
1123                 filesystem.add_option('-o', '--output',
1124                                 dest='outtmpl', metavar='TPL', help='output filename template')
1125                 filesystem.add_option('-a', '--batch-file',
1126                                 dest='batchfile', metavar='F', help='file containing URLs to download')
1127                 filesystem.add_option('-w', '--no-overwrites',
1128                                 action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
1129                 filesystem.add_option('-c', '--continue',
1130                                 action='store_true', dest='continue_dl', help='resume partially downloaded files', default=False)
1131                 parser.add_option_group(filesystem)
1132
1133                 (opts, args) = parser.parse_args()
1134
1135                 # Batch file verification
1136                 batchurls = []
1137                 if opts.batchfile is not None:
1138                         try:
1139                                 batchurls = open(opts.batchfile, 'r').readlines()
1140                                 batchurls = [x.strip() for x in batchurls]
1141                                 batchurls = [x for x in batchurls if len(x) > 0]
1142                         except IOError:
1143                                 sys.exit(u'ERROR: batch file could not be read')
1144                 all_urls = batchurls + args
1145
1146                 # Conflicting, missing and erroneous options
1147                 if len(all_urls) < 1:
1148                         parser.error(u'you must provide at least one URL')
1149                 if opts.usenetrc and (opts.username is not None or opts.password is not None):
1150                         parser.error(u'using .netrc conflicts with giving username/password')
1151                 if opts.password is not None and opts.username is None:
1152                         parser.error(u'account username missing')
1153                 if opts.outtmpl is not None and (opts.useliteral or opts.usetitle):
1154                         parser.error(u'using output template conflicts with using title or literal title')
1155                 if opts.usetitle and opts.useliteral:
1156                         parser.error(u'using title conflicts with using literal title')
1157                 if opts.username is not None and opts.password is None:
1158                         opts.password = getpass.getpass(u'Type account password and press return:')
1159                 if opts.ratelimit is not None:
1160                         numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
1161                         if numeric_limit is None:
1162                                 parser.error(u'invalid rate limit specified')
1163                         opts.ratelimit = numeric_limit
1164
1165                 # Information extractors
1166                 youtube_ie = YoutubeIE()
1167                 metacafe_ie = MetacafeIE(youtube_ie)
1168                 youtube_pl_ie = YoutubePlaylistIE(youtube_ie)
1169                 youtube_search_ie = YoutubeSearchIE(youtube_ie)
1170
1171                 # File downloader
1172                 fd = FileDownloader({
1173                         'usenetrc': opts.usenetrc,
1174                         'username': opts.username,
1175                         'password': opts.password,
1176                         'quiet': (opts.quiet or opts.geturl or opts.gettitle),
1177                         'forceurl': opts.geturl,
1178                         'forcetitle': opts.gettitle,
1179                         'simulate': (opts.simulate or opts.geturl or opts.gettitle),
1180                         'format': opts.format,
1181                         'outtmpl': ((opts.outtmpl is not None and opts.outtmpl.decode(locale.getpreferredencoding()))
1182                                 or (opts.usetitle and u'%(stitle)s-%(id)s.%(ext)s')
1183                                 or (opts.useliteral and u'%(title)s-%(id)s.%(ext)s')
1184                                 or u'%(id)s.%(ext)s'),
1185                         'ignoreerrors': opts.ignoreerrors,
1186                         'ratelimit': opts.ratelimit,
1187                         'nooverwrites': opts.nooverwrites,
1188                         'continuedl': opts.continue_dl,
1189                         })
1190                 fd.add_info_extractor(youtube_search_ie)
1191                 fd.add_info_extractor(youtube_pl_ie)
1192                 fd.add_info_extractor(metacafe_ie)
1193                 fd.add_info_extractor(youtube_ie)
1194                 retcode = fd.download(all_urls)
1195                 sys.exit(retcode)
1196
1197         except DownloadError:
1198                 sys.exit(1)
1199         except SameFileError:
1200                 sys.exit(u'ERROR: fixed output name but more than one file to download')
1201         except KeyboardInterrupt:
1202                 sys.exit(u'\nERROR: Interrupted by user')