vimeo: Also accept URLs prefixed by www.
[youtube-dl.git] / youtube-dl
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 # Author: Ricardo Garcia Gonzalez
4 # Author: Danny Colligan
5 # Author: Benjamin Johnson
6 # Author: Vasyl' Vavrychuk
7 # Author: Witold Baryluk
# Author: Paweł Paprota
9 # License: Public domain code
10 import cookielib
11 import ctypes
12 import datetime
13 import email.utils
14 import gzip
15 import htmlentitydefs
16 import httplib
17 import locale
18 import math
19 import netrc
20 import os
21 import os.path
22 import re
23 import socket
24 import string
25 import StringIO
26 import subprocess
27 import sys
28 import time
29 import urllib
30 import urllib2
31 import zlib
32
33 # parse_qs was moved from the cgi module to the urlparse module recently.
34 try:
35         from urlparse import parse_qs
36 except ImportError:
37         from cgi import parse_qs
38
# HTTP headers sent with every request; mimics a contemporary desktop
# Firefox so sites do not serve degraded pages to an unknown client.
std_headers = {
	'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:2.0b10) Gecko/20100101 Firefox/4.0b10',
	'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
	'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
	'Accept-Encoding': 'gzip, deflate',
	'Accept-Language': 'en-us,en;q=0.5',
}
46
# Characters allowed in "simple" titles: ASCII letters and digits only.
simple_title_chars = string.ascii_letters.decode('ascii') + string.digits.decode('ascii')
48
def preferredencoding():
	"""Get preferred encoding.

	Returns the best encoding scheme for the system, based on
	locale.getpreferredencoding() and some further tweaks.
	Falls back to 'UTF-8' when the reported encoding is unusable.
	"""
	try:
		pref = locale.getpreferredencoding()
		# Verify the encoding name actually works; a bogus name raises
		# LookupError here rather than later at every call site.
		u'TEST'.encode(pref)
	except Exception:
		# Narrowed from a bare `except:`, which also swallowed
		# KeyboardInterrupt/SystemExit. The single-use generator that
		# previously wrapped this logic served no purpose and is gone.
		pref = 'UTF-8'
	return pref
64
65 def htmlentity_transform(matchobj):
66         """Transforms an HTML entity to a Unicode character.
67
68         This function receives a match object and is intended to be used with
69         the re.sub() function.
70         """
71         entity = matchobj.group(1)
72
73         # Known non-numeric HTML entity
74         if entity in htmlentitydefs.name2codepoint:
75                 return unichr(htmlentitydefs.name2codepoint[entity])
76
77         # Unicode character
78         mobj = re.match(ur'(?u)#(x?\d+)', entity)
79         if mobj is not None:
80                 numstr = mobj.group(1)
81                 if numstr.startswith(u'x'):
82                         base = 16
83                         numstr = u'0%s' % numstr
84                 else:
85                         base = 10
86                 return unichr(long(numstr, base))
87
88         # Unknown entity in name, return its literal representation
89         return (u'&%s;' % entity)
90
91 def sanitize_title(utitle):
92         """Sanitizes a video title so it could be used as part of a filename."""
93         utitle = re.sub(ur'(?u)&(.+?);', htmlentity_transform, utitle)
94         return utitle.replace(unicode(os.sep), u'%')
95
96 def sanitize_open(filename, open_mode):
97         """Try to open the given filename, and slightly tweak it if this fails.
98
99         Attempts to open the given filename. If this fails, it tries to change
100         the filename slightly, step by step, until it's either able to open it
101         or it fails and raises a final exception, like the standard open()
102         function.
103
104         It returns the tuple (stream, definitive_file_name).
105         """
106         try:
107                 if filename == u'-':
108                         if sys.platform == 'win32':
109                                 import msvcrt
110                                 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
111                         return (sys.stdout, filename)
112                 stream = open(filename, open_mode)
113                 return (stream, filename)
114         except (IOError, OSError), err:
115                 # In case of error, try to remove win32 forbidden chars
116                 filename = re.sub(ur'[/<>:"\|\?\*]', u'#', filename)
117
118                 # An exception here should be caught in the caller
119                 stream = open(filename, open_mode)
120                 return (stream, filename)
121
def timeconvert(timestr):
	"""Convert RFC 2822 defined time string into system timestamp"""
	# parsedate_tz returns None for unparseable input; propagate that.
	timetuple = email.utils.parsedate_tz(timestr)
	if timetuple is None:
		return None
	return email.utils.mktime_tz(timetuple)
129
# Raised from FileDownloader.trouble() when 'ignoreerrors' is not set.
class DownloadError(Exception):
	"""Download Error exception.

	This exception may be thrown by FileDownloader objects if they are not
	configured to continue on errors. They will contain the appropriate
	error message.
	"""
	pass
138
# Raised from FileDownloader.download() when several URLs would be written
# to a single fixed (placeholder-free) output template.
class SameFileError(Exception):
	"""Same File exception.

	This exception will be thrown by FileDownloader objects if they detect
	multiple files would have to be downloaded to the same file on disk.
	"""
	pass
146
# Caught in FileDownloader.process_info() and reported via trouble().
class PostProcessingError(Exception):
	"""Post Processing exception.

	This exception may be raised by PostProcessor's .run() method to
	indicate an error in the postprocessing task.
	"""
	pass
154
# Raised from FileDownloader.process_info() when the actual download of the
# video data fails with an OS-level error.
class UnavailableVideoError(Exception):
	"""Unavailable Format exception.

	This exception will be thrown when a video is requested
	in a format that is not available for that video.
	"""
	pass
162
class ContentTooShortError(Exception):
	"""Content Too Short exception.

	This exception may be raised by FileDownloader objects when a file they
	download is too small for what the server announced first, indicating
	the connection was probably interrupted.

	Carries the actual and announced sizes so the caller can report them.
	"""
	# Both in bytes
	downloaded = None
	expected = None

	def __init__(self, downloaded, expected):
		self.downloaded = downloaded
		self.expected = expected
177
class YoutubeDLHandler(urllib2.HTTPHandler):
	"""Handler for HTTP requests and responses.

	This class, when installed with an OpenerDirector, automatically adds
	the standard headers to every HTTP request and handles gzipped and
	deflated responses from web servers. If compression is to be avoided in
	a particular request, the original request in the program code only has
	to include the HTTP header "Youtubedl-No-Compression", which will be
	removed before making the real request.
	
	Part of this code was copied from:

	  http://techknack.net/python-urllib2-handlers/
	  
	Andrew Rowls, the author of that code, agreed to release it to the
	public domain.
	"""

	@staticmethod
	def deflate(data):
		# Try a raw deflate stream first (negative window size), then fall
		# back to a zlib-wrapped stream when that fails.
		try:
			return zlib.decompress(data, -zlib.MAX_WBITS)
		except zlib.error:
			return zlib.decompress(data)
	
	@staticmethod
	def addinfourl_wrapper(stream, headers, url, code):
		# Build an addinfourl response object; newer Pythons take the code
		# in the constructor, older ones need it set afterwards.
		if hasattr(urllib2.addinfourl, 'getcode'):
			return urllib2.addinfourl(stream, headers, url, code)
		ret = urllib2.addinfourl(stream, headers, url)
		ret.code = code
		return ret
	
	def http_request(self, req):
		# Force our standard headers onto the request, replacing any
		# same-named headers already present.
		for h in std_headers:
			if h in req.headers:
				del req.headers[h]
			req.add_header(h, std_headers[h])
		# NOTE(review): the lowercase-tail spelling here presumably matches
		# urllib2's internal header-name normalization — confirm.
		if 'Youtubedl-no-compression' in req.headers:
			if 'Accept-encoding' in req.headers:
				del req.headers['Accept-encoding']
			del req.headers['Youtubedl-no-compression']
		return req

	def http_response(self, req, resp):
		old_resp = resp
		# gzip
		if resp.headers.get('Content-encoding', '') == 'gzip':
			gz = gzip.GzipFile(fileobj=StringIO.StringIO(resp.read()), mode='r')
			resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
			resp.msg = old_resp.msg
		# deflate
		if resp.headers.get('Content-encoding', '') == 'deflate':
			gz = StringIO.StringIO(self.deflate(resp.read()))
			resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
			resp.msg = old_resp.msg
		return resp
235
class FileDownloader(object):
	"""File Downloader class.

	File downloader objects are the ones responsible of downloading the
	actual video file and writing it to disk if the user has requested
	it, among some other tasks. In most cases there should be one per
	program. As, given a video URL, the downloader doesn't know how to
	extract all the needed information, task that InfoExtractors do, it
	has to pass the URL to one of them.

	For this, file downloader objects have a method that allows
	InfoExtractors to be registered in a given order. When it is passed
	a URL, the file downloader handles it to the first InfoExtractor it
	finds that reports being able to handle it. The InfoExtractor extracts
	all the information about the video or videos the URL refers to, and
	asks the FileDownloader to process the video information, possibly
	downloading the video.

	File downloaders accept a lot of parameters. In order not to saturate
	the object constructor with arguments, it receives a dictionary of
	options instead. These options are available through the params
	attribute for the InfoExtractors to use. The FileDownloader also
	registers itself as the downloader in charge for the InfoExtractors
	that are added to it, so this is a "mutual registration".

	Available options:

	username:         Username for authentication purposes.
	password:         Password for authentication purposes.
	usenetrc:         Use netrc for authentication instead.
	quiet:            Do not print messages to stdout.
	forceurl:         Force printing final URL.
	forcetitle:       Force printing title.
	forcethumbnail:   Force printing thumbnail URL.
	forcedescription: Force printing description.
	forcefilename:    Force printing final filename.
	simulate:         Do not download the video files.
	format:           Video format code.
	format_limit:     Highest quality format to try.
	outtmpl:          Template for output names.
	ignoreerrors:     Do not stop on download errors.
	ratelimit:        Download speed limit, in bytes/sec.
	nooverwrites:     Prevent overwriting files.
	retries:          Number of times to retry for HTTP error 5xx
	continuedl:       Try to continue downloads if possible.
	noprogress:       Do not print the progress bar.
	playliststart:    Playlist item to start at.
	playlistend:      Playlist item to end at.
	logtostderr:      Log messages to stderr instead of stdout.
	consoletitle:     Display progress in console window's titlebar.
	nopart:           Do not use temporary .part files.
	updatetime:       Use the Last-modified header to set output file timestamps.
	"""

	params = None                # Options dictionary (see docstring above)
	_ies = []                    # Registered InfoExtractors, in order
	_pps = []                    # Registered PostProcessors, in order
	_download_retcode = None     # 0 normally; set to 1 by trouble()
	_num_downloads = None        # Ordinal used for the %(autonumber)s field
	_screen_file = None          # Stream used by to_screen()
296
297         def __init__(self, params):
298                 """Create a FileDownloader object with the given options."""
299                 self._ies = []
300                 self._pps = []
301                 self._download_retcode = 0
302                 self._num_downloads = 0
303                 self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
304                 self.params = params
305
306         @staticmethod
307         def pmkdir(filename):
308                 """Create directory components in filename. Similar to Unix "mkdir -p"."""
309                 components = filename.split(os.sep)
310                 aggregate = [os.sep.join(components[0:x]) for x in xrange(1, len(components))]
311                 aggregate = ['%s%s' % (x, os.sep) for x in aggregate] # Finish names with separator
312                 for dir in aggregate:
313                         if not os.path.exists(dir):
314                                 os.mkdir(dir)
315
316         @staticmethod
317         def format_bytes(bytes):
318                 if bytes is None:
319                         return 'N/A'
320                 if type(bytes) is str:
321                         bytes = float(bytes)
322                 if bytes == 0.0:
323                         exponent = 0
324                 else:
325                         exponent = long(math.log(bytes, 1024.0))
326                 suffix = 'bkMGTPEZY'[exponent]
327                 converted = float(bytes) / float(1024**exponent)
328                 return '%.2f%s' % (converted, suffix)
329
330         @staticmethod
331         def calc_percent(byte_counter, data_len):
332                 if data_len is None:
333                         return '---.-%'
334                 return '%6s' % ('%3.1f%%' % (float(byte_counter) / float(data_len) * 100.0))
335
336         @staticmethod
337         def calc_eta(start, now, total, current):
338                 if total is None:
339                         return '--:--'
340                 dif = now - start
341                 if current == 0 or dif < 0.001: # One millisecond
342                         return '--:--'
343                 rate = float(current) / dif
344                 eta = long((float(total) - float(current)) / rate)
345                 (eta_mins, eta_secs) = divmod(eta, 60)
346                 if eta_mins > 99:
347                         return '--:--'
348                 return '%02d:%02d' % (eta_mins, eta_secs)
349
350         @staticmethod
351         def calc_speed(start, now, bytes):
352                 dif = now - start
353                 if bytes == 0 or dif < 0.001: # One millisecond
354                         return '%10s' % '---b/s'
355                 return '%10s' % ('%s/s' % FileDownloader.format_bytes(float(bytes) / dif))
356
357         @staticmethod
358         def best_block_size(elapsed_time, bytes):
359                 new_min = max(bytes / 2.0, 1.0)
360                 new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
361                 if elapsed_time < 0.001:
362                         return long(new_max)
363                 rate = bytes / elapsed_time
364                 if rate > new_max:
365                         return long(new_max)
366                 if rate < new_min:
367                         return long(new_min)
368                 return long(rate)
369
370         @staticmethod
371         def parse_bytes(bytestr):
372                 """Parse a string indicating a byte quantity into a long integer."""
373                 matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
374                 if matchobj is None:
375                         return None
376                 number = float(matchobj.group(1))
377                 multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
378                 return long(round(number * multiplier))
379
	def add_info_extractor(self, ie):
		"""Add an InfoExtractor object to the end of the list."""
		# Mutual registration: the downloader keeps the IE, the IE keeps us.
		self._ies.append(ie)
		ie.set_downloader(self)
384
	def add_post_processor(self, pp):
		"""Add a PostProcessor object to the end of the chain."""
		# Mutual registration, mirroring add_info_extractor().
		self._pps.append(pp)
		pp.set_downloader(self)
389
	def to_screen(self, message, skip_eol=False, ignore_encoding_errors=False):
		"""Print message to stdout if not in quiet mode.

		skip_eol suppresses the trailing newline (used for progress lines);
		ignore_encoding_errors silences UnicodeEncodeError for purely
		informational output.
		"""
		try:
			if not self.params.get('quiet', False):
				terminator = [u'\n', u''][skip_eol]
				print >>self._screen_file, (u'%s%s' % (message, terminator)).encode(preferredencoding()),
			self._screen_file.flush()
		except (UnicodeEncodeError), err:
			if not ignore_encoding_errors:
				raise
400
	def to_stderr(self, message):
		"""Print message to stderr."""
		# Encoded with the system's preferred encoding, like to_screen().
		print >>sys.stderr, message.encode(preferredencoding())
404
	def to_cons_title(self, message):
		"""Set console/terminal window title to message."""
		if not self.params.get('consoletitle', False):
			return
		if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow():
			# c_wchar_p() might not be necessary if `message` is
			# already of type unicode()
			ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
		elif 'TERM' in os.environ:
			# xterm-style escape sequence to set the terminal title.
			sys.stderr.write('\033]0;%s\007' % message.encode(preferredencoding()))
415
	def fixed_template(self):
		"""Checks if the output template is fixed.

		A template is "fixed" when it contains no %(field)s placeholders,
		meaning every download would be written to the same filename.
		"""
		return (re.search(ur'(?u)%\(.+?\)s', self.params['outtmpl']) is None)
419
	def trouble(self, message=None):
		"""Determine action to take when a download problem appears.

		Depending on if the downloader has been configured to ignore
		download errors or not, this method may throw an exception or
		not when errors are found, after printing the message.
		"""
		if message is not None:
			self.to_stderr(message)
		if not self.params.get('ignoreerrors', False):
			raise DownloadError(message)
		# When ignoring errors, record the failure in the process retcode.
		self._download_retcode = 1
432
433         def slow_down(self, start_time, byte_counter):
434                 """Sleep if the download speed is over the rate limit."""
435                 rate_limit = self.params.get('ratelimit', None)
436                 if rate_limit is None or byte_counter == 0:
437                         return
438                 now = time.time()
439                 elapsed = now - start_time
440                 if elapsed <= 0.0:
441                         return
442                 speed = float(byte_counter) / elapsed
443                 if speed > rate_limit:
444                         time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit)
445
446         def temp_name(self, filename):
447                 """Returns a temporary filename for the given filename."""
448                 if self.params.get('nopart', False) or filename == u'-' or \
449                                 (os.path.exists(filename) and not os.path.isfile(filename)):
450                         return filename
451                 return filename + u'.part'
452
453         def undo_temp_name(self, filename):
454                 if filename.endswith(u'.part'):
455                         return filename[:-len(u'.part')]
456                 return filename
457
	def try_rename(self, old_filename, new_filename):
		"""Move the temporary file to its final name, reporting failures."""
		try:
			if old_filename == new_filename:
				return
			os.rename(old_filename, new_filename)
		except (IOError, OSError), err:
			self.trouble(u'ERROR: unable to rename file')
465         
466         def try_utime(self, filename, last_modified_hdr):
467                 """Try to set the last-modified time of the given file."""
468                 if last_modified_hdr is None:
469                         return
470                 if not os.path.isfile(filename):
471                         return
472                 timestr = last_modified_hdr
473                 if timestr is None:
474                         return
475                 filetime = timeconvert(timestr)
476                 if filetime is None:
477                         return
478                 try:
479                         os.utime(filename,(time.time(), filetime))
480                 except:
481                         pass
482
	def report_destination(self, filename):
		"""Report destination filename."""
		# Encoding errors are ignored: this line is informational only.
		self.to_screen(u'[download] Destination: %s' % filename, ignore_encoding_errors=True)
486
	def report_progress(self, percent_str, data_len_str, speed_str, eta_str):
		"""Report download progress."""
		if self.params.get('noprogress', False):
			return
		# Leading \r redraws the progress line in place on the terminal.
		self.to_screen(u'\r[download] %s of %s at %s ETA %s' %
				(percent_str, data_len_str, speed_str, eta_str), skip_eol=True)
		self.to_cons_title(u'youtube-dl - %s of %s at %s ETA %s' %
				(percent_str.strip(), data_len_str.strip(), speed_str.strip(), eta_str.strip()))
495
	def report_resuming_byte(self, resume_len):
		"""Report attempt to resume at given byte."""
		self.to_screen(u'[download] Resuming download at byte %s' % resume_len)
499
	def report_retry(self, count, retries):
		"""Report retry in case of HTTP error 5xx"""
		self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
503
	def report_file_already_downloaded(self, file_name):
		"""Report file has already been fully downloaded."""
		try:
			self.to_screen(u'[download] %s has already been downloaded' % file_name)
		except (UnicodeEncodeError), err:
			# Fall back to a generic message when the name cannot be encoded.
			self.to_screen(u'[download] The file has already been downloaded')
510
	def report_unable_to_resume(self):
		"""Report it was impossible to resume download."""
		self.to_screen(u'[download] Unable to resume')
514
	def report_finish(self):
		"""Report download finished."""
		if self.params.get('noprogress', False):
			self.to_screen(u'[download] Download completed')
		else:
			# With the progress bar active, just terminate its line.
			self.to_screen(u'')
521
	def increment_downloads(self):
		"""Increment the ordinal that assigns a number to each file."""
		# Feeds the %(autonumber)s field used in prepare_filename().
		self._num_downloads += 1
525
	def prepare_filename(self, info_dict):
		"""Generate the output filename.

		Expands the 'outtmpl' template with the video metadata plus the
		synthetic 'epoch' and 'autonumber' fields. Returns None (after
		reporting trouble) when expansion fails.
		"""
		try:
			template_dict = dict(info_dict)
			template_dict['epoch'] = unicode(long(time.time()))
			template_dict['autonumber'] = unicode('%05d' % self._num_downloads)
			filename = self.params['outtmpl'] % template_dict
			return filename
		except (ValueError, KeyError), err:
			self.trouble(u'ERROR: invalid system charset or erroneous output template')
			return None
537
	def process_info(self, info_dict):
		"""Process a single dictionary returned by an InfoExtractor.

		Handles forced printings and simulate mode, filename preparation,
		directory creation, the download itself, and postprocessing.
		"""
		filename = self.prepare_filename(info_dict)
		# Do nothing else if in simulate mode
		if self.params.get('simulate', False):
			# Forced printings
			if self.params.get('forcetitle', False):
				print info_dict['title'].encode(preferredencoding(), 'xmlcharrefreplace')
			if self.params.get('forceurl', False):
				print info_dict['url'].encode(preferredencoding(), 'xmlcharrefreplace')
			if self.params.get('forcethumbnail', False) and 'thumbnail' in info_dict:
				print info_dict['thumbnail'].encode(preferredencoding(), 'xmlcharrefreplace')
			if self.params.get('forcedescription', False) and 'description' in info_dict:
				print info_dict['description'].encode(preferredencoding(), 'xmlcharrefreplace')
			if self.params.get('forcefilename', False) and filename is not None:
				print filename.encode(preferredencoding(), 'xmlcharrefreplace')

			return

		if filename is None:
			return
		if self.params.get('nooverwrites', False) and os.path.exists(filename):
			self.to_stderr(u'WARNING: file exists and will be skipped')
			return

		try:
			self.pmkdir(filename)
		except (OSError, IOError), err:
			self.trouble(u'ERROR: unable to create directories: %s' % str(err))
			return

		try:
			success = self._do_download(filename, info_dict['url'].encode('utf-8'), info_dict.get('player_url', None))
		except (OSError, IOError), err:
			# OS-level failure during the download maps to "video unavailable".
			raise UnavailableVideoError
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self.trouble(u'ERROR: unable to download video data: %s' % str(err))
			return
		except (ContentTooShortError, ), err:
			self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
			return

		if success:
			try:
				self.post_process(filename, info_dict)
			except (PostProcessingError), err:
				self.trouble(u'ERROR: postprocessing: %s' % str(err))
				return
586
	def download(self, url_list):
		"""Download a given list of URLs.

		Returns the process return code (0, or 1 once trouble() has been
		invoked with 'ignoreerrors' set).
		"""
		# A fixed template would make every download overwrite the same file.
		if len(url_list) > 1 and self.fixed_template():
			raise SameFileError(self.params['outtmpl'])

		for url in url_list:
			suitable_found = False
			for ie in self._ies:
				# Go to next InfoExtractor if not suitable
				if not ie.suitable(url):
					continue

				# Suitable InfoExtractor found
				suitable_found = True

				# Extract information from URL and process it
				ie.extract(url)

				# Suitable InfoExtractor had been found; go to next URL
				break

			if not suitable_found:
				self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url)

		return self._download_retcode
612
613         def post_process(self, filename, ie_info):
614                 """Run the postprocessing chain on the given file."""
615                 info = dict(ie_info)
616                 info['filepath'] = filename
617                 for pp in self._pps:
618                         info = pp.run(info)
619                         if info is None:
620                                 break
621
	def _download_with_rtmpdump(self, filename, url, player_url):
		"""Download an rtmp:// URL by shelling out to the rtmpdump tool."""
		self.report_destination(filename)
		tmpfilename = self.temp_name(filename)

		# Check for rtmpdump first
		try:
			# NOTE(review): the devnull file object opened here is never
			# explicitly closed.
			subprocess.call(['rtmpdump', '-h'], stdout=(file(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
		except (OSError, IOError):
			self.trouble(u'ERROR: RTMP download detected but "rtmpdump" could not be run')
			return False

		# Download using rtmpdump. rtmpdump returns exit code 2 when
		# the connection was interrupted and resuming appears to be
		# possible. This is part of rtmpdump's normal usage, AFAIK.
		basic_args = ['rtmpdump', '-q'] + [[], ['-W', player_url]][player_url is not None] + ['-r', url, '-o', tmpfilename]
		retval = subprocess.call(basic_args + [[], ['-e', '-k', '1']][self.params.get('continuedl', False)])
		while retval == 2 or retval == 1:
			prevsize = os.path.getsize(tmpfilename)
			self.to_screen(u'\r[rtmpdump] %s bytes' % prevsize, skip_eol=True)
			time.sleep(5.0) # This seems to be needed
			retval = subprocess.call(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1])
			cursize = os.path.getsize(tmpfilename)
			# Give up once a resume attempt makes no further progress.
			if prevsize == cursize and retval == 1:
				break
		if retval == 0:
			self.to_screen(u'\r[rtmpdump] %s bytes' % os.path.getsize(tmpfilename))
			self.try_rename(tmpfilename, filename)
			return True
		else:
			self.trouble(u'\nERROR: rtmpdump exited with code %d' % retval)
			return False
653
        def _do_download(self, filename, url, player_url):
                """Download url into filename, returning True on success.

                Dispatches rtmp:// URLs to rtmpdump; otherwise downloads over
                HTTP into a temporary ".part" file (unless 'nopart'), with
                optional byte-range resume, bounded retries, adaptive block
                size and progress/rate-limit reporting. Unrecoverable errors
                are reported via self.trouble() and yield False.
                """
                # Check file already present
                if self.params.get('continuedl', False) and os.path.isfile(filename) and not self.params.get('nopart', False):
                        self.report_file_already_downloaded(filename)
                        return True

                # Attempt to download using rtmpdump
                if url.startswith('rtmp'):
                        return self._download_with_rtmpdump(filename, url, player_url)

                tmpfilename = self.temp_name(filename)
                stream = None  # opened lazily, only once the first block arrives
                open_mode = 'wb'

                # Do not include the Accept-Encoding header
                headers = {'Youtubedl-no-compression': 'True'}
                # basic_request deliberately never gets a Range header; it is
                # used below to probe the full length when resuming fails.
                basic_request = urllib2.Request(url, None, headers)
                request = urllib2.Request(url, None, headers)

                # Establish possible resume length
                if os.path.isfile(tmpfilename):
                        resume_len = os.path.getsize(tmpfilename)
                else:
                        resume_len = 0

                # Request parameters in case of being able to resume
                if self.params.get('continuedl', False) and resume_len != 0:
                        self.report_resuming_byte(resume_len)
                        request.add_header('Range','bytes=%d-' % resume_len)
                        open_mode = 'ab'

                count = 0
                retries = self.params.get('retries', 0)
                while count <= retries:
                        # Establish connection
                        try:
                                data = urllib2.urlopen(request)
                                break
                        except (urllib2.HTTPError, ), err:
                                if (err.code < 500 or err.code >= 600) and err.code != 416:
                                        # Unexpected HTTP error
                                        raise
                                elif err.code == 416:
                                        # Unable to resume (requested range not satisfiable)
                                        try:
                                                # Open the connection again without the range header
                                                data = urllib2.urlopen(basic_request)
                                                content_length = data.info()['Content-Length']
                                        except (urllib2.HTTPError, ), err:
                                                if err.code < 500 or err.code >= 600:
                                                        raise
                                        else:
                                                # Examine the reported length
                                                if (content_length is not None and
                                                    (resume_len - 100 < long(content_length) < resume_len + 100)):
                                                        # The file had already been fully downloaded.
                                                        # Explanation to the above condition: in issue #175 it was revealed that
                                                        # YouTube sometimes adds or removes a few bytes from the end of the file,
                                                        # changing the file size slightly and causing problems for some users. So
                                                        # I decided to implement a suggested change and consider the file
                                                        # completely downloaded if the file size differs less than 100 bytes from
                                                        # the one in the hard drive.
                                                        self.report_file_already_downloaded(filename)
                                                        self.try_rename(tmpfilename, filename)
                                                        return True
                                                else:
                                                        # The length does not match, we start the download over
                                                        # NOTE(review): resume_len is not reset here even though the
                                                        # file is rewritten from scratch; data_len and byte_counter
                                                        # below are both shifted by the stale offset (the shifts
                                                        # cancel in the final length check) — verify intended.
                                                        self.report_unable_to_resume()
                                                        open_mode = 'wb'
                                                        break
                        # Retry
                        count += 1
                        if count <= retries:
                                self.report_retry(count, retries)

                if count > retries:
                        self.trouble(u'ERROR: giving up after %s retries' % retries)
                        return False

                data_len = data.info().get('Content-length', None)
                if data_len is not None:
                        # Server reports the remaining bytes; add what we already have.
                        data_len = long(data_len) + resume_len
                data_len_str = self.format_bytes(data_len)
                byte_counter = 0 + resume_len
                block_size = 1024
                start = time.time()
                while True:
                        # Download and write
                        before = time.time()
                        data_block = data.read(block_size)
                        after = time.time()
                        if len(data_block) == 0:
                                break
                        byte_counter += len(data_block)

                        # Open file just in time
                        if stream is None:
                                try:
                                        (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
                                        filename = self.undo_temp_name(tmpfilename)
                                        self.report_destination(filename)
                                except (OSError, IOError), err:
                                        self.trouble(u'ERROR: unable to open for writing: %s' % str(err))
                                        return False
                        try:
                                stream.write(data_block)
                        except (IOError, OSError), err:
                                self.trouble(u'\nERROR: unable to write data: %s' % str(err))
                                return False
                        # Adapt the next read size to the observed throughput.
                        block_size = self.best_block_size(after - before, len(data_block))

                        # Progress message
                        percent_str = self.calc_percent(byte_counter, data_len)
                        eta_str = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
                        speed_str = self.calc_speed(start, time.time(), byte_counter - resume_len)
                        self.report_progress(percent_str, data_len_str, speed_str, eta_str)

                        # Apply rate limit
                        self.slow_down(start, byte_counter - resume_len)

                stream.close()
                self.report_finish()
                # A short read relative to the advertised length is an error.
                if data_len is not None and byte_counter != data_len:
                        raise ContentTooShortError(byte_counter, long(data_len))
                self.try_rename(tmpfilename, filename)

                # Update file modification time
                if self.params.get('updatetime', True):
                        self.try_utime(filename, data.info().get('last-modified', None))

                return True
785
class InfoExtractor(object):
        """Base class for all information extractors.

        An information extractor (IE) receives a URL and produces, for each
        video behind it, a dictionary that the FileDownloader then processes
        (possibly downloading the video). Each dictionary must carry these
        fields:

        id:             Video identifier.
        url:            Final video URL.
        uploader:       Nickname of the video uploader.
        title:          Literal title.
        stitle:         Simplified title.
        ext:            Video filename extension.
        format:         Video format.
        player_url:     SWF Player URL (may be None).

        Optional fields, used only by the forced printing functions (e.g.
        when youtube-dl acts as the backend of a video search such as
        youtube2mp3):

        thumbnail:      Full URL to a video thumbnail image.
        description:    One-line video description.

        Subclasses should override _real_initialize(), _real_extract() and
        the suitable() static method, and are normally instantiated and
        registered with the main downloader.
        """

        _ready = False
        _downloader = None

        def __init__(self, downloader=None):
                """Create the extractor; a downloader may be attached right away."""
                self._ready = False
                self.set_downloader(downloader)

        @staticmethod
        def suitable(url):
                """Return True when this extractor can handle the given URL."""
                return False

        def set_downloader(self, downloader):
                """Attach the FileDownloader this extractor reports to."""
                self._downloader = downloader

        def initialize(self):
                """Run one-time setup (authentication, etc) on first use."""
                if self._ready:
                        return
                self._real_initialize()
                self._ready = True

        def extract(self, url):
                """Initialize if needed, then extract information for url."""
                self.initialize()
                return self._real_extract(url)

        def _real_initialize(self):
                """Subclass hook: the actual initialization work."""
                pass

        def _real_extract(self, url):
                """Subclass hook: the actual extraction work."""
                pass
856
class YoutubeIE(InfoExtractor):
        """Information extractor for youtube.com."""

        # Matches watch pages, youtu.be short links, embeds and /v/ URLs;
        # group 1 is the URL prefix (may be absent for a bare id), group 2
        # is the video id. The conditional group (?(1).+)? allows trailing
        # query parameters only when a full URL was given.
        _VALID_URL = r'^((?:https?://)?(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/)(?:(?:(?:v|embed)/)|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=)))?([0-9A-Za-z_-]+)(?(1).+)?$'
        _LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
        _LOGIN_URL = 'https://www.youtube.com/signup?next=/&gl=US&hl=en'
        _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
        _NETRC_MACHINE = 'youtube'
        # Listed in order of quality
        _available_formats = ['38', '37', '22', '45', '35', '34', '43', '18', '6', '5', '17', '13']
        # Maps itag format codes to file extensions; anything missing falls
        # back to 'flv' (see _real_extract).
        _video_extensions = {
                '13': '3gp',
                '17': 'mp4',
                '18': 'mp4',
                '22': 'mp4',
                '37': 'mp4',
                '38': 'video', # You actually don't know if this will be MOV, AVI or whatever
                '43': 'webm',
                '45': 'webm',
        }

        @staticmethod
        def suitable(url):
                return (re.match(YoutubeIE._VALID_URL, url) is not None)

        def report_lang(self):
                """Report attempt to set language."""
                self._downloader.to_screen(u'[youtube] Setting language')

        def report_login(self):
                """Report attempt to log in."""
                self._downloader.to_screen(u'[youtube] Logging in')

        def report_age_confirmation(self):
                """Report attempt to confirm age."""
                self._downloader.to_screen(u'[youtube] Confirming age')

        def report_video_webpage_download(self, video_id):
                """Report attempt to download video webpage."""
                self._downloader.to_screen(u'[youtube] %s: Downloading video webpage' % video_id)

        def report_video_info_webpage_download(self, video_id):
                """Report attempt to download video info webpage."""
                self._downloader.to_screen(u'[youtube] %s: Downloading video info webpage' % video_id)

        def report_information_extraction(self, video_id):
                """Report attempt to extract video information."""
                self._downloader.to_screen(u'[youtube] %s: Extracting video information' % video_id)

        def report_unavailable_format(self, video_id, format):
                """Report extracted video URL."""
                self._downloader.to_screen(u'[youtube] %s: Format %s not available' % (video_id, format))

        def report_rtmp_download(self):
                """Indicate the download will use the RTMP protocol."""
                self._downloader.to_screen(u'[youtube] RTMP download detected')

        def _real_initialize(self):
                """Set site language to English and optionally log in and confirm age.

                Credentials come from downloader params or ~/.netrc; all
                failures are reported as warnings (or an error for age
                confirmation) and simply abort initialization.
                """
                if self._downloader is None:
                        return

                username = None
                password = None
                downloader_params = self._downloader.params

                # Attempt to use provided username and password or .netrc data
                if downloader_params.get('username', None) is not None:
                        username = downloader_params['username']
                        password = downloader_params['password']
                elif downloader_params.get('usenetrc', False):
                        try:
                                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
                                if info is not None:
                                        username = info[0]
                                        password = info[2]
                                else:
                                        raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
                        except (IOError, netrc.NetrcParseError), err:
                                self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % str(err))
                                return

                # Set language
                request = urllib2.Request(self._LANG_URL)
                try:
                        self.report_lang()
                        urllib2.urlopen(request).read()
                except (urllib2.URLError, httplib.HTTPException, socket.error), err:
                        self._downloader.to_stderr(u'WARNING: unable to set language: %s' % str(err))
                        return

                # No authentication to be performed
                if username is None:
                        return

                # Log in
                login_form = {
                                'current_form': 'loginForm',
                                'next':         '/',
                                'action_login': 'Log In',
                                'username':     username,
                                'password':     password,
                                }
                request = urllib2.Request(self._LOGIN_URL, urllib.urlencode(login_form))
                try:
                        self.report_login()
                        login_results = urllib2.urlopen(request).read()
                        # If the login form is still present, the credentials were rejected.
                        if re.search(r'(?i)<form[^>]* name="loginForm"', login_results) is not None:
                                self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password')
                                return
                except (urllib2.URLError, httplib.HTTPException, socket.error), err:
                        self._downloader.to_stderr(u'WARNING: unable to log in: %s' % str(err))
                        return

                # Confirm age
                age_form = {
                                'next_url':             '/',
                                'action_confirm':       'Confirm',
                                }
                request = urllib2.Request(self._AGE_URL, urllib.urlencode(age_form))
                try:
                        self.report_age_confirmation()
                        age_results = urllib2.urlopen(request).read()
                except (urllib2.URLError, httplib.HTTPException, socket.error), err:
                        self._downloader.trouble(u'ERROR: unable to confirm age: %s' % str(err))
                        return

        def _real_extract(self, url):
                """Download and extract info for the video(s) behind a YouTube URL.

                Fetches the watch page and get_video_info, chooses format(s)
                according to the 'format'/'format_limit' params and feeds one
                info dictionary per chosen format to the downloader. Errors
                are reported through self._downloader.trouble().
                """
                # Extract video id from URL
                mobj = re.match(self._VALID_URL, url)
                if mobj is None:
                        self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
                        return
                video_id = mobj.group(2)

                # Get video webpage
                self.report_video_webpage_download(video_id)
                request = urllib2.Request('http://www.youtube.com/watch?v=%s&gl=US&hl=en&amp;has_verified=1' % video_id)
                try:
                        video_webpage = urllib2.urlopen(request).read()
                except (urllib2.URLError, httplib.HTTPException, socket.error), err:
                        self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
                        return

                # Attempt to extract SWF player URL
                # (the URL appears JSON-escaped in the page, hence the \\/ and
                # the unescaping re.sub below)
                mobj = re.search(r'swfConfig.*?"(http:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
                if mobj is not None:
                        player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
                else:
                        player_url = None

                # Get video info
                # Try several 'el' variants; the first response containing a
                # 'token' wins.
                self.report_video_info_webpage_download(video_id)
                for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
                        video_info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
                                           % (video_id, el_type))
                        request = urllib2.Request(video_info_url)
                        try:
                                video_info_webpage = urllib2.urlopen(request).read()
                                video_info = parse_qs(video_info_webpage)
                                if 'token' in video_info:
                                        break
                        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
                                self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % str(err))
                                return
                if 'token' not in video_info:
                        if 'reason' in video_info:
                                self._downloader.trouble(u'ERROR: YouTube said: %s' % video_info['reason'][0].decode('utf-8'))
                        else:
                                self._downloader.trouble(u'ERROR: "token" parameter not in video info for unknown reason')
                        return

                # Start extracting information
                self.report_information_extraction(video_id)

                # uploader
                if 'author' not in video_info:
                        self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
                        return
                video_uploader = urllib.unquote_plus(video_info['author'][0])

                # title
                if 'title' not in video_info:
                        self._downloader.trouble(u'ERROR: unable to extract video title')
                        return
                video_title = urllib.unquote_plus(video_info['title'][0])
                video_title = video_title.decode('utf-8')
                video_title = sanitize_title(video_title)

                # simplified title
                # (collapse every run of non-alphanumeric characters to '_')
                simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
                simple_title = simple_title.strip(ur'_')

                # thumbnail image
                if 'thumbnail_url' not in video_info:
                        self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
                        video_thumbnail = ''
                else:   # don't panic if we can't find it
                        video_thumbnail = urllib.unquote_plus(video_info['thumbnail_url'][0])

                # upload date
                upload_date = u'NA'
                mobj = re.search(r'id="eow-date".*?>(.*?)</span>', video_webpage, re.DOTALL)
                if mobj is not None:
                        upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
                        format_expressions = ['%d %B %Y', '%B %d %Y']
                        # NOTE: the loop does not break after a successful parse; a
                        # later failing expression is harmless because the except
                        # swallows the error, leaving the parsed value intact.
                        for expression in format_expressions:
                                try:
                                        upload_date = datetime.datetime.strptime(upload_date, expression).strftime('%Y%m%d')
                                except:
                                        pass

                # description
                video_description = 'No description available.'
                if self._downloader.params.get('forcedescription', False):
                        mobj = re.search(r'<meta name="description" content="(.*)"(?:\s*/)?>', video_webpage)
                        if mobj is not None:
                                video_description = mobj.group(1)

                # token
                video_token = urllib.unquote_plus(video_info['token'][0])

                # Decide which formats to download
                req_format = self._downloader.params.get('format', None)

                if 'fmt_url_map' in video_info:
                        # fmt_url_map is a comma-separated list of "itag|url" pairs.
                        url_map = dict(tuple(pair.split('|')) for pair in video_info['fmt_url_map'][0].split(','))
                        format_limit = self._downloader.params.get('format_limit', None)
                        if format_limit is not None and format_limit in self._available_formats:
                                format_list = self._available_formats[self._available_formats.index(format_limit):]
                        else:
                                format_list = self._available_formats
                        existing_formats = [x for x in format_list if x in url_map]
                        if len(existing_formats) == 0:
                                self._downloader.trouble(u'ERROR: no known formats available for video')
                                return
                        if req_format is None:
                                video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
                        elif req_format == '-1':
                                video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
                        else:
                                # Specific format
                                if req_format not in url_map:
                                        self._downloader.trouble(u'ERROR: requested format not available')
                                        return
                                video_url_list = [(req_format, url_map[req_format])] # Specific format

                elif 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
                        self.report_rtmp_download()
                        video_url_list = [(None, video_info['conn'][0])]

                else:
                        self._downloader.trouble(u'ERROR: no fmt_url_map or conn information found in video info')
                        return

                for format_param, video_real_url in video_url_list:
                        # At this point we have a new video
                        self._downloader.increment_downloads()

                        # Extension
                        video_extension = self._video_extensions.get(format_param, 'flv')

                        # Find the video URL in fmt_url_map or conn parameters
                        try:
                                # Process video information
                                self._downloader.process_info({
                                        'id':           video_id.decode('utf-8'),
                                        'url':          video_real_url.decode('utf-8'),
                                        'uploader':     video_uploader.decode('utf-8'),
                                        'upload_date':  upload_date,
                                        'title':        video_title,
                                        'stitle':       simple_title,
                                        'ext':          video_extension.decode('utf-8'),
                                        'format':       (format_param is None and u'NA' or format_param.decode('utf-8')),
                                        'thumbnail':    video_thumbnail.decode('utf-8'),
                                        'description':  video_description.decode('utf-8'),
                                        'player_url':   player_url,
                                })
                        except UnavailableVideoError, err:
                                self._downloader.trouble(u'\nERROR: unable to download video')
1136
1137
1138 class MetacafeIE(InfoExtractor):
1139         """Information Extractor for metacafe.com."""
1140
1141         _VALID_URL = r'(?:http://)?(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*'
1142         _DISCLAIMER = 'http://www.metacafe.com/family_filter/'
1143         _FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
1144         _youtube_ie = None
1145
        def __init__(self, youtube_ie, downloader=None):
                """Constructor. Keeps a YoutubeIE to delegate 'yt-' prefixed video ids to."""
                InfoExtractor.__init__(self, downloader)
                self._youtube_ie = youtube_ie
1149
1150         @staticmethod
1151         def suitable(url):
1152                 return (re.match(MetacafeIE._VALID_URL, url) is not None)
1153
1154         def report_disclaimer(self):
1155                 """Report disclaimer retrieval."""
1156                 self._downloader.to_screen(u'[metacafe] Retrieving disclaimer')
1157
1158         def report_age_confirmation(self):
1159                 """Report attempt to confirm age."""
1160                 self._downloader.to_screen(u'[metacafe] Confirming age')
1161
1162         def report_download_webpage(self, video_id):
1163                 """Report webpage download."""
1164                 self._downloader.to_screen(u'[metacafe] %s: Downloading webpage' % video_id)
1165
1166         def report_extraction(self, video_id):
1167                 """Report information extraction."""
1168                 self._downloader.to_screen(u'[metacafe] %s: Extracting information' % video_id)
1169
        def _real_initialize(self):
                """Fetch the disclaimer page and submit the family-filter form.

                Posts 'filters': '0' (filter off) so that subsequent watch-page
                requests are not blocked. Failures are reported through the
                downloader and abort initialization.
                """
                # Retrieve disclaimer
                request = urllib2.Request(self._DISCLAIMER)
                try:
                        self.report_disclaimer()
                        disclaimer = urllib2.urlopen(request).read()
                except (urllib2.URLError, httplib.HTTPException, socket.error), err:
                        self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % str(err))
                        return

                # Confirm age
                disclaimer_form = {
                        'filters': '0',
                        'submit': "Continue - I'm over 18",
                        }
                request = urllib2.Request(self._FILTER_POST, urllib.urlencode(disclaimer_form))
                try:
                        self.report_age_confirmation()
                        disclaimer = urllib2.urlopen(request).read()
                except (urllib2.URLError, httplib.HTTPException, socket.error), err:
                        self._downloader.trouble(u'ERROR: unable to confirm age: %s' % str(err))
                        return
1192
1193         def _real_extract(self, url):
1194                 # Extract id and simplified title from URL
1195                 mobj = re.match(self._VALID_URL, url)
1196                 if mobj is None:
1197                         self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
1198                         return
1199
1200                 video_id = mobj.group(1)
1201
1202                 # Check if video comes from YouTube
1203                 mobj2 = re.match(r'^yt-(.*)$', video_id)
1204                 if mobj2 is not None:
1205                         self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % mobj2.group(1))
1206                         return
1207
1208                 # At this point we have a new video
1209                 self._downloader.increment_downloads()
1210
1211                 simple_title = mobj.group(2).decode('utf-8')
1212
1213                 # Retrieve video webpage to extract further information
1214                 request = urllib2.Request('http://www.metacafe.com/watch/%s/' % video_id)
1215                 try:
1216                         self.report_download_webpage(video_id)
1217                         webpage = urllib2.urlopen(request).read()
1218                 except (urllib2.URLError, httplib.HTTPException, socket.error), err:
1219                         self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % str(err))
1220                         return
1221
1222                 # Extract URL, uploader and title from webpage
1223                 self.report_extraction(video_id)
1224                 mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
1225                 if mobj is not None:
1226                         mediaURL = urllib.unquote(mobj.group(1))
1227                         video_extension = mediaURL[-3:]
1228
1229                         # Extract gdaKey if available
1230                         mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
1231                         if mobj is None:
1232                                 video_url = mediaURL
1233                         else:
1234                                 gdaKey = mobj.group(1)
1235                                 video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
1236                 else:
1237                         mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
1238                         if mobj is None:
1239                                 self._downloader.trouble(u'ERROR: unable to extract media URL')
1240                                 return
1241                         vardict = parse_qs(mobj.group(1))
1242                         if 'mediaData' not in vardict:
1243                                 self._downloader.trouble(u'ERROR: unable to extract media URL')
1244                                 return
1245                         mobj = re.search(r'"mediaURL":"(http.*?)","key":"(.*?)"', vardict['mediaData'][0])
1246                         if mobj is None:
1247                                 self._downloader.trouble(u'ERROR: unable to extract media URL')
1248                                 return
1249                         mediaURL = mobj.group(1).replace('\\/', '/')
1250                         video_extension = mediaURL[-3:]
1251                         video_url = '%s?__gda__=%s' % (mediaURL, mobj.group(2))
1252
1253                 mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
1254                 if mobj is None:
1255                         self._downloader.trouble(u'ERROR: unable to extract title')
1256                         return
1257                 video_title = mobj.group(1).decode('utf-8')
1258                 video_title = sanitize_title(video_title)
1259
1260                 mobj = re.search(r'(?ms)By:\s*<a .*?>(.+?)<', webpage)
1261                 if mobj is None:
1262                         self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
1263                         return
1264                 video_uploader = mobj.group(1)
1265
1266                 try:
1267                         # Process video information
1268                         self._downloader.process_info({
1269                                 'id':           video_id.decode('utf-8'),
1270                                 'url':          video_url.decode('utf-8'),
1271                                 'uploader':     video_uploader.decode('utf-8'),
1272                                 'upload_date':  u'NA',
1273                                 'title':        video_title,
1274                                 'stitle':       simple_title,
1275                                 'ext':          video_extension.decode('utf-8'),
1276                                 'format':       u'NA',
1277                                 'player_url':   None,
1278                         })
1279                 except UnavailableVideoError:
1280                         self._downloader.trouble(u'\nERROR: unable to download video')
1281
1282
1283 class DailymotionIE(InfoExtractor):
1284         """Information Extractor for Dailymotion"""
1285
1286         _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^_/]+)_([^/]+)'
1287
1288         def __init__(self, downloader=None):
1289                 InfoExtractor.__init__(self, downloader)
1290
1291         @staticmethod
1292         def suitable(url):
1293                 return (re.match(DailymotionIE._VALID_URL, url) is not None)
1294
1295         def report_download_webpage(self, video_id):
1296                 """Report webpage download."""
1297                 self._downloader.to_screen(u'[dailymotion] %s: Downloading webpage' % video_id)
1298
1299         def report_extraction(self, video_id):
1300                 """Report information extraction."""
1301                 self._downloader.to_screen(u'[dailymotion] %s: Extracting information' % video_id)
1302
1303         def _real_initialize(self):
1304                 return
1305
1306         def _real_extract(self, url):
1307                 # Extract id and simplified title from URL
1308                 mobj = re.match(self._VALID_URL, url)
1309                 if mobj is None:
1310                         self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
1311                         return
1312
1313                 # At this point we have a new video
1314                 self._downloader.increment_downloads()
1315                 video_id = mobj.group(1)
1316
1317                 simple_title = mobj.group(2).decode('utf-8')
1318                 video_extension = 'flv'
1319
1320                 # Retrieve video webpage to extract further information
1321                 request = urllib2.Request(url)
1322                 try:
1323                         self.report_download_webpage(video_id)
1324                         webpage = urllib2.urlopen(request).read()
1325                 except (urllib2.URLError, httplib.HTTPException, socket.error), err:
1326                         self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % str(err))
1327                         return
1328
1329                 # Extract URL, uploader and title from webpage
1330                 self.report_extraction(video_id)
1331                 mobj = re.search(r'(?i)addVariable\(\"video\"\s*,\s*\"([^\"]*)\"\)', webpage)
1332                 if mobj is None:
1333                         self._downloader.trouble(u'ERROR: unable to extract media URL')
1334                         return
1335                 mediaURL = urllib.unquote(mobj.group(1))
1336
1337                 # if needed add http://www.dailymotion.com/ if relative URL
1338
1339                 video_url = mediaURL
1340
1341                 # '<meta\s+name="title"\s+content="Dailymotion\s*[:\-]\s*(.*?)"\s*\/\s*>'
1342                 mobj = re.search(r'(?im)<title>Dailymotion\s*[\-:]\s*(.+?)</title>', webpage)
1343                 if mobj is None:
1344                         self._downloader.trouble(u'ERROR: unable to extract title')
1345                         return
1346                 video_title = mobj.group(1).decode('utf-8')
1347                 video_title = sanitize_title(video_title)
1348
1349                 mobj = re.search(r'(?im)<Attribute name="owner">(.+?)</Attribute>', webpage)
1350                 if mobj is None:
1351                         self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
1352                         return
1353                 video_uploader = mobj.group(1)
1354
1355                 try:
1356                         # Process video information
1357                         self._downloader.process_info({
1358                                 'id':           video_id.decode('utf-8'),
1359                                 'url':          video_url.decode('utf-8'),
1360                                 'uploader':     video_uploader.decode('utf-8'),
1361                                 'upload_date':  u'NA',
1362                                 'title':        video_title,
1363                                 'stitle':       simple_title,
1364                                 'ext':          video_extension.decode('utf-8'),
1365                                 'format':       u'NA',
1366                                 'player_url':   None,
1367                         })
1368                 except UnavailableVideoError:
1369                         self._downloader.trouble(u'\nERROR: unable to download video')
1370
class GoogleIE(InfoExtractor):
	"""Information extractor for video.google.com."""

	# Captures the docid query parameter (the video id) on any of the
	# listed national Google Video domains.
	_VALID_URL = r'(?:http://)?video\.google\.(?:com(?:\.au)?|co\.(?:uk|jp|kr|cr)|ca|de|es|fr|it|nl|pl)/videoplay\?docid=([^\&]+).*'

	def __init__(self, downloader=None):
		InfoExtractor.__init__(self, downloader)

	@staticmethod
	def suitable(url):
		# True if this extractor can handle the given URL.
		return (re.match(GoogleIE._VALID_URL, url) is not None)

	def report_download_webpage(self, video_id):
		"""Report webpage download."""
		self._downloader.to_screen(u'[video.google] %s: Downloading webpage' % video_id)

	def report_extraction(self, video_id):
		"""Report information extraction."""
		self._downloader.to_screen(u'[video.google] %s: Extracting information' % video_id)

	def _real_initialize(self):
		# No login or other setup is required for Google Video.
		return

	def _real_extract(self, url):
		"""Download the videoplay page and extract the media URL and title."""
		# Extract id from URL
		mobj = re.match(self._VALID_URL, url)
		if mobj is None:
			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
			return

		# At this point we have a new video
		self._downloader.increment_downloads()
		video_id = mobj.group(1)

		# Default container; switched to 'flv' below when only the
		# flash stream URL is available.
		video_extension = 'mp4'

		# Retrieve video webpage to extract further information
		request = urllib2.Request('http://video.google.com/videoplay?docid=%s&hl=en&oe=utf-8' % video_id)
		try:
			self.report_download_webpage(video_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
			return

		# Extract URL, uploader, and title from webpage
		self.report_extraction(video_id)
		mobj = re.search(r"download_url:'([^']+)'", webpage)
		if mobj is None:
			# No direct mp4 download link; fall back to the flash stream.
			video_extension = 'flv'
			mobj = re.search(r"(?i)videoUrl\\x3d(.+?)\\x26", webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract media URL')
			return
		mediaURL = urllib.unquote(mobj.group(1))
		# Undo the JavaScript hex escaping of '=' and '&' in the URL.
		mediaURL = mediaURL.replace('\\x3d', '\x3d')
		mediaURL = mediaURL.replace('\\x26', '\x26')

		video_url = mediaURL

		mobj = re.search(r'<title>(.*)</title>', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract title')
			return
		video_title = mobj.group(1).decode('utf-8')
		video_title = sanitize_title(video_title)
		# Collapse runs of non-alphanumeric characters into underscores.
		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)

		# Extract video description
		mobj = re.search(r'<span id=short-desc-content>([^<]*)</span>', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract video description')
			return
		video_description = mobj.group(1).decode('utf-8')
		if not video_description:
			video_description = 'No description available.'
		# NOTE(review): video_description and video_thumbnail are extracted
		# (and extraction failure is fatal) but neither is passed to
		# process_info below -- confirm whether that is intentional.

		# Extract video thumbnail
		if self._downloader.params.get('forcethumbnail', False):
			# The thumbnail is only available from the search results page.
			request = urllib2.Request('http://video.google.com/videosearch?q=%s+site:video.google.com&hl=en' % abs(int(video_id)))
			try:
				webpage = urllib2.urlopen(request).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
				self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
				return
			mobj = re.search(r'<img class=thumbnail-img (?:.* )?src=(http.*)>', webpage)
			if mobj is None:
				self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
				return
			video_thumbnail = mobj.group(1)
		else:	# we need something to pass to process_info
			video_thumbnail = ''


		try:
			# Process video information
			self._downloader.process_info({
				'id':		video_id.decode('utf-8'),
				'url':		video_url.decode('utf-8'),
				'uploader':	u'NA',
				'upload_date':	u'NA',
				'title':	video_title,
				'stitle':	simple_title,
				'ext':		video_extension.decode('utf-8'),
				'format':	u'NA',
				'player_url':	None,
			})
		except UnavailableVideoError:
			self._downloader.trouble(u'\nERROR: unable to download video')
1480
1481
1482 class PhotobucketIE(InfoExtractor):
1483         """Information extractor for photobucket.com."""
1484
1485         _VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)'
1486
1487         def __init__(self, downloader=None):
1488                 InfoExtractor.__init__(self, downloader)
1489
1490         @staticmethod
1491         def suitable(url):
1492                 return (re.match(PhotobucketIE._VALID_URL, url) is not None)
1493
1494         def report_download_webpage(self, video_id):
1495                 """Report webpage download."""
1496                 self._downloader.to_screen(u'[photobucket] %s: Downloading webpage' % video_id)
1497
1498         def report_extraction(self, video_id):
1499                 """Report information extraction."""
1500                 self._downloader.to_screen(u'[photobucket] %s: Extracting information' % video_id)
1501
1502         def _real_initialize(self):
1503                 return
1504
1505         def _real_extract(self, url):
1506                 # Extract id from URL
1507                 mobj = re.match(self._VALID_URL, url)
1508                 if mobj is None:
1509                         self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
1510                         return
1511
1512                 # At this point we have a new video
1513                 self._downloader.increment_downloads()
1514                 video_id = mobj.group(1)
1515
1516                 video_extension = 'flv'
1517
1518                 # Retrieve video webpage to extract further information
1519                 request = urllib2.Request(url)
1520                 try:
1521                         self.report_download_webpage(video_id)
1522                         webpage = urllib2.urlopen(request).read()
1523                 except (urllib2.URLError, httplib.HTTPException, socket.error), err:
1524                         self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
1525                         return
1526
1527                 # Extract URL, uploader, and title from webpage
1528                 self.report_extraction(video_id)
1529                 mobj = re.search(r'<link rel="video_src" href=".*\?file=([^"]+)" />', webpage)
1530                 if mobj is None:
1531                         self._downloader.trouble(u'ERROR: unable to extract media URL')
1532                         return
1533                 mediaURL = urllib.unquote(mobj.group(1))
1534
1535                 video_url = mediaURL
1536
1537                 mobj = re.search(r'<title>(.*) video by (.*) - Photobucket</title>', webpage)
1538                 if mobj is None:
1539                         self._downloader.trouble(u'ERROR: unable to extract title')
1540                         return
1541                 video_title = mobj.group(1).decode('utf-8')
1542                 video_title = sanitize_title(video_title)
1543                 simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
1544
1545                 video_uploader = mobj.group(2).decode('utf-8')
1546
1547                 try:
1548                         # Process video information
1549                         self._downloader.process_info({
1550                                 'id':           video_id.decode('utf-8'),
1551                                 'url':          video_url.decode('utf-8'),
1552                                 'uploader':     video_uploader,
1553                                 'upload_date':  u'NA',
1554                                 'title':        video_title,
1555                                 'stitle':       simple_title,
1556                                 'ext':          video_extension.decode('utf-8'),
1557                                 'format':       u'NA',
1558                                 'player_url':   None,
1559                         })
1560                 except UnavailableVideoError:
1561                         self._downloader.trouble(u'\nERROR: unable to download video')
1562
1563
1564 class YahooIE(InfoExtractor):
1565         """Information extractor for video.yahoo.com."""
1566
1567         # _VALID_URL matches all Yahoo! Video URLs
1568         # _VPAGE_URL matches only the extractable '/watch/' URLs
1569         _VALID_URL = r'(?:http://)?(?:[a-z]+\.)?video\.yahoo\.com/(?:watch|network)/([0-9]+)(?:/|\?v=)([0-9]+)(?:[#\?].*)?'
1570         _VPAGE_URL = r'(?:http://)?video\.yahoo\.com/watch/([0-9]+)/([0-9]+)(?:[#\?].*)?'
1571
1572         def __init__(self, downloader=None):
1573                 InfoExtractor.__init__(self, downloader)
1574
1575         @staticmethod
1576         def suitable(url):
1577                 return (re.match(YahooIE._VALID_URL, url) is not None)
1578
1579         def report_download_webpage(self, video_id):
1580                 """Report webpage download."""
1581                 self._downloader.to_screen(u'[video.yahoo] %s: Downloading webpage' % video_id)
1582
1583         def report_extraction(self, video_id):
1584                 """Report information extraction."""
1585                 self._downloader.to_screen(u'[video.yahoo] %s: Extracting information' % video_id)
1586
1587         def _real_initialize(self):
1588                 return
1589
1590         def _real_extract(self, url, new_video=True):
1591                 # Extract ID from URL
1592                 mobj = re.match(self._VALID_URL, url)
1593                 if mobj is None:
1594                         self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
1595                         return
1596
1597                 # At this point we have a new video
1598                 self._downloader.increment_downloads()
1599                 video_id = mobj.group(2)
1600                 video_extension = 'flv'
1601
1602                 # Rewrite valid but non-extractable URLs as
1603                 # extractable English language /watch/ URLs
1604                 if re.match(self._VPAGE_URL, url) is None:
1605                         request = urllib2.Request(url)
1606                         try:
1607                                 webpage = urllib2.urlopen(request).read()
1608                         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
1609                                 self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
1610                                 return
1611
1612                         mobj = re.search(r'\("id", "([0-9]+)"\);', webpage)
1613                         if mobj is None:
1614                                 self._downloader.trouble(u'ERROR: Unable to extract id field')
1615                                 return
1616                         yahoo_id = mobj.group(1)
1617
1618                         mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage)
1619                         if mobj is None:
1620                                 self._downloader.trouble(u'ERROR: Unable to extract vid field')
1621                                 return
1622                         yahoo_vid = mobj.group(1)
1623
1624                         url = 'http://video.yahoo.com/watch/%s/%s' % (yahoo_vid, yahoo_id)
1625                         return self._real_extract(url, new_video=False)
1626
1627                 # Retrieve video webpage to extract further information
1628                 request = urllib2.Request(url)
1629                 try:
1630                         self.report_download_webpage(video_id)
1631                         webpage = urllib2.urlopen(request).read()
1632                 except (urllib2.URLError, httplib.HTTPException, socket.error), err:
1633                         self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
1634                         return
1635
1636                 # Extract uploader and title from webpage
1637                 self.report_extraction(video_id)
1638                 mobj = re.search(r'<meta name="title" content="(.*)" />', webpage)
1639                 if mobj is None:
1640                         self._downloader.trouble(u'ERROR: unable to extract video title')
1641                         return
1642                 video_title = mobj.group(1).decode('utf-8')
1643                 simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
1644
1645                 mobj = re.search(r'<h2 class="ti-5"><a href="http://video\.yahoo\.com/(people|profile)/[0-9]+" beacon=".*">(.*)</a></h2>', webpage)
1646                 if mobj is None:
1647                         self._downloader.trouble(u'ERROR: unable to extract video uploader')
1648                         return
1649                 video_uploader = mobj.group(1).decode('utf-8')
1650
1651                 # Extract video thumbnail
1652                 mobj = re.search(r'<link rel="image_src" href="(.*)" />', webpage)
1653                 if mobj is None:
1654                         self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
1655                         return
1656                 video_thumbnail = mobj.group(1).decode('utf-8')
1657
1658                 # Extract video description
1659                 mobj = re.search(r'<meta name="description" content="(.*)" />', webpage)
1660                 if mobj is None:
1661                         self._downloader.trouble(u'ERROR: unable to extract video description')
1662                         return
1663                 video_description = mobj.group(1).decode('utf-8')
1664                 if not video_description: video_description = 'No description available.'
1665
1666                 # Extract video height and width
1667                 mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage)
1668                 if mobj is None:
1669                         self._downloader.trouble(u'ERROR: unable to extract video height')
1670                         return
1671                 yv_video_height = mobj.group(1)
1672
1673                 mobj = re.search(r'<meta name="video_width" content="([0-9]+)" />', webpage)
1674                 if mobj is None:
1675                         self._downloader.trouble(u'ERROR: unable to extract video width')
1676                         return
1677                 yv_video_width = mobj.group(1)
1678
1679                 # Retrieve video playlist to extract media URL
1680                 # I'm not completely sure what all these options are, but we
1681                 # seem to need most of them, otherwise the server sends a 401.
1682                 yv_lg = 'R0xx6idZnW2zlrKP8xxAIR'  # not sure what this represents
1683                 yv_bitrate = '700'  # according to Wikipedia this is hard-coded
1684                 request = urllib2.Request('http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?node_id=' + video_id +
1685                                           '&tech=flash&mode=playlist&lg=' + yv_lg + '&bitrate=' + yv_bitrate + '&vidH=' + yv_video_height +
1686                                           '&vidW=' + yv_video_width + '&swf=as3&rd=video.yahoo.com&tk=null&adsupported=v1,v2,&eventid=1301797')
1687                 try:
1688                         self.report_download_webpage(video_id)
1689                         webpage = urllib2.urlopen(request).read()
1690                 except (urllib2.URLError, httplib.HTTPException, socket.error), err:
1691                         self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
1692                         return
1693
1694                 # Extract media URL from playlist XML
1695                 mobj = re.search(r'<STREAM APP="(http://.*)" FULLPATH="/?(/.*\.flv\?[^"]*)"', webpage)
1696                 if mobj is None:
1697                         self._downloader.trouble(u'ERROR: Unable to extract media URL')
1698                         return
1699                 video_url = urllib.unquote(mobj.group(1) + mobj.group(2)).decode('utf-8')
1700                 video_url = re.sub(r'(?u)&(.+?);', htmlentity_transform, video_url)
1701
1702                 try:
1703                         # Process video information
1704                         self._downloader.process_info({
1705                                 'id':           video_id.decode('utf-8'),
1706                                 'url':          video_url,
1707                                 'uploader':     video_uploader,
1708                                 'upload_date':  u'NA',
1709                                 'title':        video_title,
1710                                 'stitle':       simple_title,
1711                                 'ext':          video_extension.decode('utf-8'),
1712                                 'thumbnail':    video_thumbnail.decode('utf-8'),
1713                                 'description':  video_description,
1714                                 'thumbnail':    video_thumbnail,
1715                                 'description':  video_description,
1716                                 'player_url':   None,
1717                         })
1718                 except UnavailableVideoError:
1719                         self._downloader.trouble(u'\nERROR: unable to download video')
1720
1721
1722 class VimeoIE(InfoExtractor):
1723         """Information extractor for vimeo.com."""
1724
1725         # _VALID_URL matches Vimeo URLs
1726         _VALID_URL = r'(?:http://)?(?:www.)?vimeo\.com/([0-9]+)'
1727
1728         def __init__(self, downloader=None):
1729                 InfoExtractor.__init__(self, downloader)
1730
1731         @staticmethod
1732         def suitable(url):
1733                 return (re.match(VimeoIE._VALID_URL, url) is not None)
1734
1735         def report_download_webpage(self, video_id):
1736                 """Report webpage download."""
1737                 self._downloader.to_screen(u'[video.vimeo] %s: Downloading webpage' % video_id)
1738
1739         def report_extraction(self, video_id):
1740                 """Report information extraction."""
1741                 self._downloader.to_screen(u'[video.vimeo] %s: Extracting information' % video_id)
1742
1743         def _real_initialize(self):
1744                 return
1745
1746         def _real_extract(self, url, new_video=True):
1747                 # Extract ID from URL
1748                 mobj = re.match(self._VALID_URL, url)
1749                 if mobj is None:
1750                         self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
1751                         return
1752
1753                 # At this point we have a new video
1754                 self._downloader.increment_downloads()
1755                 video_id = mobj.group(1)
1756                 video_extension = 'flv' # FIXME
1757
1758                 # Retrieve video webpage to extract further information
1759                 request = urllib2.Request("http://vimeo.com/moogaloop/load/clip:%s" % video_id, None, std_headers)
1760                 try:
1761                         self.report_download_webpage(video_id)
1762                         webpage = urllib2.urlopen(request).read()
1763                 except (urllib2.URLError, httplib.HTTPException, socket.error), err:
1764                         self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
1765                         return
1766
1767                 # Now we begin extracting as much information as we can from what we
1768                 # retrieved. First we extract the information common to all extractors,
1769                 # and latter we extract those that are Vimeo specific.
1770                 self.report_extraction(video_id)
1771
1772                 # Extract title
1773                 mobj = re.search(r'<caption>(.*?)</caption>', webpage)
1774                 if mobj is None:
1775                         self._downloader.trouble(u'ERROR: unable to extract video title')
1776                         return
1777                 video_title = mobj.group(1).decode('utf-8')
1778                 simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
1779
1780                 # Extract uploader
1781                 mobj = re.search(r'<uploader_url>http://vimeo.com/(.*?)</uploader_url>', webpage)
1782                 if mobj is None:
1783                         self._downloader.trouble(u'ERROR: unable to extract video uploader')
1784                         return
1785                 video_uploader = mobj.group(1).decode('utf-8')
1786
1787                 # Extract video thumbnail
1788                 mobj = re.search(r'<thumbnail>(.*?)</thumbnail>', webpage)
1789                 if mobj is None:
1790                         self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
1791                         return
1792                 video_thumbnail = mobj.group(1).decode('utf-8')
1793
1794                 # # Extract video description
1795                 # mobj = re.search(r'<meta property="og:description" content="(.*)" />', webpage)
1796                 # if mobj is None:
1797                 #       self._downloader.trouble(u'ERROR: unable to extract video description')
1798                 #       return
1799                 # video_description = mobj.group(1).decode('utf-8')
1800                 # if not video_description: video_description = 'No description available.'
1801                 video_description = 'Foo.'
1802
1803                 # Vimeo specific: extract request signature
1804                 mobj = re.search(r'<request_signature>(.*?)</request_signature>', webpage)
1805                 if mobj is None:
1806                         self._downloader.trouble(u'ERROR: unable to extract request signature')
1807                         return
1808                 sig = mobj.group(1).decode('utf-8')
1809
1810                 # Vimeo specific: Extract request signature expiration
1811                 mobj = re.search(r'<request_signature_expires>(.*?)</request_signature_expires>', webpage)
1812                 if mobj is None:
1813                         self._downloader.trouble(u'ERROR: unable to extract request signature expiration')
1814                         return
1815                 sig_exp = mobj.group(1).decode('utf-8')
1816
1817                 video_url = "http://vimeo.com/moogaloop/play/clip:%s/%s/%s" % (video_id, sig, sig_exp)
1818
1819                 try:
1820                         # Process video information
1821                         self._downloader.process_info({
1822                                 'id':           video_id.decode('utf-8'),
1823                                 'url':          video_url,
1824                                 'uploader':     video_uploader,
1825                                 'upload_date':  u'NA',
1826                                 'title':        video_title,
1827                                 'stitle':       simple_title,
1828                                 'ext':          video_extension.decode('utf-8'),
1829                                 'thumbnail':    video_thumbnail.decode('utf-8'),
1830                                 'description':  video_description,
1831                                 'thumbnail':    video_thumbnail,
1832                                 'description':  video_description,
1833                                 'player_url':   None,
1834                         })
1835                 except UnavailableVideoError:
1836                         self._downloader.trouble(u'ERROR: unable to download video')
1837
1838
1839 class GenericIE(InfoExtractor):
1840         """Generic last-resort information extractor."""
1841
1842         def __init__(self, downloader=None):
1843                 InfoExtractor.__init__(self, downloader)
1844
1845         @staticmethod
1846         def suitable(url):
1847                 return True
1848
1849         def report_download_webpage(self, video_id):
1850                 """Report webpage download."""
1851                 self._downloader.to_screen(u'WARNING: Falling back on generic information extractor.')
1852                 self._downloader.to_screen(u'[generic] %s: Downloading webpage' % video_id)
1853
1854         def report_extraction(self, video_id):
1855                 """Report information extraction."""
1856                 self._downloader.to_screen(u'[generic] %s: Extracting information' % video_id)
1857
1858         def _real_initialize(self):
1859                 return
1860
1861         def _real_extract(self, url):
1862                 # At this point we have a new video
1863                 self._downloader.increment_downloads()
1864
1865                 video_id = url.split('/')[-1]
1866                 request = urllib2.Request(url)
1867                 try:
1868                         self.report_download_webpage(video_id)
1869                         webpage = urllib2.urlopen(request).read()
1870                 except (urllib2.URLError, httplib.HTTPException, socket.error), err:
1871                         self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
1872                         return
1873                 except ValueError, err:
1874                         # since this is the last-resort InfoExtractor, if
1875                         # this error is thrown, it'll be thrown here
1876                         self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
1877                         return
1878
1879                 self.report_extraction(video_id)
1880                 # Start with something easy: JW Player in SWFObject
1881                 mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
1882                 if mobj is None:
1883                         # Broaden the search a little bit
1884                         mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
1885                 if mobj is None:
1886                         self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
1887                         return
1888
1889                 # It's possible that one of the regexes
1890                 # matched, but returned an empty group:
1891                 if mobj.group(1) is None:
1892                         self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
1893                         return
1894
1895                 video_url = urllib.unquote(mobj.group(1))
1896                 video_id  = os.path.basename(video_url)
1897
1898                 # here's a fun little line of code for you:
1899                 video_extension = os.path.splitext(video_id)[1][1:]
1900                 video_id        = os.path.splitext(video_id)[0]
1901
1902                 # it's tempting to parse this further, but you would
1903                 # have to take into account all the variations like
1904                 #   Video Title - Site Name
1905                 #   Site Name | Video Title
1906                 #   Video Title - Tagline | Site Name
1907                 # and so on and so forth; it's just not practical
1908                 mobj = re.search(r'<title>(.*)</title>', webpage)
1909                 if mobj is None:
1910                         self._downloader.trouble(u'ERROR: unable to extract title')
1911                         return
1912                 video_title = mobj.group(1).decode('utf-8')
1913                 video_title = sanitize_title(video_title)
1914                 simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
1915
1916                 # video uploader is domain name
1917                 mobj = re.match(r'(?:https?://)?([^/]*)/.*', url)
1918                 if mobj is None:
1919                         self._downloader.trouble(u'ERROR: unable to extract title')
1920                         return
1921                 video_uploader = mobj.group(1).decode('utf-8')
1922
1923                 try:
1924                         # Process video information
1925                         self._downloader.process_info({
1926                                 'id':           video_id.decode('utf-8'),
1927                                 'url':          video_url.decode('utf-8'),
1928                                 'uploader':     video_uploader,
1929                                 'upload_date':  u'NA',
1930                                 'title':        video_title,
1931                                 'stitle':       simple_title,
1932                                 'ext':          video_extension.decode('utf-8'),
1933                                 'format':       u'NA',
1934                                 'player_url':   None,
1935                         })
1936                 except UnavailableVideoError, err:
1937                         self._downloader.trouble(u'\nERROR: unable to download video')
1938
1939
1940 class YoutubeSearchIE(InfoExtractor):
1941         """Information Extractor for YouTube search queries."""
1942         _VALID_QUERY = r'ytsearch(\d+|all)?:[\s\S]+'
1943         _TEMPLATE_URL = 'http://www.youtube.com/results?search_query=%s&page=%s&gl=US&hl=en'
1944         _VIDEO_INDICATOR = r'href="/watch\?v=.+?"'
1945         _MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
1946         _youtube_ie = None
1947         _max_youtube_results = 1000
1948
1949         def __init__(self, youtube_ie, downloader=None):
1950                 InfoExtractor.__init__(self, downloader)
1951                 self._youtube_ie = youtube_ie
1952
1953         @staticmethod
1954         def suitable(url):
1955                 return (re.match(YoutubeSearchIE._VALID_QUERY, url) is not None)
1956
1957         def report_download_page(self, query, pagenum):
1958                 """Report attempt to download playlist page with given number."""
1959                 query = query.decode(preferredencoding())
1960                 self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))
1961
1962         def _real_initialize(self):
1963                 self._youtube_ie.initialize()
1964
1965         def _real_extract(self, query):
1966                 mobj = re.match(self._VALID_QUERY, query)
1967                 if mobj is None:
1968                         self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
1969                         return
1970
1971                 prefix, query = query.split(':')
1972                 prefix = prefix[8:]
1973                 query  = query.encode('utf-8')
1974                 if prefix == '':
1975                         self._download_n_results(query, 1)
1976                         return
1977                 elif prefix == 'all':
1978                         self._download_n_results(query, self._max_youtube_results)
1979                         return
1980                 else:
1981                         try:
1982                                 n = long(prefix)
1983                                 if n <= 0:
1984                                         self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
1985                                         return
1986                                 elif n > self._max_youtube_results:
1987                                         self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)'  % (self._max_youtube_results, n))
1988                                         n = self._max_youtube_results
1989                                 self._download_n_results(query, n)
1990                                 return
1991                         except ValueError: # parsing prefix as integer fails
1992                                 self._download_n_results(query, 1)
1993                                 return
1994
1995         def _download_n_results(self, query, n):
1996                 """Downloads a specified number of results for a query"""
1997
1998                 video_ids = []
1999                 already_seen = set()
2000                 pagenum = 1
2001
2002                 while True:
2003                         self.report_download_page(query, pagenum)
2004                         result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
2005                         request = urllib2.Request(result_url)
2006                         try:
2007                                 page = urllib2.urlopen(request).read()
2008                         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
2009                                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
2010                                 return
2011
2012                         # Extract video identifiers
2013                         for mobj in re.finditer(self._VIDEO_INDICATOR, page):
2014                                 video_id = page[mobj.span()[0]:mobj.span()[1]].split('=')[2][:-1]
2015                                 if video_id not in already_seen:
2016                                         video_ids.append(video_id)
2017                                         already_seen.add(video_id)
2018                                         if len(video_ids) == n:
2019                                                 # Specified n videos reached
2020                                                 for id in video_ids:
2021                                                         self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
2022                                                 return
2023
2024                         if re.search(self._MORE_PAGES_INDICATOR, page) is None:
2025                                 for id in video_ids:
2026                                         self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
2027                                 return
2028
2029                         pagenum = pagenum + 1
2030
2031 class GoogleSearchIE(InfoExtractor):
2032         """Information Extractor for Google Video search queries."""
2033         _VALID_QUERY = r'gvsearch(\d+|all)?:[\s\S]+'
2034         _TEMPLATE_URL = 'http://video.google.com/videosearch?q=%s+site:video.google.com&start=%s&hl=en'
2035         _VIDEO_INDICATOR = r'videoplay\?docid=([^\&>]+)\&'
2036         _MORE_PAGES_INDICATOR = r'<span>Next</span>'
2037         _google_ie = None
2038         _max_google_results = 1000
2039
2040         def __init__(self, google_ie, downloader=None):
2041                 InfoExtractor.__init__(self, downloader)
2042                 self._google_ie = google_ie
2043
2044         @staticmethod
2045         def suitable(url):
2046                 return (re.match(GoogleSearchIE._VALID_QUERY, url) is not None)
2047
2048         def report_download_page(self, query, pagenum):
2049                 """Report attempt to download playlist page with given number."""
2050                 query = query.decode(preferredencoding())
2051                 self._downloader.to_screen(u'[video.google] query "%s": Downloading page %s' % (query, pagenum))
2052
2053         def _real_initialize(self):
2054                 self._google_ie.initialize()
2055
2056         def _real_extract(self, query):
2057                 mobj = re.match(self._VALID_QUERY, query)
2058                 if mobj is None:
2059                         self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
2060                         return
2061
2062                 prefix, query = query.split(':')
2063                 prefix = prefix[8:]
2064                 query  = query.encode('utf-8')
2065                 if prefix == '':
2066                         self._download_n_results(query, 1)
2067                         return
2068                 elif prefix == 'all':
2069                         self._download_n_results(query, self._max_google_results)
2070                         return
2071                 else:
2072                         try:
2073                                 n = long(prefix)
2074                                 if n <= 0:
2075                                         self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
2076                                         return
2077                                 elif n > self._max_google_results:
2078                                         self._downloader.to_stderr(u'WARNING: gvsearch returns max %i results (you requested %i)'  % (self._max_google_results, n))
2079                                         n = self._max_google_results
2080                                 self._download_n_results(query, n)
2081                                 return
2082                         except ValueError: # parsing prefix as integer fails
2083                                 self._download_n_results(query, 1)
2084                                 return
2085
2086         def _download_n_results(self, query, n):
2087                 """Downloads a specified number of results for a query"""
2088
2089                 video_ids = []
2090                 already_seen = set()
2091                 pagenum = 1
2092
2093                 while True:
2094                         self.report_download_page(query, pagenum)
2095                         result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
2096                         request = urllib2.Request(result_url)
2097                         try:
2098                                 page = urllib2.urlopen(request).read()
2099                         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
2100                                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
2101                                 return
2102
2103                         # Extract video identifiers
2104                         for mobj in re.finditer(self._VIDEO_INDICATOR, page):
2105                                 video_id = mobj.group(1)
2106                                 if video_id not in already_seen:
2107                                         video_ids.append(video_id)
2108                                         already_seen.add(video_id)
2109                                         if len(video_ids) == n:
2110                                                 # Specified n videos reached
2111                                                 for id in video_ids:
2112                                                         self._google_ie.extract('http://video.google.com/videoplay?docid=%s' % id)
2113                                                 return
2114
2115                         if re.search(self._MORE_PAGES_INDICATOR, page) is None:
2116                                 for id in video_ids:
2117                                         self._google_ie.extract('http://video.google.com/videoplay?docid=%s' % id)
2118                                 return
2119
2120                         pagenum = pagenum + 1
2121
2122 class YahooSearchIE(InfoExtractor):
2123         """Information Extractor for Yahoo! Video search queries."""
2124         _VALID_QUERY = r'yvsearch(\d+|all)?:[\s\S]+'
2125         _TEMPLATE_URL = 'http://video.yahoo.com/search/?p=%s&o=%s'
2126         _VIDEO_INDICATOR = r'href="http://video\.yahoo\.com/watch/([0-9]+/[0-9]+)"'
2127         _MORE_PAGES_INDICATOR = r'\s*Next'
2128         _yahoo_ie = None
2129         _max_yahoo_results = 1000
2130
2131         def __init__(self, yahoo_ie, downloader=None):
2132                 InfoExtractor.__init__(self, downloader)
2133                 self._yahoo_ie = yahoo_ie
2134
2135         @staticmethod
2136         def suitable(url):
2137                 return (re.match(YahooSearchIE._VALID_QUERY, url) is not None)
2138
2139         def report_download_page(self, query, pagenum):
2140                 """Report attempt to download playlist page with given number."""
2141                 query = query.decode(preferredencoding())
2142                 self._downloader.to_screen(u'[video.yahoo] query "%s": Downloading page %s' % (query, pagenum))
2143
2144         def _real_initialize(self):
2145                 self._yahoo_ie.initialize()
2146
2147         def _real_extract(self, query):
2148                 mobj = re.match(self._VALID_QUERY, query)
2149                 if mobj is None:
2150                         self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
2151                         return
2152
2153                 prefix, query = query.split(':')
2154                 prefix = prefix[8:]
2155                 query  = query.encode('utf-8')
2156                 if prefix == '':
2157                         self._download_n_results(query, 1)
2158                         return
2159                 elif prefix == 'all':
2160                         self._download_n_results(query, self._max_yahoo_results)
2161                         return
2162                 else:
2163                         try:
2164                                 n = long(prefix)
2165                                 if n <= 0:
2166                                         self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
2167                                         return
2168                                 elif n > self._max_yahoo_results:
2169                                         self._downloader.to_stderr(u'WARNING: yvsearch returns max %i results (you requested %i)'  % (self._max_yahoo_results, n))
2170                                         n = self._max_yahoo_results
2171                                 self._download_n_results(query, n)
2172                                 return
2173                         except ValueError: # parsing prefix as integer fails
2174                                 self._download_n_results(query, 1)
2175                                 return
2176
2177         def _download_n_results(self, query, n):
2178                 """Downloads a specified number of results for a query"""
2179
2180                 video_ids = []
2181                 already_seen = set()
2182                 pagenum = 1
2183
2184                 while True:
2185                         self.report_download_page(query, pagenum)
2186                         result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
2187                         request = urllib2.Request(result_url)
2188                         try:
2189                                 page = urllib2.urlopen(request).read()
2190                         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
2191                                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
2192                                 return
2193
2194                         # Extract video identifiers
2195                         for mobj in re.finditer(self._VIDEO_INDICATOR, page):
2196                                 video_id = mobj.group(1)
2197                                 if video_id not in already_seen:
2198                                         video_ids.append(video_id)
2199                                         already_seen.add(video_id)
2200                                         if len(video_ids) == n:
2201                                                 # Specified n videos reached
2202                                                 for id in video_ids:
2203                                                         self._yahoo_ie.extract('http://video.yahoo.com/watch/%s' % id)
2204                                                 return
2205
2206                         if re.search(self._MORE_PAGES_INDICATOR, page) is None:
2207                                 for id in video_ids:
2208                                         self._yahoo_ie.extract('http://video.yahoo.com/watch/%s' % id)
2209                                 return
2210
2211                         pagenum = pagenum + 1
2212
2213 class YoutubePlaylistIE(InfoExtractor):
2214         """Information Extractor for YouTube playlists."""
2215
2216         _VALID_URL = r'(?:http://)?(?:\w+\.)?youtube.com/(?:(?:view_play_list|my_playlists|artist)\?.*?(p|a)=|user/.*?/user/|p/)([^&]+).*'
2217         _TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
2218         _VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
2219         _MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
2220         _youtube_ie = None
2221
2222         def __init__(self, youtube_ie, downloader=None):
2223                 InfoExtractor.__init__(self, downloader)
2224                 self._youtube_ie = youtube_ie
2225
2226         @staticmethod
2227         def suitable(url):
2228                 return (re.match(YoutubePlaylistIE._VALID_URL, url) is not None)
2229
2230         def report_download_page(self, playlist_id, pagenum):
2231                 """Report attempt to download playlist page with given number."""
2232                 self._downloader.to_screen(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum))
2233
2234         def _real_initialize(self):
2235                 self._youtube_ie.initialize()
2236
2237         def _real_extract(self, url):
2238                 # Extract playlist id
2239                 mobj = re.match(self._VALID_URL, url)
2240                 if mobj is None:
2241                         self._downloader.trouble(u'ERROR: invalid url: %s' % url)
2242                         return
2243
2244                 # Download playlist pages
2245                 # prefix is 'p' as default for playlists but there are other types that need extra care
2246                 playlist_prefix = mobj.group(1)
2247                 if playlist_prefix == 'a':
2248                         playlist_access = 'artist'
2249                 else:
2250                         playlist_access = 'view_play_list'
2251                 playlist_id = mobj.group(2)
2252                 video_ids = []
2253                 pagenum = 1
2254
2255                 while True:
2256                         self.report_download_page(playlist_id, pagenum)
2257                         request = urllib2.Request(self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum))
2258                         try:
2259                                 page = urllib2.urlopen(request).read()
2260                         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
2261                                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
2262                                 return
2263
2264                         # Extract video identifiers
2265                         ids_in_page = []
2266                         for mobj in re.finditer(self._VIDEO_INDICATOR, page):
2267                                 if mobj.group(1) not in ids_in_page:
2268                                         ids_in_page.append(mobj.group(1))
2269                         video_ids.extend(ids_in_page)
2270
2271                         if re.search(self._MORE_PAGES_INDICATOR, page) is None:
2272                                 break
2273                         pagenum = pagenum + 1
2274
2275                 playliststart = self._downloader.params.get('playliststart', 1) - 1
2276                 playlistend = self._downloader.params.get('playlistend', -1)
2277                 video_ids = video_ids[playliststart:playlistend]
2278
2279                 for id in video_ids:
2280                         self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
2281                 return
2282
2283 class YoutubeUserIE(InfoExtractor):
2284         """Information Extractor for YouTube users."""
2285
2286         _VALID_URL = r'(?:(?:(?:http://)?(?:\w+\.)?youtube.com/user/)|ytuser:)([A-Za-z0-9_-]+)'
2287         _TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'
2288         _GDATA_PAGE_SIZE = 50
2289         _GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d'
2290         _VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
2291         _youtube_ie = None
2292
2293         def __init__(self, youtube_ie, downloader=None):
2294                 InfoExtractor.__init__(self, downloader)
2295                 self._youtube_ie = youtube_ie
2296
2297         @staticmethod
2298         def suitable(url):
2299                 return (re.match(YoutubeUserIE._VALID_URL, url) is not None)
2300
2301         def report_download_page(self, username, start_index):
2302                 """Report attempt to download user page."""
2303                 self._downloader.to_screen(u'[youtube] user %s: Downloading video ids from %d to %d' %
2304                                            (username, start_index, start_index + self._GDATA_PAGE_SIZE))
2305
2306         def _real_initialize(self):
2307                 self._youtube_ie.initialize()
2308
2309         def _real_extract(self, url):
2310                 # Extract username
2311                 mobj = re.match(self._VALID_URL, url)
2312                 if mobj is None:
2313                         self._downloader.trouble(u'ERROR: invalid url: %s' % url)
2314                         return
2315
2316                 username = mobj.group(1)
2317
2318                 # Download video ids using YouTube Data API. Result size per
2319                 # query is limited (currently to 50 videos) so we need to query
2320                 # page by page until there are no video ids - it means we got
2321                 # all of them.
2322
2323                 video_ids = []
2324                 pagenum = 0
2325
2326                 while True:
2327                         start_index = pagenum * self._GDATA_PAGE_SIZE + 1
2328                         self.report_download_page(username, start_index)
2329
2330                         request = urllib2.Request(self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index))
2331
2332                         try:
2333                                 page = urllib2.urlopen(request).read()
2334                         except (urllib2.URLError, httplib.HTTPException, socket.error), err:
2335                                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
2336                                 return
2337
2338                         # Extract video identifiers
2339                         ids_in_page = []
2340
2341                         for mobj in re.finditer(self._VIDEO_INDICATOR, page):
2342                                 if mobj.group(1) not in ids_in_page:
2343                                         ids_in_page.append(mobj.group(1))
2344
2345                         video_ids.extend(ids_in_page)
2346
2347                         # A little optimization - if current page is not
2348                         # "full", ie. does not contain PAGE_SIZE video ids then
2349                         # we can assume that this page is the last one - there
2350                         # are no more ids on further pages - no need to query
2351                         # again.
2352
2353                         if len(ids_in_page) < self._GDATA_PAGE_SIZE:
2354                                 break
2355
2356                         pagenum += 1
2357
2358                 all_ids_count = len(video_ids)
2359                 playliststart = self._downloader.params.get('playliststart', 1) - 1
2360                 playlistend = self._downloader.params.get('playlistend', -1)
2361
2362                 if playlistend == -1:
2363                         video_ids = video_ids[playliststart:]
2364                 else:
2365                         video_ids = video_ids[playliststart:playlistend]
2366                         
2367                 self._downloader.to_screen("[youtube] user %s: Collected %d video ids (downloading %d of them)" %
2368                                            (username, all_ids_count, len(video_ids)))
2369
2370                 for video_id in video_ids:
2371                         self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % video_id)
2372
2373
2374 class DepositFilesIE(InfoExtractor):
2375         """Information extractor for depositfiles.com"""
2376
2377         _VALID_URL = r'(?:http://)?(?:\w+\.)?depositfiles.com/(?:../(?#locale))?files/(.+)'
2378
2379         def __init__(self, downloader=None):
2380                 InfoExtractor.__init__(self, downloader)
2381
2382         @staticmethod
2383         def suitable(url):
2384                 return (re.match(DepositFilesIE._VALID_URL, url) is not None)
2385
2386         def report_download_webpage(self, file_id):
2387                 """Report webpage download."""
2388                 self._downloader.to_screen(u'[DepositFiles] %s: Downloading webpage' % file_id)
2389
2390         def report_extraction(self, file_id):
2391                 """Report information extraction."""
2392                 self._downloader.to_screen(u'[DepositFiles] %s: Extracting information' % file_id)
2393
2394         def _real_initialize(self):
2395                 return
2396
	def _real_extract(self, url):
		"""Extract the direct download URL and title for a DepositFiles file.

		Fetches the file page with the 'Free download' form submitted,
		scrapes the real file URL and the title out of the HTML, and hands
		the result to the downloader via process_info(). All failures are
		reported through self._downloader.trouble() rather than raised.
		"""
		# At this point we have a new file
		self._downloader.increment_downloads()

		file_id = url.split('/')[-1]
		# Rebuild url in english locale
		url = 'http://depositfiles.com/en/files/' + file_id

		# Retrieve file webpage with 'Free download' button pressed
		# (POSTing gateway_result=1 is what that button submits).
		free_download_indication = { 'gateway_result' : '1' }
		request = urllib2.Request(url, urllib.urlencode(free_download_indication))
		try:
			self.report_download_webpage(file_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % str(err))
			return

		# Search for the real file URL
		mobj = re.search(r'<form action="(http://fileshare.+?)"', webpage)
		if (mobj is None) or (mobj.group(1) is None):
			# Try to figure out reason of the error.
			# The page shows an "<strong>Attention...</strong>" notice when
			# the download is restricted; surface that text if present.
			mobj = re.search(r'<strong>(Attention.*?)</strong>', webpage, re.DOTALL)
			if (mobj is not None) and (mobj.group(1) is not None):
				# Collapse the HTML whitespace into single spaces.
				restriction_message = re.sub('\s+', ' ', mobj.group(1)).strip()
				self._downloader.trouble(u'ERROR: %s' % restriction_message)
			else:
				self._downloader.trouble(u'ERROR: unable to extract download URL from: %s' % url)
			return

		file_url = mobj.group(1)
		# Extension taken from the URL path, without the leading dot.
		file_extension = os.path.splitext(file_url)[1][1:]

		# Search for file title
		mobj = re.search(r'<b title="(.*?)">', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract title')
			return
		file_title = mobj.group(1).decode('utf-8')

		try:
			# Process file information
			self._downloader.process_info({
				'id':		file_id.decode('utf-8'),
				'url':		file_url.decode('utf-8'),
				'uploader':	u'NA',
				'upload_date':	u'NA',
				'title':	file_title,
				'stitle':	file_title,
				'ext':		file_extension.decode('utf-8'),
				'format':	u'NA',
				'player_url':	None,
			})
		except UnavailableVideoError, err:
			self._downloader.trouble(u'ERROR: unable to download file')
2452
class PostProcessor(object):
	"""Base class for postprocessing steps.

	Instances register with a downloader through its add_post_processor()
	method. After each successful download, the downloader walks its chain
	of PostProcessors, calling run() on each one: the first call receives
	the initial information, and every later call receives the value
	returned by the previous PostProcessor. The walk stops as soon as a
	run() returns None, or when the end of the chain is reached.

	Registration is mutual, mirroring the InfoExtractor convention.
	"""

	# Downloader this postprocessor reports to (set at construction or
	# via set_downloader()).
	_downloader = None

	def __init__(self, downloader=None):
		self._downloader = downloader

	def set_downloader(self, downloader):
		"""Attach the downloader this postprocessor belongs to."""
		self._downloader = downloader

	def run(self, information):
		"""Process one downloaded file.

		"information" is an InfoExtractor-style dictionary extended with a
		"filepath" key naming the downloaded file. Returning None stops
		the postprocessing chain; returning a (possibly modified)
		dictionary passes it on to the next postprocessor in the chain.

		A PostProcessingError may be raised to signal failure to the
		downloader this was called from.
		"""
		return information # the base class leaves everything untouched
2498
### MAIN PROGRAM ###
if __name__ == '__main__':
	try:
		# Modules needed only when running the main program
		import getpass
		import optparse

		# Function to update the program file with the latest version from the repository.
		def update_self(downloader, filename):
			"""Overwrite the script at `filename` with the newest stable release."""
			# Note: downloader only used for options
			if not os.access(filename, os.W_OK):
				sys.exit('ERROR: no write permissions on %s' % filename)

			downloader.to_screen('Updating to latest stable version...')
			try:
				# LATEST_VERSION on the master branch names the newest release
				# tag; the script itself is then fetched from that tag.
				latest_url = 'http://github.com/rg3/youtube-dl/raw/master/LATEST_VERSION'
				latest_version = urllib.urlopen(latest_url).read().strip()
				prog_url = 'http://github.com/rg3/youtube-dl/raw/%s/youtube-dl' % latest_version
				newcontent = urllib.urlopen(prog_url).read()
			except (IOError, OSError), err:
				sys.exit('ERROR: unable to download latest version')
			try:
				stream = open(filename, 'w')
				stream.write(newcontent)
				stream.close()
			except (IOError, OSError), err:
				sys.exit('ERROR: unable to overwrite current version')
			downloader.to_screen('Updated to version %s' % latest_version)

		# Parse command line
		parser = optparse.OptionParser(
			usage='Usage: %prog [options] url...',
			version='2011.01.30',
			conflict_handler='resolve',
		)

		parser.add_option('-h', '--help',
				action='help', help='print this help text and exit')
		parser.add_option('-v', '--version',
				action='version', help='print program version and exit')
		parser.add_option('-U', '--update',
				action='store_true', dest='update_self', help='update this program to latest stable version')
		parser.add_option('-i', '--ignore-errors',
				action='store_true', dest='ignoreerrors', help='continue on download errors', default=False)
		parser.add_option('-r', '--rate-limit',
				dest='ratelimit', metavar='LIMIT', help='download rate limit (e.g. 50k or 44.6m)')
		parser.add_option('-R', '--retries',
				dest='retries', metavar='RETRIES', help='number of retries (default is 10)', default=10)
		parser.add_option('--playlist-start',
				dest='playliststart', metavar='NUMBER', help='playlist video to start at (default is 1)', default=1)
		parser.add_option('--playlist-end',
				dest='playlistend', metavar='NUMBER', help='playlist video to end at (default is last)', default=-1)
		parser.add_option('--dump-user-agent',
				action='store_true', dest='dump_user_agent',
				help='display the current browser identification', default=False)

		authentication = optparse.OptionGroup(parser, 'Authentication Options')
		authentication.add_option('-u', '--username',
				dest='username', metavar='USERNAME', help='account username')
		authentication.add_option('-p', '--password',
				dest='password', metavar='PASSWORD', help='account password')
		authentication.add_option('-n', '--netrc',
				action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
		parser.add_option_group(authentication)

		video_format = optparse.OptionGroup(parser, 'Video Format Options')
		video_format.add_option('-f', '--format',
				action='store', dest='format', metavar='FORMAT', help='video format code')
		video_format.add_option('--all-formats',
				action='store_const', dest='format', help='download all available video formats', const='-1')
		video_format.add_option('--max-quality',
				action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
		parser.add_option_group(video_format)

		verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
		verbosity.add_option('-q', '--quiet',
				action='store_true', dest='quiet', help='activates quiet mode', default=False)
		verbosity.add_option('-s', '--simulate',
				action='store_true', dest='simulate', help='do not download video', default=False)
		verbosity.add_option('-g', '--get-url',
				action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
		verbosity.add_option('-e', '--get-title',
				action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
		verbosity.add_option('--get-thumbnail',
				action='store_true', dest='getthumbnail',
				help='simulate, quiet but print thumbnail URL', default=False)
		verbosity.add_option('--get-description',
				action='store_true', dest='getdescription',
				help='simulate, quiet but print video description', default=False)
		verbosity.add_option('--get-filename',
				action='store_true', dest='getfilename',
				help='simulate, quiet but print output filename', default=False)
		verbosity.add_option('--no-progress',
				action='store_true', dest='noprogress', help='do not print progress bar', default=False)
		verbosity.add_option('--console-title',
				action='store_true', dest='consoletitle',
				help='display progress in console titlebar', default=False)
		parser.add_option_group(verbosity)

		filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
		filesystem.add_option('-t', '--title',
				action='store_true', dest='usetitle', help='use title in file name', default=False)
		filesystem.add_option('-l', '--literal',
				action='store_true', dest='useliteral', help='use literal title in file name', default=False)
		filesystem.add_option('-A', '--auto-number',
				action='store_true', dest='autonumber',
				help='number downloaded files starting from 00000', default=False)
		filesystem.add_option('-o', '--output',
				dest='outtmpl', metavar='TEMPLATE', help='output filename template')
		filesystem.add_option('-a', '--batch-file',
				dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
		filesystem.add_option('-w', '--no-overwrites',
				action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
		filesystem.add_option('-c', '--continue',
				action='store_true', dest='continue_dl', help='resume partially downloaded files', default=False)
		filesystem.add_option('--cookies',
				dest='cookiefile', metavar='FILE', help='file to dump cookie jar to')
		filesystem.add_option('--no-part',
				action='store_true', dest='nopart', help='do not use .part files', default=False)
		filesystem.add_option('--no-mtime',
				action='store_false', dest='updatetime',
				help='do not use the Last-modified header to set the file modification time', default=True)
		parser.add_option_group(filesystem)

		(opts, args) = parser.parse_args()

		# Open appropriate CookieJar
		if opts.cookiefile is None:
			jar = cookielib.CookieJar()
		else:
			try:
				jar = cookielib.MozillaCookieJar(opts.cookiefile)
				# Only load existing cookies when the file is actually readable;
				# a missing file is fine (it will be created on save).
				if os.path.isfile(opts.cookiefile) and os.access(opts.cookiefile, os.R_OK):
					jar.load()
			except (IOError, OSError), err:
				sys.exit(u'ERROR: unable to open cookie file')

		# Dump user agent
		if opts.dump_user_agent:
			print std_headers['User-Agent']
			sys.exit(0)

		# General configuration
		cookie_processor = urllib2.HTTPCookieProcessor(jar)
		# The opener is installed globally so every urllib2 call in the
		# extractors goes through the proxy/cookie/encoding handlers.
		urllib2.install_opener(urllib2.build_opener(urllib2.ProxyHandler(), cookie_processor, YoutubeDLHandler()))
		socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)

		# Batch file verification
		batchurls = []
		if opts.batchfile is not None:
			try:
				if opts.batchfile == '-':
					batchfd = sys.stdin
				else:
					batchfd = open(opts.batchfile, 'r')
				batchurls = batchfd.readlines()
				batchurls = [x.strip() for x in batchurls]
				# Skip blank lines and lines commented out with '#', '/' or ';'.
				batchurls = [x for x in batchurls if len(x) > 0 and not re.search(r'^[#/;]', x)]
			except IOError:
				sys.exit(u'ERROR: batch file could not be read')
		all_urls = batchurls + args

		# Conflicting, missing and erroneous options
		if opts.usenetrc and (opts.username is not None or opts.password is not None):
			parser.error(u'using .netrc conflicts with giving username/password')
		if opts.password is not None and opts.username is None:
			parser.error(u'account username missing')
		if opts.outtmpl is not None and (opts.useliteral or opts.usetitle or opts.autonumber):
			parser.error(u'using output template conflicts with using title, literal title or auto number')
		if opts.usetitle and opts.useliteral:
			parser.error(u'using title conflicts with using literal title')
		if opts.username is not None and opts.password is None:
			# Username given without password: prompt interactively.
			opts.password = getpass.getpass(u'Type account password and press return:')
		if opts.ratelimit is not None:
			numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
			if numeric_limit is None:
				parser.error(u'invalid rate limit specified')
			opts.ratelimit = numeric_limit
		if opts.retries is not None:
			try:
				opts.retries = long(opts.retries)
			except (TypeError, ValueError), err:
				parser.error(u'invalid retry count specified')
		try:
			opts.playliststart = long(opts.playliststart)
			if opts.playliststart <= 0:
				raise ValueError
		except (TypeError, ValueError), err:
			parser.error(u'invalid playlist start number specified')
		try:
			opts.playlistend = long(opts.playlistend)
			# -1 means "up to the last video"; any other value must be a
			# positive number not smaller than the start index.
			if opts.playlistend != -1 and (opts.playlistend <= 0 or opts.playlistend < opts.playliststart):
				raise ValueError
		except (TypeError, ValueError), err:
			parser.error(u'invalid playlist end number specified')

		# Information extractors
		vimeo_ie = VimeoIE()
		youtube_ie = YoutubeIE()
		metacafe_ie = MetacafeIE(youtube_ie)
		dailymotion_ie = DailymotionIE()
		youtube_pl_ie = YoutubePlaylistIE(youtube_ie)
		youtube_user_ie = YoutubeUserIE(youtube_ie)
		youtube_search_ie = YoutubeSearchIE(youtube_ie)
		google_ie = GoogleIE()
		google_search_ie = GoogleSearchIE(google_ie)
		photobucket_ie = PhotobucketIE()
		yahoo_ie = YahooIE()
		yahoo_search_ie = YahooSearchIE(yahoo_ie)
		deposit_files_ie = DepositFilesIE()
		generic_ie = GenericIE()

		# File downloader
		fd = FileDownloader({
			'usenetrc': opts.usenetrc,
			'username': opts.username,
			'password': opts.password,
			'quiet': (opts.quiet or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename),
			'forceurl': opts.geturl,
			'forcetitle': opts.gettitle,
			'forcethumbnail': opts.getthumbnail,
			'forcedescription': opts.getdescription,
			'forcefilename': opts.getfilename,
			'simulate': (opts.simulate or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename),
			'format': opts.format,
			'format_limit': opts.format_limit,
			# First truthy template in the `or` chain wins: an explicit -o
			# beats everything, then the --all-formats ('-1') variants, then
			# the title/literal/autonumber combinations, and finally the
			# plain id-based default.
			'outtmpl': ((opts.outtmpl is not None and opts.outtmpl.decode(preferredencoding()))
				or (opts.format == '-1' and opts.usetitle and u'%(stitle)s-%(id)s-%(format)s.%(ext)s')
				or (opts.format == '-1' and opts.useliteral and u'%(title)s-%(id)s-%(format)s.%(ext)s')
				or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
				or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(stitle)s-%(id)s.%(ext)s')
				or (opts.useliteral and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
				or (opts.usetitle and u'%(stitle)s-%(id)s.%(ext)s')
				or (opts.useliteral and u'%(title)s-%(id)s.%(ext)s')
				or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
				or u'%(id)s.%(ext)s'),
			'ignoreerrors': opts.ignoreerrors,
			'ratelimit': opts.ratelimit,
			'nooverwrites': opts.nooverwrites,
			'retries': opts.retries,
			'continuedl': opts.continue_dl,
			'noprogress': opts.noprogress,
			'playliststart': opts.playliststart,
			'playlistend': opts.playlistend,
			# '-o -' writes the video to stdout, so messages must go to stderr.
			'logtostderr': opts.outtmpl == '-',
			'consoletitle': opts.consoletitle,
			'nopart': opts.nopart,
			'updatetime': opts.updatetime,
			})
		# Extractors are tried in registration order until one's suitable()
		# accepts the URL.
		fd.add_info_extractor(vimeo_ie)
		fd.add_info_extractor(youtube_search_ie)
		fd.add_info_extractor(youtube_pl_ie)
		fd.add_info_extractor(youtube_user_ie)
		fd.add_info_extractor(metacafe_ie)
		fd.add_info_extractor(dailymotion_ie)
		fd.add_info_extractor(youtube_ie)
		fd.add_info_extractor(google_ie)
		fd.add_info_extractor(google_search_ie)
		fd.add_info_extractor(photobucket_ie)
		fd.add_info_extractor(yahoo_ie)
		fd.add_info_extractor(yahoo_search_ie)
		fd.add_info_extractor(deposit_files_ie)

		# This must come last since it's the
		# fallback if none of the others work
		fd.add_info_extractor(generic_ie)

		# Update version
		if opts.update_self:
			update_self(fd, sys.argv[0])

		# Maybe do nothing
		if len(all_urls) < 1:
			if not opts.update_self:
				parser.error(u'you must provide at least one URL')
			else:
				# -U alone is a valid invocation; nothing left to do.
				sys.exit()
		retcode = fd.download(all_urls)

		# Dump cookie jar if requested
		if opts.cookiefile is not None:
			try:
				jar.save()
			except (IOError, OSError), err:
				sys.exit(u'ERROR: unable to save cookie jar')

		sys.exit(retcode)

	except DownloadError:
		sys.exit(1)
	except SameFileError:
		sys.exit(u'ERROR: fixed output name but more than one file to download')
	except KeyboardInterrupt:
		sys.exit(u'\nERROR: Interrupted by user')