added whitespace below soundcloudIE class
[youtube-dl.git] / youtube-dl
index ecc4c26..2e29bad 100755 (executable)
@@ -10,15 +10,20 @@ __author__  = (
        'Paweł Paprota',
        'Gergely Imreh',
        'Rogério Brito',
        'Paweł Paprota',
        'Gergely Imreh',
        'Rogério Brito',
+       'Philipp Hagemeister',
+       'Sören Schulze',
        )
 
 __license__ = 'Public Domain'
        )
 
 __license__ = 'Public Domain'
-__version__ = '2011.08.28-phihag'
+__version__ = '2011.10.19'
+
+UPDATE_URL = 'https://raw.github.com/rg3/youtube-dl/master/youtube-dl'
 
 import cookielib
 import datetime
 import gzip
 import htmlentitydefs
 
 import cookielib
 import datetime
 import gzip
 import htmlentitydefs
+import HTMLParser
 import httplib
 import locale
 import math
 import httplib
 import locale
 import math
@@ -59,6 +64,11 @@ try:
 except ImportError:
        pass # Handled below
 
 except ImportError:
        pass # Handled below
 
+try:
+       import xml.etree.ElementTree
+except ImportError: # Python<2.5: Not officially supported, but let it slip
+       warnings.warn('xml.etree.ElementTree support is missing. Consider upgrading to Python >= 2.5 if you get related errors.')
+
 std_headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:5.0.1) Gecko/20100101 Firefox/5.0.1',
        'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
 std_headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:5.0.1) Gecko/20100101 Firefox/5.0.1',
        'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
@@ -198,6 +208,7 @@ def preferredencoding():
                        yield pref
        return yield_preferredencoding().next()
 
                        yield pref
        return yield_preferredencoding().next()
 
+
 def htmlentity_transform(matchobj):
        """Transforms an HTML entity to a Unicode character.
 
 def htmlentity_transform(matchobj):
        """Transforms an HTML entity to a Unicode character.
 
@@ -224,11 +235,13 @@ def htmlentity_transform(matchobj):
        # Unknown entity in name, return its literal representation
        return (u'&%s;' % entity)
 
        # Unknown entity in name, return its literal representation
        return (u'&%s;' % entity)
 
+
 def sanitize_title(utitle):
        """Sanitizes a video title so it could be used as part of a filename."""
        utitle = re.sub(ur'(?u)&(.+?);', htmlentity_transform, utitle)
        return utitle.replace(unicode(os.sep), u'%')
 
 def sanitize_title(utitle):
        """Sanitizes a video title so it could be used as part of a filename."""
        utitle = re.sub(ur'(?u)&(.+?);', htmlentity_transform, utitle)
        return utitle.replace(unicode(os.sep), u'%')
 
+
 def sanitize_open(filename, open_mode):
        """Try to open the given filename, and slightly tweak it if this fails.
 
 def sanitize_open(filename, open_mode):
        """Try to open the given filename, and slightly tweak it if this fails.
 
@@ -255,13 +268,15 @@ def sanitize_open(filename, open_mode):
                stream = open(filename, open_mode)
                return (stream, filename)
 
                stream = open(filename, open_mode)
                return (stream, filename)
 
+
 def timeconvert(timestr):
 def timeconvert(timestr):
-    """Convert RFC 2822 defined time string into system timestamp"""
-    timestamp = None
-    timetuple = email.utils.parsedate_tz(timestr)
-    if timetuple is not None:
-        timestamp = email.utils.mktime_tz(timetuple)
-    return timestamp
+       """Convert RFC 2822 defined time string into system timestamp"""
+       timestamp = None
+       timetuple = email.utils.parsedate_tz(timestr)
+       if timetuple is not None:
+               timestamp = email.utils.mktime_tz(timetuple)
+       return timestamp
+
 
 class DownloadError(Exception):
        """Download Error exception.
 
 class DownloadError(Exception):
        """Download Error exception.
@@ -272,6 +287,7 @@ class DownloadError(Exception):
        """
        pass
 
        """
        pass
 
+
 class SameFileError(Exception):
        """Same File exception.
 
 class SameFileError(Exception):
        """Same File exception.
 
@@ -280,6 +296,7 @@ class SameFileError(Exception):
        """
        pass
 
        """
        pass
 
+
 class PostProcessingError(Exception):
        """Post Processing exception.
 
 class PostProcessingError(Exception):
        """Post Processing exception.
 
@@ -288,6 +305,7 @@ class PostProcessingError(Exception):
        """
        pass
 
        """
        pass
 
+
 class UnavailableVideoError(Exception):
        """Unavailable Format exception.
 
 class UnavailableVideoError(Exception):
        """Unavailable Format exception.
 
@@ -296,6 +314,7 @@ class UnavailableVideoError(Exception):
        """
        pass
 
        """
        pass
 
+
 class ContentTooShortError(Exception):
        """Content Too Short exception.
 
 class ContentTooShortError(Exception):
        """Content Too Short exception.
 
@@ -311,6 +330,7 @@ class ContentTooShortError(Exception):
                self.downloaded = downloaded
                self.expected = expected
 
                self.downloaded = downloaded
                self.expected = expected
 
+
 class YoutubeDLHandler(urllib2.HTTPHandler):
        """Handler for HTTP requests and responses.
 
 class YoutubeDLHandler(urllib2.HTTPHandler):
        """Handler for HTTP requests and responses.
 
@@ -320,11 +340,11 @@ class YoutubeDLHandler(urllib2.HTTPHandler):
        a particular request, the original request in the program code only has
        to include the HTTP header "Youtubedl-No-Compression", which will be
        removed before making the real request.
        a particular request, the original request in the program code only has
        to include the HTTP header "Youtubedl-No-Compression", which will be
        removed before making the real request.
-       
+
        Part of this code was copied from:
 
        Part of this code was copied from:
 
-         http://techknack.net/python-urllib2-handlers/
-         
+       http://techknack.net/python-urllib2-handlers/
+
        Andrew Rowls, the author of that code, agreed to release it to the
        public domain.
        """
        Andrew Rowls, the author of that code, agreed to release it to the
        public domain.
        """
@@ -335,7 +355,7 @@ class YoutubeDLHandler(urllib2.HTTPHandler):
                        return zlib.decompress(data, -zlib.MAX_WBITS)
                except zlib.error:
                        return zlib.decompress(data)
                        return zlib.decompress(data, -zlib.MAX_WBITS)
                except zlib.error:
                        return zlib.decompress(data)
-       
+
        @staticmethod
        def addinfourl_wrapper(stream, headers, url, code):
                if hasattr(urllib2.addinfourl, 'getcode'):
        @staticmethod
        def addinfourl_wrapper(stream, headers, url, code):
                if hasattr(urllib2.addinfourl, 'getcode'):
@@ -343,7 +363,7 @@ class YoutubeDLHandler(urllib2.HTTPHandler):
                ret = urllib2.addinfourl(stream, headers, url)
                ret.code = code
                return ret
                ret = urllib2.addinfourl(stream, headers, url)
                ret.code = code
                return ret
-       
+
        def http_request(self, req):
                for h in std_headers:
                        if h in req.headers:
        def http_request(self, req):
                for h in std_headers:
                        if h in req.headers:
@@ -369,6 +389,7 @@ class YoutubeDLHandler(urllib2.HTTPHandler):
                        resp.msg = old_resp.msg
                return resp
 
                        resp.msg = old_resp.msg
                return resp
 
+
 class FileDownloader(object):
        """File Downloader class.
 
 class FileDownloader(object):
        """File Downloader class.
 
@@ -417,6 +438,8 @@ class FileDownloader(object):
        noprogress:       Do not print the progress bar.
        playliststart:    Playlist item to start at.
        playlistend:      Playlist item to end at.
        noprogress:       Do not print the progress bar.
        playliststart:    Playlist item to start at.
        playlistend:      Playlist item to end at.
+       matchtitle:       Download only matching titles.
+       rejecttitle:      Reject downloads for matching titles.
        logtostderr:      Log messages to stderr instead of stdout.
        consoletitle:     Display progress in console window's titlebar.
        nopart:           Do not use temporary .part files.
        logtostderr:      Log messages to stderr instead of stdout.
        consoletitle:     Display progress in console window's titlebar.
        nopart:           Do not use temporary .part files.
@@ -442,16 +465,6 @@ class FileDownloader(object):
                self.params = params
 
        @staticmethod
                self.params = params
 
        @staticmethod
-       def pmkdir(filename):
-               """Create directory components in filename. Similar to Unix "mkdir -p"."""
-               components = filename.split(os.sep)
-               aggregate = [os.sep.join(components[0:x]) for x in xrange(1, len(components))]
-               aggregate = ['%s%s' % (x, os.sep) for x in aggregate] # Finish names with separator
-               for dir in aggregate:
-                       if not os.path.exists(dir):
-                               os.mkdir(dir)
-
-       @staticmethod
        def format_bytes(bytes):
                if bytes is None:
                        return 'N/A'
        def format_bytes(bytes):
                if bytes is None:
                        return 'N/A'
@@ -462,7 +475,7 @@ class FileDownloader(object):
                else:
                        exponent = long(math.log(bytes, 1024.0))
                suffix = 'bkMGTPEZY'[exponent]
                else:
                        exponent = long(math.log(bytes, 1024.0))
                suffix = 'bkMGTPEZY'[exponent]
-               converted = float(bytes) / float(1024**exponent)
+               converted = float(bytes) / float(1024 ** exponent)
                return '%.2f%s' % (converted, suffix)
 
        @staticmethod
                return '%.2f%s' % (converted, suffix)
 
        @staticmethod
@@ -600,7 +613,7 @@ class FileDownloader(object):
                        os.rename(old_filename, new_filename)
                except (IOError, OSError), err:
                        self.trouble(u'ERROR: unable to rename file')
                        os.rename(old_filename, new_filename)
                except (IOError, OSError), err:
                        self.trouble(u'ERROR: unable to rename file')
-       
+
        def try_utime(self, filename, last_modified_hdr):
                """Try to set the last-modified time of the given file."""
                if last_modified_hdr is None:
        def try_utime(self, filename, last_modified_hdr):
                """Try to set the last-modified time of the given file."""
                if last_modified_hdr is None:
@@ -612,11 +625,12 @@ class FileDownloader(object):
                        return
                filetime = timeconvert(timestr)
                if filetime is None:
                        return
                filetime = timeconvert(timestr)
                if filetime is None:
-                       return
+                       return filetime
                try:
                try:
-                       os.utime(filename,(time.time(), filetime))
+                       os.utime(filename, (time.time(), filetime))
                except:
                        pass
                except:
                        pass
+               return filetime
 
        def report_writedescription(self, descfn):
                """ Report that the description file is being written """
 
        def report_writedescription(self, descfn):
                """ Report that the description file is being written """
@@ -684,32 +698,48 @@ class FileDownloader(object):
        def process_info(self, info_dict):
                """Process a single dictionary returned by an InfoExtractor."""
                filename = self.prepare_filename(info_dict)
        def process_info(self, info_dict):
                """Process a single dictionary returned by an InfoExtractor."""
                filename = self.prepare_filename(info_dict)
+               
+               # Forced printings
+               if self.params.get('forcetitle', False):
+                       print info_dict['title'].encode(preferredencoding(), 'xmlcharrefreplace')
+               if self.params.get('forceurl', False):
+                       print info_dict['url'].encode(preferredencoding(), 'xmlcharrefreplace')
+               if self.params.get('forcethumbnail', False) and 'thumbnail' in info_dict:
+                       print info_dict['thumbnail'].encode(preferredencoding(), 'xmlcharrefreplace')
+               if self.params.get('forcedescription', False) and 'description' in info_dict:
+                       print info_dict['description'].encode(preferredencoding(), 'xmlcharrefreplace')
+               if self.params.get('forcefilename', False) and filename is not None:
+                       print filename.encode(preferredencoding(), 'xmlcharrefreplace')
+               if self.params.get('forceformat', False):
+                       print info_dict['format'].encode(preferredencoding(), 'xmlcharrefreplace')
+
                # Do nothing else if in simulate mode
                if self.params.get('simulate', False):
                # Do nothing else if in simulate mode
                if self.params.get('simulate', False):
-                       # Forced printings
-                       if self.params.get('forcetitle', False):
-                               print info_dict['title'].encode(preferredencoding(), 'xmlcharrefreplace')
-                       if self.params.get('forceurl', False):
-                               print info_dict['url'].encode(preferredencoding(), 'xmlcharrefreplace')
-                       if self.params.get('forcethumbnail', False) and 'thumbnail' in info_dict:
-                               print info_dict['thumbnail'].encode(preferredencoding(), 'xmlcharrefreplace')
-                       if self.params.get('forcedescription', False) and 'description' in info_dict:
-                               print info_dict['description'].encode(preferredencoding(), 'xmlcharrefreplace')
-                       if self.params.get('forcefilename', False) and filename is not None:
-                               print filename.encode(preferredencoding(), 'xmlcharrefreplace')
-
                        return
 
                if filename is None:
                        return
                        return
 
                if filename is None:
                        return
+
+               matchtitle=self.params.get('matchtitle',False)
+               rejecttitle=self.params.get('rejecttitle',False)
+               title=info_dict['title'].encode(preferredencoding(), 'xmlcharrefreplace')
+               if matchtitle and not re.search(matchtitle, title, re.IGNORECASE):
+                       self.to_screen(u'[download] "%s" title did not match pattern "%s"' % (title, matchtitle))
+                       return
+               if rejecttitle and re.search(rejecttitle, title, re.IGNORECASE):
+                       self.to_screen(u'[download] "%s" title matched reject pattern "%s"' % (title, rejecttitle))
+                       return
+                       
                if self.params.get('nooverwrites', False) and os.path.exists(filename):
                        self.to_stderr(u'WARNING: file exists and will be skipped')
                        return
 
                try:
                if self.params.get('nooverwrites', False) and os.path.exists(filename):
                        self.to_stderr(u'WARNING: file exists and will be skipped')
                        return
 
                try:
-                       self.pmkdir(filename)
+                       dn = os.path.dirname(filename)
+                       if dn != '' and not os.path.exists(dn):
+                               os.makedirs(dn)
                except (OSError, IOError), err:
                except (OSError, IOError), err:
-                       self.trouble(u'ERROR: unable to create directories: %s' % str(err))
+                       self.trouble(u'ERROR: unable to create directory ' + unicode(err))
                        return
 
                if self.params.get('writedescription', False):
                        return
 
                if self.params.get('writedescription', False):
@@ -722,7 +752,7 @@ class FileDownloader(object):
                                finally:
                                        descfile.close()
                        except (OSError, IOError):
                                finally:
                                        descfile.close()
                        except (OSError, IOError):
-                               self.trouble(u'ERROR: Cannot write description file: %s' % str(descfn))
+                               self.trouble(u'ERROR: Cannot write description file ' + descfn)
                                return
 
                if self.params.get('writeinfojson', False):
                                return
 
                if self.params.get('writeinfojson', False):
@@ -736,30 +766,32 @@ class FileDownloader(object):
                        try:
                                infof = open(infofn, 'wb')
                                try:
                        try:
                                infof = open(infofn, 'wb')
                                try:
-                                       json.dump(info_dict, infof)
+                                       json_info_dict = dict((k,v) for k,v in info_dict.iteritems() if not k in ('urlhandle',))
+                                       json.dump(json_info_dict, infof)
                                finally:
                                        infof.close()
                        except (OSError, IOError):
                                finally:
                                        infof.close()
                        except (OSError, IOError):
-                               self.trouble(u'ERROR: Cannot write metadata to JSON file: %s' % str(infofn))
+                               self.trouble(u'ERROR: Cannot write metadata to JSON file ' + infofn)
                                return
 
                                return
 
-               try:
-                       success = self._do_download(filename, info_dict['url'].encode('utf-8'), info_dict.get('player_url', None))
-               except (OSError, IOError), err:
-                       raise UnavailableVideoError
-               except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-                       self.trouble(u'ERROR: unable to download video data: %s' % str(err))
-                       return
-               except (ContentTooShortError, ), err:
-                       self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
-                       return
-
-               if success:
+               if not self.params.get('skip_download', False):
                        try:
                        try:
-                               self.post_process(filename, info_dict)
-                       except (PostProcessingError), err:
-                               self.trouble(u'ERROR: postprocessing: %s' % str(err))
+                               success = self._do_download(filename, info_dict)
+                       except (OSError, IOError), err:
+                               raise UnavailableVideoError
+                       except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                               self.trouble(u'ERROR: unable to download video data: %s' % str(err))
+                               return
+                       except (ContentTooShortError, ), err:
+                               self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
                                return
                                return
+       
+                       if success:
+                               try:
+                                       self.post_process(filename, info_dict)
+                               except (PostProcessingError), err:
+                                       self.trouble(u'ERROR: postprocessing: %s' % str(err))
+                                       return
 
        def download(self, url_list):
                """Download a given list of URLs."""
 
        def download(self, url_list):
                """Download a given list of URLs."""
@@ -820,6 +852,11 @@ class FileDownloader(object):
                        cursize = os.path.getsize(tmpfilename)
                        if prevsize == cursize and retval == 1:
                                break
                        cursize = os.path.getsize(tmpfilename)
                        if prevsize == cursize and retval == 1:
                                break
+                        # Some rtmp streams seem abort after ~ 99.8%. Don't complain for those
+                       if prevsize == cursize and retval == 2 and cursize > 1024:
+                               self.to_screen(u'\r[rtmpdump] Could not download the whole video. This can happen for some advertisements.')
+                               retval = 0
+                               break
                if retval == 0:
                        self.to_screen(u'\r[rtmpdump] %s bytes' % os.path.getsize(tmpfilename))
                        self.try_rename(tmpfilename, filename)
                if retval == 0:
                        self.to_screen(u'\r[rtmpdump] %s bytes' % os.path.getsize(tmpfilename))
                        self.try_rename(tmpfilename, filename)
@@ -828,7 +865,10 @@ class FileDownloader(object):
                        self.trouble(u'\nERROR: rtmpdump exited with code %d' % retval)
                        return False
 
                        self.trouble(u'\nERROR: rtmpdump exited with code %d' % retval)
                        return False
 
-       def _do_download(self, filename, url, player_url):
+       def _do_download(self, filename, info_dict):
+               url = info_dict['url']
+               player_url = info_dict.get('player_url', None)
+
                # Check file already present
                if self.params.get('continuedl', False) and os.path.isfile(filename) and not self.params.get('nopart', False):
                        self.report_file_already_downloaded(filename)
                # Check file already present
                if self.params.get('continuedl', False) and os.path.isfile(filename) and not self.params.get('nopart', False):
                        self.report_file_already_downloaded(filename)
@@ -840,7 +880,6 @@ class FileDownloader(object):
 
                tmpfilename = self.temp_name(filename)
                stream = None
 
                tmpfilename = self.temp_name(filename)
                stream = None
-               open_mode = 'wb'
 
                # Do not include the Accept-Encoding header
                headers = {'Youtubedl-no-compression': 'True'}
 
                # Do not include the Accept-Encoding header
                headers = {'Youtubedl-no-compression': 'True'}
@@ -853,17 +892,22 @@ class FileDownloader(object):
                else:
                        resume_len = 0
 
                else:
                        resume_len = 0
 
-               # Request parameters in case of being able to resume
-               if self.params.get('continuedl', False) and resume_len != 0:
-                       self.report_resuming_byte(resume_len)
-                       request.add_header('Range','bytes=%d-' % resume_len)
-                       open_mode = 'ab'
+               open_mode = 'wb'
+               if resume_len != 0:
+                       if self.params.get('continuedl', False):
+                               self.report_resuming_byte(resume_len)
+                               request.add_header('Range','bytes=%d-' % resume_len)
+                               open_mode = 'ab'
+                       else:
+                               resume_len = 0
 
                count = 0
                retries = self.params.get('retries', 0)
                while count <= retries:
                        # Establish connection
                        try:
 
                count = 0
                retries = self.params.get('retries', 0)
                while count <= retries:
                        # Establish connection
                        try:
+                               if count == 0 and 'urlhandle' in info_dict:
+                                       data = info_dict['urlhandle']
                                data = urllib2.urlopen(request)
                                break
                        except (urllib2.HTTPError, ), err:
                                data = urllib2.urlopen(request)
                                break
                        except (urllib2.HTTPError, ), err:
@@ -882,7 +926,7 @@ class FileDownloader(object):
                                        else:
                                                # Examine the reported length
                                                if (content_length is not None and
                                        else:
                                                # Examine the reported length
                                                if (content_length is not None and
-                                                       (resume_len - 100 < long(content_length) < resume_len + 100)):
+                                                               (resume_len - 100 < long(content_length) < resume_len + 100)):
                                                        # The file had already been fully downloaded.
                                                        # Explanation to the above condition: in issue #175 it was revealed that
                                                        # YouTube sometimes adds or removes a few bytes from the end of the file,
                                                        # The file had already been fully downloaded.
                                                        # Explanation to the above condition: in issue #175 it was revealed that
                                                        # YouTube sometimes adds or removes a few bytes from the end of the file,
@@ -927,6 +971,7 @@ class FileDownloader(object):
                        if stream is None:
                                try:
                                        (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
                        if stream is None:
                                try:
                                        (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
+                                       assert stream is not None
                                        filename = self.undo_temp_name(tmpfilename)
                                        self.report_destination(filename)
                                except (OSError, IOError), err:
                                        filename = self.undo_temp_name(tmpfilename)
                                        self.report_destination(filename)
                                except (OSError, IOError), err:
@@ -940,14 +985,20 @@ class FileDownloader(object):
                        block_size = self.best_block_size(after - before, len(data_block))
 
                        # Progress message
                        block_size = self.best_block_size(after - before, len(data_block))
 
                        # Progress message
-                       percent_str = self.calc_percent(byte_counter, data_len)
-                       eta_str = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
                        speed_str = self.calc_speed(start, time.time(), byte_counter - resume_len)
                        speed_str = self.calc_speed(start, time.time(), byte_counter - resume_len)
-                       self.report_progress(percent_str, data_len_str, speed_str, eta_str)
+                       if data_len is None:
+                               self.report_progress('Unknown %', data_len_str, speed_str, 'Unknown ETA')
+                       else:
+                               percent_str = self.calc_percent(byte_counter, data_len)
+                               eta_str = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
+                               self.report_progress(percent_str, data_len_str, speed_str, eta_str)
 
                        # Apply rate limit
                        self.slow_down(start, byte_counter - resume_len)
 
 
                        # Apply rate limit
                        self.slow_down(start, byte_counter - resume_len)
 
+               if stream is None:
+                       self.trouble(u'\nERROR: Did not get any data blocks')
+                       return False
                stream.close()
                self.report_finish()
                if data_len is not None and byte_counter != data_len:
                stream.close()
                self.report_finish()
                if data_len is not None and byte_counter != data_len:
@@ -956,10 +1007,11 @@ class FileDownloader(object):
 
                # Update file modification time
                if self.params.get('updatetime', True):
 
                # Update file modification time
                if self.params.get('updatetime', True):
-                       self.try_utime(filename, data.info().get('last-modified', None))
+                       info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))
 
                return True
 
 
                return True
 
+
 class InfoExtractor(object):
        """Information Extractor class.
 
 class InfoExtractor(object):
        """Information Extractor class.
 
@@ -990,9 +1042,8 @@ class InfoExtractor(object):
        description:    One-line video description.
 
        Subclasses of this one should re-define the _real_initialize() and
        description:    One-line video description.
 
        Subclasses of this one should re-define the _real_initialize() and
-       _real_extract() methods, as well as the suitable() static method.
-       Probably, they should also be instantiated and added to the main
-       downloader.
+       _real_extract() methods and define a _VALID_URL regexp.
+       Probably, they should also be added to the list of extractors.
        """
 
        _ready = False
        """
 
        _ready = False
@@ -1003,10 +1054,9 @@ class InfoExtractor(object):
                self._ready = False
                self.set_downloader(downloader)
 
                self._ready = False
                self.set_downloader(downloader)
 
-       @staticmethod
-       def suitable(url):
+       def suitable(self, url):
                """Receives a URL and returns True if suitable for this IE."""
                """Receives a URL and returns True if suitable for this IE."""
-               return False
+               return re.match(self._VALID_URL, url) is not None
 
        def initialize(self):
                """Initializes an instance (authentication, etc)."""
 
        def initialize(self):
                """Initializes an instance (authentication, etc)."""
@@ -1031,16 +1081,17 @@ class InfoExtractor(object):
                """Real extraction process. Redefine in subclasses."""
                pass
 
                """Real extraction process. Redefine in subclasses."""
                pass
 
+
 class YoutubeIE(InfoExtractor):
        """Information extractor for youtube.com."""
 
 class YoutubeIE(InfoExtractor):
        """Information extractor for youtube.com."""
 
-       _VALID_URL = r'^((?:https?://)?(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/)(?:(?:(?:v|embed|e)/)|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=))?)?([0-9A-Za-z_-]+)(?(1).+)?$'
+       _VALID_URL = r'^((?:https?://)?(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/)(?!view_play_list|my_playlists|artist|playlist)(?:(?:(?:v|embed|e)/)|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=))?)?([0-9A-Za-z_-]+)(?(1).+)?$'
        _LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
        _LOGIN_URL = 'https://www.youtube.com/signup?next=/&gl=US&hl=en'
        _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
        _NETRC_MACHINE = 'youtube'
        # Listed in order of quality
        _LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
        _LOGIN_URL = 'https://www.youtube.com/signup?next=/&gl=US&hl=en'
        _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
        _NETRC_MACHINE = 'youtube'
        # Listed in order of quality
-       _available_formats = ['38', '37', '22', '45', '35', '34', '43', '18', '6', '5', '17', '13']
+       _available_formats = ['38', '37', '22', '45', '35', '44', '34', '18', '43', '6', '5', '17', '13']
        _video_extensions = {
                '13': '3gp',
                '17': 'mp4',
        _video_extensions = {
                '13': '3gp',
                '17': 'mp4',
@@ -1049,12 +1100,25 @@ class YoutubeIE(InfoExtractor):
                '37': 'mp4',
                '38': 'video', # You actually don't know if this will be MOV, AVI or whatever
                '43': 'webm',
                '37': 'mp4',
                '38': 'video', # You actually don't know if this will be MOV, AVI or whatever
                '43': 'webm',
+               '44': 'webm',
                '45': 'webm',
        }
                '45': 'webm',
        }
-
-       @staticmethod
-       def suitable(url):
-               return (re.match(YoutubeIE._VALID_URL, url) is not None)
+       _video_dimensions = {
+               '5': '240x400',
+               '6': '???',
+               '13': '???',
+               '17': '144x176',
+               '18': '360x640',
+               '22': '720x1280',
+               '34': '360x640',
+               '35': '480x854',
+               '37': '1080x1920',
+               '38': '3072x4096',
+               '43': '360x640',
+               '44': '480x854',
+               '45': '720x1280',
+       }       
+       IE_NAME = u'youtube'
 
        def report_lang(self):
                """Report attempt to set language."""
 
        def report_lang(self):
                """Report attempt to set language."""
@@ -1088,6 +1152,11 @@ class YoutubeIE(InfoExtractor):
                """Indicate the download will use the RTMP protocol."""
                self._downloader.to_screen(u'[youtube] RTMP download detected')
 
                """Indicate the download will use the RTMP protocol."""
                self._downloader.to_screen(u'[youtube] RTMP download detected')
 
+       def _print_formats(self, formats):
+               print 'Available formats:'
+               for x in formats:
+                       print '%s\t:\t%s\t[%s]' %(x, self._video_extensions.get(x, 'flv'), self._video_dimensions.get(x, '???'))
+
        def _real_initialize(self):
                if self._downloader is None:
                        return
        def _real_initialize(self):
                if self._downloader is None:
                        return
@@ -1167,7 +1236,7 @@ class YoutubeIE(InfoExtractor):
 
                # Get video webpage
                self.report_video_webpage_download(video_id)
 
                # Get video webpage
                self.report_video_webpage_download(video_id)
-               request = urllib2.Request('http://www.youtube.com/watch?v=%s&gl=US&hl=en&amp;has_verified=1' % video_id)
+               request = urllib2.Request('http://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id)
                try:
                        video_webpage = urllib2.urlopen(request).read()
                except (urllib2.URLError, httplib.HTTPException, socket.error), err:
                try:
                        video_webpage = urllib2.urlopen(request).read()
                except (urllib2.URLError, httplib.HTTPException, socket.error), err:
@@ -1185,7 +1254,7 @@ class YoutubeIE(InfoExtractor):
                self.report_video_info_webpage_download(video_id)
                for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
                        video_info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
                self.report_video_info_webpage_download(video_id)
                for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
                        video_info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
-                                          % (video_id, el_type))
+                                       % (video_id, el_type))
                        request = urllib2.Request(video_info_url)
                        try:
                                video_info_webpage = urllib2.urlopen(request).read()
                        request = urllib2.Request(video_info_url)
                        try:
                                video_info_webpage = urllib2.urlopen(request).read()
@@ -1281,16 +1350,27 @@ class YoutubeIE(InfoExtractor):
                        if len(existing_formats) == 0:
                                self._downloader.trouble(u'ERROR: no known formats available for video')
                                return
                        if len(existing_formats) == 0:
                                self._downloader.trouble(u'ERROR: no known formats available for video')
                                return
-                       if req_format is None:
+                       if self._downloader.params.get('listformats', None):
+                               self._print_formats(existing_formats)
+                               return
+                       if req_format is None or req_format == 'best':
                                video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
                                video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
-                       elif req_format == '-1':
+                       elif req_format == 'worst':
+                               video_url_list = [(existing_formats[len(existing_formats)-1], url_map[existing_formats[len(existing_formats)-1]])] # worst quality
+                       elif req_format in ('-1', 'all'):
                                video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
                        else:
                                video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
                        else:
-                               # Specific format
-                               if req_format not in url_map:
+                               # Specific formats. We pick the first in a slash-delimited sequence.
+                               # For example, if '1/2/3/4' is requested and '2' and '4' are available, we pick '2'.
+                               req_formats = req_format.split('/')
+                               video_url_list = None
+                               for rf in req_formats:
+                                       if rf in url_map:
+                                               video_url_list = [(rf, url_map[rf])]
+                                               break
+                               if video_url_list is None:
                                        self._downloader.trouble(u'ERROR: requested format not available')
                                        return
                                        self._downloader.trouble(u'ERROR: requested format not available')
                                        return
-                               video_url_list = [(req_format, url_map[req_format])] # Specific format
                else:
                        self._downloader.trouble(u'ERROR: no conn or url_encoded_fmt_stream_map information found in video info')
                        return
                else:
                        self._downloader.trouble(u'ERROR: no conn or url_encoded_fmt_stream_map information found in video info')
                        return
@@ -1328,15 +1408,12 @@ class MetacafeIE(InfoExtractor):
        _DISCLAIMER = 'http://www.metacafe.com/family_filter/'
        _FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
        _youtube_ie = None
        _DISCLAIMER = 'http://www.metacafe.com/family_filter/'
        _FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
        _youtube_ie = None
+       IE_NAME = u'metacafe'
 
        def __init__(self, youtube_ie, downloader=None):
                InfoExtractor.__init__(self, downloader)
                self._youtube_ie = youtube_ie
 
 
        def __init__(self, youtube_ie, downloader=None):
                InfoExtractor.__init__(self, downloader)
                self._youtube_ie = youtube_ie
 
-       @staticmethod
-       def suitable(url):
-               return (re.match(MetacafeIE._VALID_URL, url) is not None)
-
        def report_disclaimer(self):
                """Report disclaimer retrieval."""
                self._downloader.to_screen(u'[metacafe] Retrieving disclaimer')
        def report_disclaimer(self):
                """Report disclaimer retrieval."""
                self._downloader.to_screen(u'[metacafe] Retrieving disclaimer')
@@ -1470,14 +1547,11 @@ class DailymotionIE(InfoExtractor):
        """Information Extractor for Dailymotion"""
 
        _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^_/]+)_([^/]+)'
        """Information Extractor for Dailymotion"""
 
        _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^_/]+)_([^/]+)'
+       IE_NAME = u'dailymotion'
 
        def __init__(self, downloader=None):
                InfoExtractor.__init__(self, downloader)
 
 
        def __init__(self, downloader=None):
                InfoExtractor.__init__(self, downloader)
 
-       @staticmethod
-       def suitable(url):
-               return (re.match(DailymotionIE._VALID_URL, url) is not None)
-
        def report_download_webpage(self, video_id):
                """Report webpage download."""
                self._downloader.to_screen(u'[dailymotion] %s: Downloading webpage' % video_id)
        def report_download_webpage(self, video_id):
                """Report webpage download."""
                self._downloader.to_screen(u'[dailymotion] %s: Downloading webpage' % video_id)
@@ -1505,6 +1579,7 @@ class DailymotionIE(InfoExtractor):
 
                # Retrieve video webpage to extract further information
                request = urllib2.Request(url)
 
                # Retrieve video webpage to extract further information
                request = urllib2.Request(url)
+               request.add_header('Cookie', 'family_filter=off')
                try:
                        self.report_download_webpage(video_id)
                        webpage = urllib2.urlopen(request).read()
                try:
                        self.report_download_webpage(video_id)
                        webpage = urllib2.urlopen(request).read()
@@ -1514,25 +1589,29 @@ class DailymotionIE(InfoExtractor):
 
                # Extract URL, uploader and title from webpage
                self.report_extraction(video_id)
 
                # Extract URL, uploader and title from webpage
                self.report_extraction(video_id)
-               mobj = re.search(r'(?i)addVariable\(\"video\"\s*,\s*\"([^\"]*)\"\)', webpage)
+               mobj = re.search(r'(?i)addVariable\(\"sequence\"\s*,\s*\"([^\"]+?)\"\)', webpage)
                if mobj is None:
                        self._downloader.trouble(u'ERROR: unable to extract media URL')
                        return
                if mobj is None:
                        self._downloader.trouble(u'ERROR: unable to extract media URL')
                        return
-               mediaURL = urllib.unquote(mobj.group(1))
+               sequence = urllib.unquote(mobj.group(1))
+               mobj = re.search(r',\"sdURL\"\:\"([^\"]+?)\",', sequence)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: unable to extract media URL')
+                       return
+               mediaURL = urllib.unquote(mobj.group(1)).replace('\\', '')
 
                # if needed add http://www.dailymotion.com/ if relative URL
 
                video_url = mediaURL
 
 
                # if needed add http://www.dailymotion.com/ if relative URL
 
                video_url = mediaURL
 
-               # '<meta\s+name="title"\s+content="Dailymotion\s*[:\-]\s*(.*?)"\s*\/\s*>'
-               mobj = re.search(r'(?im)<title>Dailymotion\s*[\-:]\s*(.+?)</title>', webpage)
+               mobj = re.search(r'(?im)<title>Dailymotion\s*-\s*(.+)\s*-\s*[^<]+?</title>', webpage)
                if mobj is None:
                        self._downloader.trouble(u'ERROR: unable to extract title')
                        return
                video_title = mobj.group(1).decode('utf-8')
                video_title = sanitize_title(video_title)
 
                if mobj is None:
                        self._downloader.trouble(u'ERROR: unable to extract title')
                        return
                video_title = mobj.group(1).decode('utf-8')
                video_title = sanitize_title(video_title)
 
-               mobj = re.search(r'(?im)<Attribute name="owner">(.+?)</Attribute>', webpage)
+               mobj = re.search(r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a></span>', webpage)
                if mobj is None:
                        self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
                        return
                if mobj is None:
                        self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
                        return
@@ -1554,18 +1633,16 @@ class DailymotionIE(InfoExtractor):
                except UnavailableVideoError:
                        self._downloader.trouble(u'\nERROR: unable to download video')
 
                except UnavailableVideoError:
                        self._downloader.trouble(u'\nERROR: unable to download video')
 
+
 class GoogleIE(InfoExtractor):
        """Information extractor for video.google.com."""
 
        _VALID_URL = r'(?:http://)?video\.google\.(?:com(?:\.au)?|co\.(?:uk|jp|kr|cr)|ca|de|es|fr|it|nl|pl)/videoplay\?docid=([^\&]+).*'
 class GoogleIE(InfoExtractor):
        """Information extractor for video.google.com."""
 
        _VALID_URL = r'(?:http://)?video\.google\.(?:com(?:\.au)?|co\.(?:uk|jp|kr|cr)|ca|de|es|fr|it|nl|pl)/videoplay\?docid=([^\&]+).*'
+       IE_NAME = u'video.google'
 
        def __init__(self, downloader=None):
                InfoExtractor.__init__(self, downloader)
 
 
        def __init__(self, downloader=None):
                InfoExtractor.__init__(self, downloader)
 
-       @staticmethod
-       def suitable(url):
-               return (re.match(GoogleIE._VALID_URL, url) is not None)
-
        def report_download_webpage(self, video_id):
                """Report webpage download."""
                self._downloader.to_screen(u'[video.google] %s: Downloading webpage' % video_id)
        def report_download_webpage(self, video_id):
                """Report webpage download."""
                self._downloader.to_screen(u'[video.google] %s: Downloading webpage' % video_id)
@@ -1647,7 +1724,6 @@ class GoogleIE(InfoExtractor):
                else:   # we need something to pass to process_info
                        video_thumbnail = ''
 
                else:   # we need something to pass to process_info
                        video_thumbnail = ''
 
-
                try:
                        # Process video information
                        self._downloader.process_info({
                try:
                        # Process video information
                        self._downloader.process_info({
@@ -1669,14 +1745,11 @@ class PhotobucketIE(InfoExtractor):
        """Information extractor for photobucket.com."""
 
        _VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)'
        """Information extractor for photobucket.com."""
 
        _VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)'
+       IE_NAME = u'photobucket'
 
        def __init__(self, downloader=None):
                InfoExtractor.__init__(self, downloader)
 
 
        def __init__(self, downloader=None):
                InfoExtractor.__init__(self, downloader)
 
-       @staticmethod
-       def suitable(url):
-               return (re.match(PhotobucketIE._VALID_URL, url) is not None)
-
        def report_download_webpage(self, video_id):
                """Report webpage download."""
                self._downloader.to_screen(u'[photobucket] %s: Downloading webpage' % video_id)
        def report_download_webpage(self, video_id):
                """Report webpage download."""
                self._downloader.to_screen(u'[photobucket] %s: Downloading webpage' % video_id)
@@ -1754,14 +1827,11 @@ class YahooIE(InfoExtractor):
        # _VPAGE_URL matches only the extractable '/watch/' URLs
        _VALID_URL = r'(?:http://)?(?:[a-z]+\.)?video\.yahoo\.com/(?:watch|network)/([0-9]+)(?:/|\?v=)([0-9]+)(?:[#\?].*)?'
        _VPAGE_URL = r'(?:http://)?video\.yahoo\.com/watch/([0-9]+)/([0-9]+)(?:[#\?].*)?'
        # _VPAGE_URL matches only the extractable '/watch/' URLs
        _VALID_URL = r'(?:http://)?(?:[a-z]+\.)?video\.yahoo\.com/(?:watch|network)/([0-9]+)(?:/|\?v=)([0-9]+)(?:[#\?].*)?'
        _VPAGE_URL = r'(?:http://)?video\.yahoo\.com/watch/([0-9]+)/([0-9]+)(?:[#\?].*)?'
+       IE_NAME = u'video.yahoo'
 
        def __init__(self, downloader=None):
                InfoExtractor.__init__(self, downloader)
 
 
        def __init__(self, downloader=None):
                InfoExtractor.__init__(self, downloader)
 
-       @staticmethod
-       def suitable(url):
-               return (re.match(YahooIE._VALID_URL, url) is not None)
-
        def report_download_webpage(self, video_id):
                """Report webpage download."""
                self._downloader.to_screen(u'[video.yahoo] %s: Downloading webpage' % video_id)
        def report_download_webpage(self, video_id):
                """Report webpage download."""
                self._downloader.to_screen(u'[video.yahoo] %s: Downloading webpage' % video_id)
@@ -1847,7 +1917,8 @@ class YahooIE(InfoExtractor):
                        self._downloader.trouble(u'ERROR: unable to extract video description')
                        return
                video_description = mobj.group(1).decode('utf-8')
                        self._downloader.trouble(u'ERROR: unable to extract video description')
                        return
                video_description = mobj.group(1).decode('utf-8')
-               if not video_description: video_description = 'No description available.'
+               if not video_description:
+                       video_description = 'No description available.'
 
                # Extract video height and width
                mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage)
 
                # Extract video height and width
                mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage)
@@ -1868,8 +1939,8 @@ class YahooIE(InfoExtractor):
                yv_lg = 'R0xx6idZnW2zlrKP8xxAIR'  # not sure what this represents
                yv_bitrate = '700'  # according to Wikipedia this is hard-coded
                request = urllib2.Request('http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?node_id=' + video_id +
                yv_lg = 'R0xx6idZnW2zlrKP8xxAIR'  # not sure what this represents
                yv_bitrate = '700'  # according to Wikipedia this is hard-coded
                request = urllib2.Request('http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?node_id=' + video_id +
-                                                                 '&tech=flash&mode=playlist&lg=' + yv_lg + '&bitrate=' + yv_bitrate + '&vidH=' + yv_video_height +
-                                                                 '&vidW=' + yv_video_width + '&swf=as3&rd=video.yahoo.com&tk=null&adsupported=v1,v2,&eventid=1301797')
+                               '&tech=flash&mode=playlist&lg=' + yv_lg + '&bitrate=' + yv_bitrate + '&vidH=' + yv_video_height +
+                               '&vidW=' + yv_video_width + '&swf=as3&rd=video.yahoo.com&tk=null&adsupported=v1,v2,&eventid=1301797')
                try:
                        self.report_download_webpage(video_id)
                        webpage = urllib2.urlopen(request).read()
                try:
                        self.report_download_webpage(video_id)
                        webpage = urllib2.urlopen(request).read()
@@ -1898,7 +1969,6 @@ class YahooIE(InfoExtractor):
                                'thumbnail':    video_thumbnail.decode('utf-8'),
                                'description':  video_description,
                                'thumbnail':    video_thumbnail,
                                'thumbnail':    video_thumbnail.decode('utf-8'),
                                'description':  video_description,
                                'thumbnail':    video_thumbnail,
-                               'description':  video_description,
                                'player_url':   None,
                        })
                except UnavailableVideoError:
                                'player_url':   None,
                        })
                except UnavailableVideoError:
@@ -1910,14 +1980,11 @@ class VimeoIE(InfoExtractor):
 
        # _VALID_URL matches Vimeo URLs
        _VALID_URL = r'(?:https?://)?(?:(?:www|player).)?vimeo\.com/(?:groups/[^/]+/)?(?:videos?/)?([0-9]+)'
 
        # _VALID_URL matches Vimeo URLs
        _VALID_URL = r'(?:https?://)?(?:(?:www|player).)?vimeo\.com/(?:groups/[^/]+/)?(?:videos?/)?([0-9]+)'
+       IE_NAME = u'vimeo'
 
        def __init__(self, downloader=None):
                InfoExtractor.__init__(self, downloader)
 
 
        def __init__(self, downloader=None):
                InfoExtractor.__init__(self, downloader)
 
-       @staticmethod
-       def suitable(url):
-               return (re.match(VimeoIE._VALID_URL, url) is not None)
-
        def report_download_webpage(self, video_id):
                """Report webpage download."""
                self._downloader.to_screen(u'[vimeo] %s: Downloading webpage' % video_id)
        def report_download_webpage(self, video_id):
                """Report webpage download."""
                self._downloader.to_screen(u'[vimeo] %s: Downloading webpage' % video_id)
@@ -1992,6 +2059,18 @@ class VimeoIE(InfoExtractor):
                        return
                sig = mobj.group(1).decode('utf-8')
 
                        return
                sig = mobj.group(1).decode('utf-8')
 
+               # Vimeo specific: extract video quality information
+               mobj = re.search(r'<isHD>(\d+)</isHD>', webpage)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: unable to extract video quality information')
+                       return
+               quality = mobj.group(1).decode('utf-8')
+
+               if int(quality) == 1:
+                       quality = 'hd'
+               else:
+                       quality = 'sd'
+
                # Vimeo specific: Extract request signature expiration
                mobj = re.search(r'<request_signature_expires>(.*?)</request_signature_expires>', webpage)
                if mobj is None:
                # Vimeo specific: Extract request signature expiration
                mobj = re.search(r'<request_signature_expires>(.*?)</request_signature_expires>', webpage)
                if mobj is None:
@@ -1999,7 +2078,7 @@ class VimeoIE(InfoExtractor):
                        return
                sig_exp = mobj.group(1).decode('utf-8')
 
                        return
                sig_exp = mobj.group(1).decode('utf-8')
 
-               video_url = "http://vimeo.com/moogaloop/play/clip:%s/%s/%s" % (video_id, sig, sig_exp)
+               video_url = "http://vimeo.com/moogaloop/play/clip:%s/%s/%s/?q=%s" % (video_id, sig, sig_exp, quality)
 
                try:
                        # Process video information
 
                try:
                        # Process video information
@@ -2024,13 +2103,12 @@ class VimeoIE(InfoExtractor):
 class GenericIE(InfoExtractor):
        """Generic last-resort information extractor."""
 
 class GenericIE(InfoExtractor):
        """Generic last-resort information extractor."""
 
+       _VALID_URL = r'.*'
+       IE_NAME = u'generic'
+
        def __init__(self, downloader=None):
                InfoExtractor.__init__(self, downloader)
 
        def __init__(self, downloader=None):
                InfoExtractor.__init__(self, downloader)
 
-       @staticmethod
-       def suitable(url):
-               return True
-
        def report_download_webpage(self, video_id):
                """Report webpage download."""
                self._downloader.to_screen(u'WARNING: Falling back on generic information extractor.')
        def report_download_webpage(self, video_id):
                """Report webpage download."""
                self._downloader.to_screen(u'WARNING: Falling back on generic information extractor.')
@@ -2078,11 +2156,11 @@ class GenericIE(InfoExtractor):
                        return
 
                video_url = urllib.unquote(mobj.group(1))
                        return
 
                video_url = urllib.unquote(mobj.group(1))
-               video_id  = os.path.basename(video_url)
+               video_id = os.path.basename(video_url)
 
                # here's a fun little line of code for you:
                video_extension = os.path.splitext(video_id)[1][1:]
 
                # here's a fun little line of code for you:
                video_extension = os.path.splitext(video_id)[1][1:]
-               video_id        = os.path.splitext(video_id)[0]
+               video_id = os.path.splitext(video_id)[0]
 
                # it's tempting to parse this further, but you would
                # have to take into account all the variations like
 
                # it's tempting to parse this further, but you would
                # have to take into account all the variations like
@@ -2124,21 +2202,18 @@ class GenericIE(InfoExtractor):
 
 class YoutubeSearchIE(InfoExtractor):
        """Information Extractor for YouTube search queries."""
 
 class YoutubeSearchIE(InfoExtractor):
        """Information Extractor for YouTube search queries."""
-       _VALID_QUERY = r'ytsearch(\d+|all)?:[\s\S]+'
+       _VALID_URL = r'ytsearch(\d+|all)?:[\s\S]+'
        _TEMPLATE_URL = 'http://www.youtube.com/results?search_query=%s&page=%s&gl=US&hl=en'
        _VIDEO_INDICATOR = r'href="/watch\?v=.+?"'
        _MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
        _youtube_ie = None
        _max_youtube_results = 1000
        _TEMPLATE_URL = 'http://www.youtube.com/results?search_query=%s&page=%s&gl=US&hl=en'
        _VIDEO_INDICATOR = r'href="/watch\?v=.+?"'
        _MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
        _youtube_ie = None
        _max_youtube_results = 1000
+       IE_NAME = u'youtube:search'
 
        def __init__(self, youtube_ie, downloader=None):
                InfoExtractor.__init__(self, downloader)
                self._youtube_ie = youtube_ie
 
 
        def __init__(self, youtube_ie, downloader=None):
                InfoExtractor.__init__(self, downloader)
                self._youtube_ie = youtube_ie
 
-       @staticmethod
-       def suitable(url):
-               return (re.match(YoutubeSearchIE._VALID_QUERY, url) is not None)
-
        def report_download_page(self, query, pagenum):
                """Report attempt to download playlist page with given number."""
                query = query.decode(preferredencoding())
        def report_download_page(self, query, pagenum):
                """Report attempt to download playlist page with given number."""
                query = query.decode(preferredencoding())
@@ -2148,14 +2223,14 @@ class YoutubeSearchIE(InfoExtractor):
                self._youtube_ie.initialize()
 
        def _real_extract(self, query):
                self._youtube_ie.initialize()
 
        def _real_extract(self, query):
-               mobj = re.match(self._VALID_QUERY, query)
+               mobj = re.match(self._VALID_URL, query)
                if mobj is None:
                        self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
                        return
 
                prefix, query = query.split(':')
                prefix = prefix[8:]
                if mobj is None:
                        self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
                        return
 
                prefix, query = query.split(':')
                prefix = prefix[8:]
-               query  = query.encode('utf-8')
+               query = query.encode('utf-8')
                if prefix == '':
                        self._download_n_results(query, 1)
                        return
                if prefix == '':
                        self._download_n_results(query, 1)
                        return
@@ -2169,7 +2244,7 @@ class YoutubeSearchIE(InfoExtractor):
                                        self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
                                        return
                                elif n > self._max_youtube_results:
                                        self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
                                        return
                                elif n > self._max_youtube_results:
-                                       self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)'  % (self._max_youtube_results, n))
+                                       self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n))
                                        n = self._max_youtube_results
                                self._download_n_results(query, n)
                                return
                                        n = self._max_youtube_results
                                self._download_n_results(query, n)
                                return
@@ -2213,23 +2288,21 @@ class YoutubeSearchIE(InfoExtractor):
 
                        pagenum = pagenum + 1
 
 
                        pagenum = pagenum + 1
 
+
 class GoogleSearchIE(InfoExtractor):
        """Information Extractor for Google Video search queries."""
 class GoogleSearchIE(InfoExtractor):
        """Information Extractor for Google Video search queries."""
-       _VALID_QUERY = r'gvsearch(\d+|all)?:[\s\S]+'
+       _VALID_URL = r'gvsearch(\d+|all)?:[\s\S]+'
        _TEMPLATE_URL = 'http://video.google.com/videosearch?q=%s+site:video.google.com&start=%s&hl=en'
        _VIDEO_INDICATOR = r'videoplay\?docid=([^\&>]+)\&'
        _MORE_PAGES_INDICATOR = r'<span>Next</span>'
        _google_ie = None
        _max_google_results = 1000
        _TEMPLATE_URL = 'http://video.google.com/videosearch?q=%s+site:video.google.com&start=%s&hl=en'
        _VIDEO_INDICATOR = r'videoplay\?docid=([^\&>]+)\&'
        _MORE_PAGES_INDICATOR = r'<span>Next</span>'
        _google_ie = None
        _max_google_results = 1000
+       IE_NAME = u'video.google:search'
 
        def __init__(self, google_ie, downloader=None):
                InfoExtractor.__init__(self, downloader)
                self._google_ie = google_ie
 
 
        def __init__(self, google_ie, downloader=None):
                InfoExtractor.__init__(self, downloader)
                self._google_ie = google_ie
 
-       @staticmethod
-       def suitable(url):
-               return (re.match(GoogleSearchIE._VALID_QUERY, url) is not None)
-
        def report_download_page(self, query, pagenum):
                """Report attempt to download playlist page with given number."""
                query = query.decode(preferredencoding())
        def report_download_page(self, query, pagenum):
                """Report attempt to download playlist page with given number."""
                query = query.decode(preferredencoding())
@@ -2239,14 +2312,14 @@ class GoogleSearchIE(InfoExtractor):
                self._google_ie.initialize()
 
        def _real_extract(self, query):
                self._google_ie.initialize()
 
        def _real_extract(self, query):
-               mobj = re.match(self._VALID_QUERY, query)
+               mobj = re.match(self._VALID_URL, query)
                if mobj is None:
                        self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
                        return
 
                prefix, query = query.split(':')
                prefix = prefix[8:]
                if mobj is None:
                        self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
                        return
 
                prefix, query = query.split(':')
                prefix = prefix[8:]
-               query  = query.encode('utf-8')
+               query = query.encode('utf-8')
                if prefix == '':
                        self._download_n_results(query, 1)
                        return
                if prefix == '':
                        self._download_n_results(query, 1)
                        return
@@ -2260,7 +2333,7 @@ class GoogleSearchIE(InfoExtractor):
                                        self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
                                        return
                                elif n > self._max_google_results:
                                        self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
                                        return
                                elif n > self._max_google_results:
-                                       self._downloader.to_stderr(u'WARNING: gvsearch returns max %i results (you requested %i)'  % (self._max_google_results, n))
+                                       self._downloader.to_stderr(u'WARNING: gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n))
                                        n = self._max_google_results
                                self._download_n_results(query, n)
                                return
                                        n = self._max_google_results
                                self._download_n_results(query, n)
                                return
@@ -2304,23 +2377,21 @@ class GoogleSearchIE(InfoExtractor):
 
                        pagenum = pagenum + 1
 
 
                        pagenum = pagenum + 1
 
+
 class YahooSearchIE(InfoExtractor):
        """Information Extractor for Yahoo! Video search queries."""
 class YahooSearchIE(InfoExtractor):
        """Information Extractor for Yahoo! Video search queries."""
-       _VALID_QUERY = r'yvsearch(\d+|all)?:[\s\S]+'
+       _VALID_URL = r'yvsearch(\d+|all)?:[\s\S]+'
        _TEMPLATE_URL = 'http://video.yahoo.com/search/?p=%s&o=%s'
        _VIDEO_INDICATOR = r'href="http://video\.yahoo\.com/watch/([0-9]+/[0-9]+)"'
        _MORE_PAGES_INDICATOR = r'\s*Next'
        _yahoo_ie = None
        _max_yahoo_results = 1000
        _TEMPLATE_URL = 'http://video.yahoo.com/search/?p=%s&o=%s'
        _VIDEO_INDICATOR = r'href="http://video\.yahoo\.com/watch/([0-9]+/[0-9]+)"'
        _MORE_PAGES_INDICATOR = r'\s*Next'
        _yahoo_ie = None
        _max_yahoo_results = 1000
+       IE_NAME = u'video.yahoo:search'
 
        def __init__(self, yahoo_ie, downloader=None):
                InfoExtractor.__init__(self, downloader)
                self._yahoo_ie = yahoo_ie
 
 
        def __init__(self, yahoo_ie, downloader=None):
                InfoExtractor.__init__(self, downloader)
                self._yahoo_ie = yahoo_ie
 
-       @staticmethod
-       def suitable(url):
-               return (re.match(YahooSearchIE._VALID_QUERY, url) is not None)
-
        def report_download_page(self, query, pagenum):
                """Report attempt to download playlist page with given number."""
                query = query.decode(preferredencoding())
        def report_download_page(self, query, pagenum):
                """Report attempt to download playlist page with given number."""
                query = query.decode(preferredencoding())
@@ -2330,14 +2401,14 @@ class YahooSearchIE(InfoExtractor):
                self._yahoo_ie.initialize()
 
        def _real_extract(self, query):
                self._yahoo_ie.initialize()
 
        def _real_extract(self, query):
-               mobj = re.match(self._VALID_QUERY, query)
+               mobj = re.match(self._VALID_URL, query)
                if mobj is None:
                        self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
                        return
 
                prefix, query = query.split(':')
                prefix = prefix[8:]
                if mobj is None:
                        self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
                        return
 
                prefix, query = query.split(':')
                prefix = prefix[8:]
-               query  = query.encode('utf-8')
+               query = query.encode('utf-8')
                if prefix == '':
                        self._download_n_results(query, 1)
                        return
                if prefix == '':
                        self._download_n_results(query, 1)
                        return
@@ -2351,7 +2422,7 @@ class YahooSearchIE(InfoExtractor):
                                        self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
                                        return
                                elif n > self._max_yahoo_results:
                                        self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
                                        return
                                elif n > self._max_yahoo_results:
-                                       self._downloader.to_stderr(u'WARNING: yvsearch returns max %i results (you requested %i)'  % (self._max_yahoo_results, n))
+                                       self._downloader.to_stderr(u'WARNING: yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
                                        n = self._max_yahoo_results
                                self._download_n_results(query, n)
                                return
                                        n = self._max_yahoo_results
                                self._download_n_results(query, n)
                                return
@@ -2395,23 +2466,21 @@ class YahooSearchIE(InfoExtractor):
 
                        pagenum = pagenum + 1
 
 
                        pagenum = pagenum + 1
 
+
 class YoutubePlaylistIE(InfoExtractor):
        """Information Extractor for YouTube playlists."""
 
 class YoutubePlaylistIE(InfoExtractor):
        """Information Extractor for YouTube playlists."""
 
-       _VALID_URL = r'(?:http://)?(?:\w+\.)?youtube.com/(?:(?:view_play_list|my_playlists|artist)\?.*?(p|a)=|user/.*?/user/|p/|user/.*?#[pg]/c/)([0-9A-Za-z]+)(?:/.*?/([0-9A-Za-z_-]+))?.*'
+       _VALID_URL = r'(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL)?([0-9A-Za-z]+)(?:/.*?/([0-9A-Za-z_-]+))?.*'
        _TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
        _VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
        _MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
        _youtube_ie = None
        _TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
        _VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
        _MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
        _youtube_ie = None
+       IE_NAME = u'youtube:playlist'
 
        def __init__(self, youtube_ie, downloader=None):
                InfoExtractor.__init__(self, downloader)
                self._youtube_ie = youtube_ie
 
 
        def __init__(self, youtube_ie, downloader=None):
                InfoExtractor.__init__(self, downloader)
                self._youtube_ie = youtube_ie
 
-       @staticmethod
-       def suitable(url):
-               return (re.match(YoutubePlaylistIE._VALID_URL, url) is not None)
-
        def report_download_page(self, playlist_id, pagenum):
                """Report attempt to download playlist page with given number."""
                self._downloader.to_screen(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum))
        def report_download_page(self, playlist_id, pagenum):
                """Report attempt to download playlist page with given number."""
                self._downloader.to_screen(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum))
@@ -2471,28 +2540,26 @@ class YoutubePlaylistIE(InfoExtractor):
                        self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
                return
 
                        self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
                return
 
+
 class YoutubeUserIE(InfoExtractor):
        """Information Extractor for YouTube users."""
 
 class YoutubeUserIE(InfoExtractor):
        """Information Extractor for YouTube users."""
 
-       _VALID_URL = r'(?:(?:(?:http://)?(?:\w+\.)?youtube.com/user/)|ytuser:)([A-Za-z0-9_-]+)'
+       _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/user/)|ytuser:)([A-Za-z0-9_-]+)'
        _TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'
        _GDATA_PAGE_SIZE = 50
        _GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d'
        _VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
        _youtube_ie = None
        _TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'
        _GDATA_PAGE_SIZE = 50
        _GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d'
        _VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
        _youtube_ie = None
+       IE_NAME = u'youtube:user'
 
        def __init__(self, youtube_ie, downloader=None):
                InfoExtractor.__init__(self, downloader)
                self._youtube_ie = youtube_ie
 
 
        def __init__(self, youtube_ie, downloader=None):
                InfoExtractor.__init__(self, downloader)
                self._youtube_ie = youtube_ie
 
-       @staticmethod
-       def suitable(url):
-               return (re.match(YoutubeUserIE._VALID_URL, url) is not None)
-
        def report_download_page(self, username, start_index):
                """Report attempt to download user page."""
                self._downloader.to_screen(u'[youtube] user %s: Downloading video ids from %d to %d' %
        def report_download_page(self, username, start_index):
                """Report attempt to download user page."""
                self._downloader.to_screen(u'[youtube] user %s: Downloading video ids from %d to %d' %
-                                          (username, start_index, start_index + self._GDATA_PAGE_SIZE))
+                               (username, start_index, start_index + self._GDATA_PAGE_SIZE))
 
        def _real_initialize(self):
                self._youtube_ie.initialize()
 
        def _real_initialize(self):
                self._youtube_ie.initialize()
@@ -2556,7 +2623,7 @@ class YoutubeUserIE(InfoExtractor):
                        video_ids = video_ids[playliststart:playlistend]
 
                self._downloader.to_screen("[youtube] user %s: Collected %d video ids (downloading %d of them)" %
                        video_ids = video_ids[playliststart:playlistend]
 
                self._downloader.to_screen("[youtube] user %s: Collected %d video ids (downloading %d of them)" %
-                                                                 (username, all_ids_count, len(video_ids)))
+                               (username, all_ids_count, len(video_ids)))
 
                for video_id in video_ids:
                        self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % video_id)
 
                for video_id in video_ids:
                        self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % video_id)
@@ -2565,15 +2632,12 @@ class YoutubeUserIE(InfoExtractor):
 class DepositFilesIE(InfoExtractor):
        """Information extractor for depositfiles.com"""
 
 class DepositFilesIE(InfoExtractor):
        """Information extractor for depositfiles.com"""
 
-       _VALID_URL = r'(?:http://)?(?:\w+\.)?depositfiles.com/(?:../(?#locale))?files/(.+)'
+       _VALID_URL = r'(?:http://)?(?:\w+\.)?depositfiles\.com/(?:../(?#locale))?files/(.+)'
+       IE_NAME = u'DepositFiles'
 
        def __init__(self, downloader=None):
                InfoExtractor.__init__(self, downloader)
 
 
        def __init__(self, downloader=None):
                InfoExtractor.__init__(self, downloader)
 
-       @staticmethod
-       def suitable(url):
-               return (re.match(DepositFilesIE._VALID_URL, url) is not None)
-
        def report_download_webpage(self, file_id):
                """Report webpage download."""
                self._downloader.to_screen(u'[DepositFiles] %s: Downloading webpage' % file_id)
        def report_download_webpage(self, file_id):
                """Report webpage download."""
                self._downloader.to_screen(u'[DepositFiles] %s: Downloading webpage' % file_id)
@@ -2641,25 +2705,24 @@ class DepositFilesIE(InfoExtractor):
                except UnavailableVideoError, err:
                        self._downloader.trouble(u'ERROR: unable to download file')
 
                except UnavailableVideoError, err:
                        self._downloader.trouble(u'ERROR: unable to download file')
 
+
 class FacebookIE(InfoExtractor):
        """Information Extractor for Facebook"""
 
 class FacebookIE(InfoExtractor):
        """Information Extractor for Facebook"""
 
-       _VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook.com/video/video.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
+       _VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/(?:video/video|photo)\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
        _LOGIN_URL = 'https://login.facebook.com/login.php?m&next=http%3A%2F%2Fm.facebook.com%2Fhome.php&'
        _NETRC_MACHINE = 'facebook'
        _LOGIN_URL = 'https://login.facebook.com/login.php?m&next=http%3A%2F%2Fm.facebook.com%2Fhome.php&'
        _NETRC_MACHINE = 'facebook'
-       _available_formats = ['highqual', 'lowqual']
+       _available_formats = ['video', 'highqual', 'lowqual']
        _video_extensions = {
        _video_extensions = {
+               'video': 'mp4',
                'highqual': 'mp4',
                'lowqual': 'mp4',
        }
                'highqual': 'mp4',
                'lowqual': 'mp4',
        }
+       IE_NAME = u'facebook'
 
        def __init__(self, downloader=None):
                InfoExtractor.__init__(self, downloader)
 
 
        def __init__(self, downloader=None):
                InfoExtractor.__init__(self, downloader)
 
-       @staticmethod
-       def suitable(url):
-               return (re.match(FacebookIE._VALID_URL, url) is not None)
-
        def _reporter(self, message):
                """Add header and report message."""
                self._downloader.to_screen(u'[facebook] %s' % message)
        def _reporter(self, message):
                """Add header and report message."""
                self._downloader.to_screen(u'[facebook] %s' % message)
@@ -2679,10 +2742,9 @@ class FacebookIE(InfoExtractor):
        def _parse_page(self, video_webpage):
                """Extract video information from page"""
                # General data
        def _parse_page(self, video_webpage):
                """Extract video information from page"""
                # General data
-               data = {'title': r'class="video_title datawrap">(.*?)</',
+               data = {'title': r'\("video_title", "(.*?)"\)',
                        'description': r'<div class="datawrap">(.*?)</div>',
                        'owner': r'\("video_owner_name", "(.*?)"\)',
                        'description': r'<div class="datawrap">(.*?)</div>',
                        'owner': r'\("video_owner_name", "(.*?)"\)',
-                       'upload_date': r'data-date="(.*?)"',
                        'thumbnail':  r'\("thumb_url", "(?P<THUMB>.*?)"\)',
                        }
                video_info = {}
                        'thumbnail':  r'\("thumb_url", "(?P<THUMB>.*?)"\)',
                        }
                video_info = {}
@@ -2825,6 +2887,8 @@ class FacebookIE(InfoExtractor):
                                return
                        if req_format is None:
                                video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
                                return
                        if req_format is None:
                                video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
+                       elif req_format == 'worst':
+                               video_url_list = [(existing_formats[len(existing_formats)-1], url_map[existing_formats[len(existing_formats)-1]])] # worst quality
                        elif req_format == '-1':
                                video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
                        else:
                        elif req_format == '-1':
                                video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
                        else:
@@ -2865,14 +2929,15 @@ class BlipTVIE(InfoExtractor):
 
        _VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv(/.+)$'
        _URL_EXT = r'^.*\.([a-z0-9]+)$'
 
        _VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv(/.+)$'
        _URL_EXT = r'^.*\.([a-z0-9]+)$'
-
-       @staticmethod
-       def suitable(url):
-               return (re.match(BlipTVIE._VALID_URL, url) is not None)
+       IE_NAME = u'blip.tv'
 
        def report_extraction(self, file_id):
                """Report information extraction."""
 
        def report_extraction(self, file_id):
                """Report information extraction."""
-               self._downloader.to_screen(u'[blip.tv] %s: Extracting information' % file_id)
+               self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id))
+
+       def report_direct_download(self, title):
+               """Report information extraction."""
+               self._downloader.to_screen(u'[%s] %s: Direct download detected' % (self.IE_NAME, title))
 
        def _simplify_title(self, title):
                res = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', title)
 
        def _simplify_title(self, title):
                res = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', title)
@@ -2892,47 +2957,628 @@ class BlipTVIE(InfoExtractor):
                json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
                request = urllib2.Request(json_url)
                self.report_extraction(mobj.group(1))
                json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
                request = urllib2.Request(json_url)
                self.report_extraction(mobj.group(1))
+               info = None
                try:
                try:
-                       json_code = urllib2.urlopen(request).read()
+                       urlh = urllib2.urlopen(request)
+                       if urlh.headers.get('Content-Type', '').startswith('video/'): # Direct download
+                               basename = url.split('/')[-1]
+                               title,ext = os.path.splitext(basename)
+                               ext = ext.replace('.', '')
+                               self.report_direct_download(title)
+                               info = {
+                                       'id': title,
+                                       'url': url,
+                                       'title': title,
+                                       'stitle': self._simplify_title(title),
+                                       'ext': ext,
+                                       'urlhandle': urlh
+                               }
                except (urllib2.URLError, httplib.HTTPException, socket.error), err:
                        self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % str(err))
                        return
                except (urllib2.URLError, httplib.HTTPException, socket.error), err:
                        self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % str(err))
                        return
+               if info is None: # Regular URL
+                       try:
+                               json_code = urlh.read()
+                       except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                               self._downloader.trouble(u'ERROR: unable to read video info webpage: %s' % str(err))
+                               return
+
+                       try:
+                               json_data = json.loads(json_code)
+                               if 'Post' in json_data:
+                                       data = json_data['Post']
+                               else:
+                                       data = json_data
+       
+                               upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
+                               video_url = data['media']['url']
+                               umobj = re.match(self._URL_EXT, video_url)
+                               if umobj is None:
+                                       raise ValueError('Can not determine filename extension')
+                               ext = umobj.group(1)
+       
+                               info = {
+                                       'id': data['item_id'],
+                                       'url': video_url,
+                                       'uploader': data['display_name'],
+                                       'upload_date': upload_date,
+                                       'title': data['title'],
+                                       'stitle': self._simplify_title(data['title']),
+                                       'ext': ext,
+                                       'format': data['media']['mimeType'],
+                                       'thumbnail': data['thumbnailUrl'],
+                                       'description': data['description'],
+                                       'player_url': data['embedUrl']
+                               }
+                       except (ValueError,KeyError), err:
+                               self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err))
+                               return
+
+               self._downloader.increment_downloads()
+
+               try:
+                       self._downloader.process_info(info)
+               except UnavailableVideoError, err:
+                       self._downloader.trouble(u'\nERROR: unable to download video')
+
+
+class MyVideoIE(InfoExtractor):
+       """Information Extractor for myvideo.de."""
+
+       _VALID_URL = r'(?:http://)?(?:www\.)?myvideo\.de/watch/([0-9]+)/([^?/]+).*'
+       IE_NAME = u'myvideo'
+
+       def __init__(self, downloader=None):
+               InfoExtractor.__init__(self, downloader)
+       
+       def report_download_webpage(self, video_id):
+               """Report webpage download."""
+               self._downloader.to_screen(u'[myvideo] %s: Downloading webpage' % video_id)
+
+       def report_extraction(self, video_id):
+               """Report information extraction."""
+               self._downloader.to_screen(u'[myvideo] %s: Extracting information' % video_id)
+
+       def _real_initialize(self):
+               return
+
+       def _real_extract(self,url):
+               mobj = re.match(self._VALID_URL, url)
+               if mobj is None:
+                       self._download.trouble(u'ERROR: invalid URL: %s' % url)
+                       return
+
+               video_id = mobj.group(1)
+               simple_title = mobj.group(2).decode('utf-8')
+               # should actually not be necessary
+               simple_title = sanitize_title(simple_title)
+               simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', simple_title)
+
+               # Get video webpage
+               request = urllib2.Request('http://www.myvideo.de/watch/%s' % video_id)
                try:
                try:
-                       json_data = json.loads(json_code)
-                       if 'Post' in json_data:
-                               data = json_data['Post']
+                       self.report_download_webpage(video_id)
+                       webpage = urllib2.urlopen(request).read()
+               except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                       self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+                       return
+
+               self.report_extraction(video_id)
+               mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/[^.]+\.jpg\' />',
+                                webpage)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: unable to extract media URL')
+                       return
+               video_url = mobj.group(1) + ('/%s.flv' % video_id)
+
+               mobj = re.search('<title>([^<]+)</title>', webpage)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: unable to extract title')
+                       return
+
+               video_title = mobj.group(1)
+               video_title = sanitize_title(video_title)
+
+               try:
+                       self._downloader.process_info({
+                               'id':           video_id,
+                               'url':          video_url,
+                               'uploader':     u'NA',
+                               'upload_date':  u'NA',
+                               'title':        video_title,
+                               'stitle':       simple_title,
+                               'ext':          u'flv',
+                               'format':       u'NA',
+                               'player_url':   None,
+                       })
+               except UnavailableVideoError:
+                       self._downloader.trouble(u'\nERROR: Unable to download video')
+
+class ComedyCentralIE(InfoExtractor):
+       """Information extractor for The Daily Show and Colbert Report """
+
+       _VALID_URL = r'^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport))|(https?://)?(www\.)?(?P<showname>thedailyshow|colbertnation)\.com/full-episodes/(?P<episode>.*)$'
+       IE_NAME = u'comedycentral'
+
+       def report_extraction(self, episode_id):
+               self._downloader.to_screen(u'[comedycentral] %s: Extracting information' % episode_id)
+       
+       def report_config_download(self, episode_id):
+               self._downloader.to_screen(u'[comedycentral] %s: Downloading configuration' % episode_id)
+
+       def report_index_download(self, episode_id):
+               self._downloader.to_screen(u'[comedycentral] %s: Downloading show index' % episode_id)
+
+       def report_player_url(self, episode_id):
+               self._downloader.to_screen(u'[comedycentral] %s: Determining player URL' % episode_id)
+
+       def _simplify_title(self, title):
+               res = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', title)
+               res = res.strip(ur'_')
+               return res
+
+       def _real_extract(self, url):
+               mobj = re.match(self._VALID_URL, url)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+                       return
+
+               if mobj.group('shortname'):
+                       if mobj.group('shortname') in ('tds', 'thedailyshow'):
+                               url = 'http://www.thedailyshow.com/full-episodes/'
                        else:
                        else:
-                               data = json_data
+                               url = 'http://www.colbertnation.com/full-episodes/'
+                       mobj = re.match(self._VALID_URL, url)
+                       assert mobj is not None
+
+               dlNewest = not mobj.group('episode')
+               if dlNewest:
+                       epTitle = mobj.group('showname')
+               else:
+                       epTitle = mobj.group('episode')
+
+               req = urllib2.Request(url)
+               self.report_extraction(epTitle)
+               try:
+                       htmlHandle = urllib2.urlopen(req)
+                       html = htmlHandle.read()
+               except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                       self._downloader.trouble(u'ERROR: unable to download webpage: %s' % unicode(err))
+                       return
+               if dlNewest:
+                       url = htmlHandle.geturl()
+                       mobj = re.match(self._VALID_URL, url)
+                       if mobj is None:
+                               self._downloader.trouble(u'ERROR: Invalid redirected URL: ' + url)
+                               return
+                       if mobj.group('episode') == '':
+                               self._downloader.trouble(u'ERROR: Redirected URL is still not specific: ' + url)
+                               return
+                       epTitle = mobj.group('episode')
+
+               mMovieParams = re.findall('<param name="movie" value="(http://media.mtvnservices.com/([^"]*episode.*?:.*?))"/>', html)
+               if len(mMovieParams) == 0:
+                       self._downloader.trouble(u'ERROR: unable to find Flash URL in webpage ' + url)
+                       return
+
+               playerUrl_raw = mMovieParams[0][0]
+               self.report_player_url(epTitle)
+               try:
+                       urlHandle = urllib2.urlopen(playerUrl_raw)
+                       playerUrl = urlHandle.geturl()
+               except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                       self._downloader.trouble(u'ERROR: unable to find out player URL: ' + unicode(err))
+                       return
+
+               uri = mMovieParams[0][1]
+               indexUrl = 'http://shadow.comedycentral.com/feeds/video_player/mrss/?' + urllib.urlencode({'uri': uri})
+               self.report_index_download(epTitle)
+               try:
+                       indexXml = urllib2.urlopen(indexUrl).read()
+               except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                       self._downloader.trouble(u'ERROR: unable to download episode index: ' + unicode(err))
+                       return
+
+               idoc = xml.etree.ElementTree.fromstring(indexXml)
+               itemEls = idoc.findall('.//item')
+               for itemEl in itemEls:
+                       mediaId = itemEl.findall('./guid')[0].text
+                       shortMediaId = mediaId.split(':')[-1]
+                       showId = mediaId.split(':')[-2].replace('.com', '')
+                       officialTitle = itemEl.findall('./title')[0].text
+                       officialDate = itemEl.findall('./pubDate')[0].text
+
+                       configUrl = ('http://www.comedycentral.com/global/feeds/entertainment/media/mediaGenEntertainment.jhtml?' +
+                                               urllib.urlencode({'uri': mediaId}))
+                       configReq = urllib2.Request(configUrl)
+                       self.report_config_download(epTitle)
+                       try:
+                               configXml = urllib2.urlopen(configReq).read()
+                       except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                               self._downloader.trouble(u'ERROR: unable to download webpage: %s' % unicode(err))
+                               return
+
+                       cdoc = xml.etree.ElementTree.fromstring(configXml)
+                       turls = []
+                       for rendition in cdoc.findall('.//rendition'):
+                               finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text)
+                               turls.append(finfo)
+
+                       if len(turls) == 0:
+                               self._downloader.trouble(u'\nERROR: unable to download ' + mediaId + ': No videos found')
+                               continue
 
 
-                       upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
-                       video_url = data['media']['url']
-                       umobj = re.match(self._URL_EXT, video_url)
-                       if umobj is None:
-                               raise ValueError('Can not determine filename extension')
-                       ext = umobj.group(1)
+                       # For now, just pick the highest bitrate
+                       format,video_url = turls[-1]
 
                        self._downloader.increment_downloads()
 
 
                        self._downloader.increment_downloads()
 
+                       effTitle = showId + '-' + epTitle
                        info = {
                        info = {
-                               'id': data['item_id'],
+                               'id': shortMediaId,
                                'url': video_url,
                                'url': video_url,
-                               'uploader': data['display_name'],
-                               'upload_date': upload_date,
-                               'title': data['title'],
-                               'stitle': self._simplify_title(data['title']),
-                               'ext': ext,
-                               'format': data['media']['mimeType'],
-                               'thumbnail': data['thumbnailUrl'],
-                               'description': data['description'],
-                               'player_url': data['embedUrl']
+                               'uploader': showId,
+                               'upload_date': officialDate,
+                               'title': effTitle,
+                               'stitle': self._simplify_title(effTitle),
+                               'ext': 'mp4',
+                               'format': format,
+                               'thumbnail': None,
+                               'description': officialTitle,
+                               'player_url': playerUrl
                        }
                        }
-               except (ValueError,KeyError), err:
-                       self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err))
+
+                       try:
+                               self._downloader.process_info(info)
+                       except UnavailableVideoError, err:
+                               self._downloader.trouble(u'\nERROR: unable to download ' + mediaId)
+                               continue
+
+
+class EscapistIE(InfoExtractor):
+       """Information extractor for The Escapist """
+
+       _VALID_URL = r'^(https?://)?(www\.)?escapistmagazine\.com/videos/view/(?P<showname>[^/]+)/(?P<episode>[^/?]+)[/?]?.*$'
+       IE_NAME = u'escapist'
+
+       def report_extraction(self, showName):
+               self._downloader.to_screen(u'[escapist] %s: Extracting information' % showName)
+
+       def report_config_download(self, showName):
+               self._downloader.to_screen(u'[escapist] %s: Downloading configuration' % showName)
+
+       def _simplify_title(self, title):
+               res = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', title)
+               res = res.strip(ur'_')
+               return res
+
+       def _real_extract(self, url):
+               htmlParser = HTMLParser.HTMLParser()
+
+               mobj = re.match(self._VALID_URL, url)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+                       return
+               showName = mobj.group('showname')
+               videoId = mobj.group('episode')
+
+               self.report_extraction(showName)
+               try:
+                       webPage = urllib2.urlopen(url).read()
+               except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                       self._downloader.trouble(u'ERROR: unable to download webpage: ' + unicode(err))
+                       return
+
+               descMatch = re.search('<meta name="description" content="([^"]*)"', webPage)
+               description = htmlParser.unescape(descMatch.group(1))
+               imgMatch = re.search('<meta property="og:image" content="([^"]*)"', webPage)
+               imgUrl = htmlParser.unescape(imgMatch.group(1))
+               playerUrlMatch = re.search('<meta property="og:video" content="([^"]*)"', webPage)
+               playerUrl = htmlParser.unescape(playerUrlMatch.group(1))
+               configUrlMatch = re.search('config=(.*)$', playerUrl)
+               configUrl = urllib2.unquote(configUrlMatch.group(1))
+
+               self.report_config_download(showName)
+               try:
+                       configJSON = urllib2.urlopen(configUrl).read()
+               except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                       self._downloader.trouble(u'ERROR: unable to download configuration: ' + unicode(err))
+                       return
+
+               # Technically, it's JavaScript, not JSON
+               configJSON = configJSON.replace("'", '"')
+
+               try:
+                       config = json.loads(configJSON)
+               except (ValueError,), err:
+                       self._downloader.trouble(u'ERROR: Invalid JSON in configuration file: ' + unicode(err))
+                       return
+
+               playlist = config['playlist']
+               videoUrl = playlist[1]['url']
+
+               self._downloader.increment_downloads()
+               info = {
+                       'id': videoId,
+                       'url': videoUrl,
+                       'uploader': showName,
+                       'upload_date': None,
+                       'title': showName,
+                       'stitle': self._simplify_title(showName),
+                       'ext': 'flv',
+                       'format': 'flv',
+                       'thumbnail': imgUrl,
+                       'description': description,
+                       'player_url': playerUrl,
+               }
+
+               try:
+                       self._downloader.process_info(info)
+               except UnavailableVideoError, err:
+                       self._downloader.trouble(u'\nERROR: unable to download ' + videoId)
+
+
+class CollegeHumorIE(InfoExtractor):
+       """Information extractor for collegehumor.com"""
+
+       _VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/video/(?P<videoid>[0-9]+)/(?P<shorttitle>.*)$'
+       IE_NAME = u'collegehumor'
+
+       def report_webpage(self, video_id):
+               """Report information extraction."""
+               self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
+
+       def report_extraction(self, video_id):
+               """Report information extraction."""
+               self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
+
+       def _simplify_title(self, title):
+               res = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', title)
+               res = res.strip(ur'_')
+               return res
+
+       def _real_extract(self, url):
+               htmlParser = HTMLParser.HTMLParser()
+
+               mobj = re.match(self._VALID_URL, url)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+                       return
+               video_id = mobj.group('videoid')
+
+               self.report_webpage(video_id)
+               request = urllib2.Request(url)
+               try:
+                       webpage = urllib2.urlopen(request).read()
+               except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                       self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
+                       return
+
+               m = re.search(r'id="video:(?P<internalvideoid>[0-9]+)"', webpage)
+               if m is None:
+                       self._downloader.trouble(u'ERROR: Cannot extract internal video ID')
+                       return
+               internal_video_id = m.group('internalvideoid')
+
+               info = {
+                       'id': video_id,
+                       'internal_id': internal_video_id,
+               }
+
+               self.report_extraction(video_id)
+               xmlUrl = 'http://www.collegehumor.com/moogaloop/video:' + internal_video_id
+               try:
+                       metaXml = urllib2.urlopen(xmlUrl).read()
+               except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                       self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % str(err))
+                       return
+
+               mdoc = xml.etree.ElementTree.fromstring(metaXml)
+               try:
+                       videoNode = mdoc.findall('./video')[0]
+                       info['description'] = videoNode.findall('./description')[0].text
+                       info['title'] = videoNode.findall('./caption')[0].text
+                       info['stitle'] = self._simplify_title(info['title'])
+                       info['url'] = videoNode.findall('./file')[0].text
+                       info['thumbnail'] = videoNode.findall('./thumbnail')[0].text
+                       info['ext'] = info['url'].rpartition('.')[2]
+                       info['format'] = info['ext']
+               except IndexError:
+                       self._downloader.trouble(u'\nERROR: Invalid metadata XML file')
+                       return
+
+               self._downloader.increment_downloads()
+
+               try:
+                       self._downloader.process_info(info)
+               except UnavailableVideoError, err:
+                       self._downloader.trouble(u'\nERROR: unable to download video')
+
+
+class XVideosIE(InfoExtractor):
+       """Information extractor for xvideos.com"""
+
+       _VALID_URL = r'^(?:https?://)?(?:www\.)?xvideos\.com/video([0-9]+)(?:.*)'
+       IE_NAME = u'xvideos'
+
+       def report_webpage(self, video_id):
+               """Report information extraction."""
+               self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
+
+       def report_extraction(self, video_id):
+               """Report information extraction."""
+               self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
+
+       def _simplify_title(self, title):
+               res = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', title)
+               res = res.strip(ur'_')
+               return res
+
+       def _real_extract(self, url):
+               htmlParser = HTMLParser.HTMLParser()
+
+               mobj = re.match(self._VALID_URL, url)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+                       return
+               video_id = mobj.group(1).decode('utf-8')
+
+               self.report_webpage(video_id)
+
+               request = urllib2.Request(r'http://www.xvideos.com/video' + video_id)
+               try:
+                       webpage = urllib2.urlopen(request).read()
+               except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                       self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
+                       return
+
+               self.report_extraction(video_id)
+
+
+               # Extract video URL
+               mobj = re.search(r'flv_url=(.+?)&', webpage)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: unable to extract video url')
+                       return
+               video_url = urllib2.unquote(mobj.group(1).decode('utf-8'))
+
+
+               # Extract title
+               mobj = re.search(r'<title>(.*?)\s+-\s+XVID', webpage)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: unable to extract video title')
+                       return
+               video_title = mobj.group(1).decode('utf-8')
+
+
+               # Extract video thumbnail
+               mobj = re.search(r'http://(?:img.*?\.)xvideos.com/videos/thumbs/[a-fA-F0-9]/[a-fA-F0-9]/[a-fA-F0-9]/([a-fA-F0-9.]+jpg)', webpage)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
                        return
                        return
+               video_thumbnail = mobj.group(1).decode('utf-8')
+
+
+
+               self._downloader.increment_downloads()
+               info = {
+                       'id': video_id,
+                       'url': video_url,
+                       'uploader': None,
+                       'upload_date': None,
+                       'title': video_title,
+                       'stitle': self._simplify_title(video_title),
+                       'ext': 'flv',
+                       'format': 'flv',
+                       'thumbnail': video_thumbnail,
+                       'description': None,
+                       'player_url': None,
+               }
 
                try:
                        self._downloader.process_info(info)
                except UnavailableVideoError, err:
 
                try:
                        self._downloader.process_info(info)
                except UnavailableVideoError, err:
+                       self._downloader.trouble(u'\nERROR: unable to download ' + video_id)
+
+
+class SoundcloudIE(InfoExtractor):
+       """Information extractor for soundcloud.com
+          To access the media, the uid of the song and a stream token
+          must be extracted from the page source and the script must make
+          a request to media.soundcloud.com/crossdomain.xml. Then
+          the media can be grabbed by requesting from an url composed
+          of the stream token and uid
+        """
+
+       _VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/([\w\d-]+)'
+       IE_NAME = u'soundcloud'
+
+       def __init__(self, downloader=None):
+               InfoExtractor.__init__(self, downloader)
+
+       def report_webpage(self, video_id):
+               """Report information extraction."""
+               self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
+
+       def report_extraction(self, video_id):
+               """Report information extraction."""
+               self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
+
+       def _real_initialize(self):
+               return
+
+       def _real_extract(self, url):
+               htmlParser = HTMLParser.HTMLParser()
+
+               mobj = re.match(self._VALID_URL, url)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+                       return
+
+               # extract uploader (which is in the url)
+               uploader = mobj.group(1).decode('utf-8')
+               # extract simple title (uploader + slug of song title)
+               slug_title =  mobj.group(2).decode('utf-8')
+               simple_title = uploader + '-' + slug_title
+
+               self.report_webpage('%s/%s' % (uploader, slug_title))
+
+               request = urllib2.Request('http://soundcloud.com/%s/%s' % (uploader, slug_title))
+               try:
+                       webpage = urllib2.urlopen(request).read()
+               except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                       self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
+                       return
+
+               self.report_extraction('%s/%s' % (uploader, slug_title))
+
+               # extract uid and stream token that soundcloud hands out for access
+               mobj = re.search('"uid":"([\w\d]+?)".*?stream_token=([\w\d]+)', webpage)   
+               if mobj:
+                       video_id = mobj.group(1)
+                       stream_token = mobj.group(2)
+
+               # extract unsimplified title
+               mobj = re.search('"title":"(.*?)",', webpage)
+               if mobj:
+                       title = mobj.group(1)
+
+               # construct media url (with uid/token)
+               mediaURL = "http://media.soundcloud.com/stream/%s?stream_token=%s"
+               mediaURL = mediaURL % (video_id, stream_token)
+
+               # description
+               description = u'No description available'
+               mobj = re.search('track-description-value"><p>(.*?)</p>', webpage)
+               if mobj:
+                       description = mobj.group(1)
+               
+               # upload date
+               upload_date = None
+               mobj = re.search("pretty-date'>on ([\w]+ [\d]+, [\d]+ \d+:\d+)</abbr></h2>", webpage)
+               if mobj:
+                       try:
+                               upload_date = datetime.datetime.strptime(mobj.group(1), '%B %d, %Y %H:%M').strftime('%Y%m%d')
+                       except Exception as e:
+                               print str(e)
+
+               # for soundcloud, a request to a cross domain is required for cookies
+               request = urllib2.Request('http://media.soundcloud.com/crossdomain.xml', std_headers)
+
+               try:
+                       self._downloader.process_info({
+                               'id':           video_id.decode('utf-8'),
+                               'url':          mediaURL,
+                               'uploader':     uploader.decode('utf-8'),
+                               'upload_date':  upload_date,
+                               'title':        simple_title.decode('utf-8'),
+                               'stitle':       simple_title.decode('utf-8'),
+                               'ext':          u'mp3',
+                               'format':       u'NA',
+                               'player_url':   None,
+                               'description': description.decode('utf-8')
+                       })
+               except UnavailableVideoError:
                        self._downloader.trouble(u'\nERROR: unable to download video')
 
 
                        self._downloader.trouble(u'\nERROR: unable to download video')
 
 
@@ -2982,13 +3628,16 @@ class PostProcessor(object):
                """
                return information # by default, do nothing
 
                """
                return information # by default, do nothing
 
+
 class FFmpegExtractAudioPP(PostProcessor):
 
 class FFmpegExtractAudioPP(PostProcessor):
 
-       def __init__(self, downloader=None, preferredcodec=None):
+       def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, keepvideo=False):
                PostProcessor.__init__(self, downloader)
                if preferredcodec is None:
                        preferredcodec = 'best'
                self._preferredcodec = preferredcodec
                PostProcessor.__init__(self, downloader)
                if preferredcodec is None:
                        preferredcodec = 'best'
                self._preferredcodec = preferredcodec
+               self._preferredquality = preferredquality
+               self._keepvideo = keepvideo
 
        @staticmethod
        def get_audio_codec(path):
 
        @staticmethod
        def get_audio_codec(path):
@@ -3027,24 +3676,32 @@ class FFmpegExtractAudioPP(PostProcessor):
 
                more_opts = []
                if self._preferredcodec == 'best' or self._preferredcodec == filecodec:
 
                more_opts = []
                if self._preferredcodec == 'best' or self._preferredcodec == filecodec:
-                       if filecodec == 'aac' or filecodec == 'mp3':
+                       if filecodec in ['aac', 'mp3', 'vorbis']:
                                # Lossless if possible
                                acodec = 'copy'
                                extension = filecodec
                                if filecodec == 'aac':
                                        more_opts = ['-f', 'adts']
                                # Lossless if possible
                                acodec = 'copy'
                                extension = filecodec
                                if filecodec == 'aac':
                                        more_opts = ['-f', 'adts']
+                               if filecodec == 'vorbis':
+                                       extension = 'ogg'
                        else:
                                # MP3 otherwise.
                                acodec = 'libmp3lame'
                                extension = 'mp3'
                        else:
                                # MP3 otherwise.
                                acodec = 'libmp3lame'
                                extension = 'mp3'
-                               more_opts = ['-ab', '128k']
+                               more_opts = []
+                               if self._preferredquality is not None:
+                                       more_opts += ['-ab', self._preferredquality]
                else:
                        # We convert the audio (lossy)
                else:
                        # We convert the audio (lossy)
-                       acodec = {'mp3': 'libmp3lame', 'aac': 'aac'}[self._preferredcodec]
+                       acodec = {'mp3': 'libmp3lame', 'aac': 'aac', 'vorbis': 'libvorbis'}[self._preferredcodec]
                        extension = self._preferredcodec
                        extension = self._preferredcodec
-                       more_opts = ['-ab', '128k']
+                       more_opts = []
+                       if self._preferredquality is not None:
+                               more_opts += ['-ab', self._preferredquality]
                        if self._preferredcodec == 'aac':
                                more_opts += ['-f', 'adts']
                        if self._preferredcodec == 'aac':
                                more_opts += ['-f', 'adts']
+                       if self._preferredcodec == 'vorbis':
+                               extension = 'ogg'
 
                (prefix, ext) = os.path.splitext(path)
                new_path = prefix + '.' + extension
 
                (prefix, ext) = os.path.splitext(path)
                new_path = prefix + '.' + extension
@@ -3055,11 +3712,19 @@ class FFmpegExtractAudioPP(PostProcessor):
                        self._downloader.to_stderr(u'WARNING: error running ffmpeg')
                        return None
 
                        self._downloader.to_stderr(u'WARNING: error running ffmpeg')
                        return None
 
-               try:
-                       os.remove(path)
-               except (IOError, OSError):
-                       self._downloader.to_stderr(u'WARNING: Unable to remove downloaded video file')
-                       return None
+               # Try to update the date time for extracted audio file.
+               if information.get('filetime') is not None:
+                       try:
+                               os.utime(new_path, (time.time(), information['filetime']))
+                       except:
+                               self._downloader.to_stderr(u'WARNING: Cannot update utime of audio file')
+
+               if not self._keepvideo:
+                       try:
+                               os.remove(path)
+                       except (IOError, OSError):
+                               self._downloader.to_stderr(u'WARNING: Unable to remove downloaded video file')
+                               return None
 
                information['filepath'] = new_path
                return information
 
                information['filepath'] = new_path
                return information
@@ -3071,24 +3736,32 @@ def updateSelf(downloader, filename):
        if not os.access(filename, os.W_OK):
                sys.exit('ERROR: no write permissions on %s' % filename)
 
        if not os.access(filename, os.W_OK):
                sys.exit('ERROR: no write permissions on %s' % filename)
 
-       downloader.to_screen('Updating to latest stable version...')
+       downloader.to_screen('Updating to latest version...')
 
        try:
 
        try:
-               latest_url = 'http://github.com/rg3/youtube-dl/raw/master/LATEST_VERSION'
-               latest_version = urllib.urlopen(latest_url).read().strip()
-               prog_url = 'http://github.com/rg3/youtube-dl/raw/%s/youtube-dl' % latest_version
-               newcontent = urllib.urlopen(prog_url).read()
+               try:
+                       urlh = urllib.urlopen(UPDATE_URL)
+                       newcontent = urlh.read()
+                       
+                       vmatch = re.search("__version__ = '([^']+)'", newcontent)
+                       if vmatch is not None and vmatch.group(1) == __version__:
+                               downloader.to_screen('youtube-dl is up-to-date (' + __version__ + ')')
+                               return
+               finally:
+                       urlh.close()
        except (IOError, OSError), err:
                sys.exit('ERROR: unable to download latest version')
 
        try:
        except (IOError, OSError), err:
                sys.exit('ERROR: unable to download latest version')
 
        try:
-               stream = open(filename, 'wb')
-               stream.write(newcontent)
-               stream.close()
+               outf = open(filename, 'wb')
+               try:
+                       outf.write(newcontent)
+               finally:
+                       outf.close()
        except (IOError, OSError), err:
                sys.exit('ERROR: unable to overwrite current version')
 
        except (IOError, OSError), err:
                sys.exit('ERROR: unable to overwrite current version')
 
-       downloader.to_screen('Updated to version %s' % latest_version)
+       downloader.to_screen('Updated youtube-dl. Restart youtube-dl to use the new version.')
 
 def parseOpts():
        # Deferred imports
 
 def parseOpts():
        # Deferred imports
@@ -3134,7 +3807,7 @@ def parseOpts():
        kw = {
                'version'   : __version__,
                'formatter' : fmt,
        kw = {
                'version'   : __version__,
                'formatter' : fmt,
-               'usage' : '%prog [options] url...',
+               'usage' : '%prog [options] url [url...]',
                'conflict_handler' : 'resolve',
        }
 
                'conflict_handler' : 'resolve',
        }
 
@@ -3142,6 +3815,7 @@ def parseOpts():
 
        # option groups
        general        = optparse.OptionGroup(parser, 'General Options')
 
        # option groups
        general        = optparse.OptionGroup(parser, 'General Options')
+       selection      = optparse.OptionGroup(parser, 'Video Selection')
        authentication = optparse.OptionGroup(parser, 'Authentication Options')
        video_format   = optparse.OptionGroup(parser, 'Video Format Options')
        postproc       = optparse.OptionGroup(parser, 'Post-processing Options')
        authentication = optparse.OptionGroup(parser, 'Authentication Options')
        video_format   = optparse.OptionGroup(parser, 'Video Format Options')
        postproc       = optparse.OptionGroup(parser, 'Post-processing Options')
@@ -3153,20 +3827,26 @@ def parseOpts():
        general.add_option('-v', '--version',
                        action='version', help='print program version and exit')
        general.add_option('-U', '--update',
        general.add_option('-v', '--version',
                        action='version', help='print program version and exit')
        general.add_option('-U', '--update',
-                       action='store_true', dest='update_self', help='update this program to latest stable version')
+                       action='store_true', dest='update_self', help='update this program to latest version')
        general.add_option('-i', '--ignore-errors',
                        action='store_true', dest='ignoreerrors', help='continue on download errors', default=False)
        general.add_option('-r', '--rate-limit',
                        dest='ratelimit', metavar='LIMIT', help='download rate limit (e.g. 50k or 44.6m)')
        general.add_option('-R', '--retries',
                        dest='retries', metavar='RETRIES', help='number of retries (default is 10)', default=10)
        general.add_option('-i', '--ignore-errors',
                        action='store_true', dest='ignoreerrors', help='continue on download errors', default=False)
        general.add_option('-r', '--rate-limit',
                        dest='ratelimit', metavar='LIMIT', help='download rate limit (e.g. 50k or 44.6m)')
        general.add_option('-R', '--retries',
                        dest='retries', metavar='RETRIES', help='number of retries (default is 10)', default=10)
-       general.add_option('--playlist-start',
-                       dest='playliststart', metavar='NUMBER', help='playlist video to start at (default is 1)', default=1)
-       general.add_option('--playlist-end',
-                       dest='playlistend', metavar='NUMBER', help='playlist video to end at (default is last)', default=-1)
        general.add_option('--dump-user-agent',
                        action='store_true', dest='dump_user_agent',
                        help='display the current browser identification', default=False)
        general.add_option('--dump-user-agent',
                        action='store_true', dest='dump_user_agent',
                        help='display the current browser identification', default=False)
+       general.add_option('--list-extractors',
+                       action='store_true', dest='list_extractors',
+                       help='List all supported extractors and the URLs they would handle', default=False)
+
+       selection.add_option('--playlist-start',
+                       dest='playliststart', metavar='NUMBER', help='playlist video to start at (default is 1)', default=1)
+       selection.add_option('--playlist-end',
+                       dest='playlistend', metavar='NUMBER', help='playlist video to end at (default is last)', default=-1)
+       selection.add_option('--match-title', dest='matchtitle', metavar='REGEX',help='download only matching titles (regex or caseless sub-string)')
+       selection.add_option('--reject-title', dest='rejecttitle', metavar='REGEX',help='skip download for matching titles (regex or caseless sub-string)')
 
        authentication.add_option('-u', '--username',
                        dest='username', metavar='USERNAME', help='account username')
 
        authentication.add_option('-u', '--username',
                        dest='username', metavar='USERNAME', help='account username')
@@ -3179,15 +3859,19 @@ def parseOpts():
        video_format.add_option('-f', '--format',
                        action='store', dest='format', metavar='FORMAT', help='video format code')
        video_format.add_option('--all-formats',
        video_format.add_option('-f', '--format',
                        action='store', dest='format', metavar='FORMAT', help='video format code')
        video_format.add_option('--all-formats',
-                       action='store_const', dest='format', help='download all available video formats', const='-1')
+                       action='store_const', dest='format', help='download all available video formats', const='all')
        video_format.add_option('--max-quality',
                        action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
        video_format.add_option('--max-quality',
                        action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
+       video_format.add_option('-F', '--list-formats',
+                       action='store_true', dest='listformats', help='list all available formats (currently youtube only)')
 
 
        verbosity.add_option('-q', '--quiet',
                        action='store_true', dest='quiet', help='activates quiet mode', default=False)
        verbosity.add_option('-s', '--simulate',
 
 
        verbosity.add_option('-q', '--quiet',
                        action='store_true', dest='quiet', help='activates quiet mode', default=False)
        verbosity.add_option('-s', '--simulate',
-                       action='store_true', dest='simulate', help='do not download video', default=False)
+                       action='store_true', dest='simulate', help='do not download the video and do not write anything to disk', default=False)
+       verbosity.add_option('--skip-download',
+                       action='store_true', dest='skip_download', help='do not download the video', default=False)
        verbosity.add_option('-g', '--get-url',
                        action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
        verbosity.add_option('-e', '--get-title',
        verbosity.add_option('-g', '--get-url',
                        action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
        verbosity.add_option('-e', '--get-title',
@@ -3201,6 +3885,9 @@ def parseOpts():
        verbosity.add_option('--get-filename',
                        action='store_true', dest='getfilename',
                        help='simulate, quiet but print output filename', default=False)
        verbosity.add_option('--get-filename',
                        action='store_true', dest='getfilename',
                        help='simulate, quiet but print output filename', default=False)
+       verbosity.add_option('--get-format',
+                       action='store_true', dest='getformat',
+                       help='simulate, quiet but print output format', default=False)
        verbosity.add_option('--no-progress',
                        action='store_true', dest='noprogress', help='do not print progress bar', default=False)
        verbosity.add_option('--console-title',
        verbosity.add_option('--no-progress',
                        action='store_true', dest='noprogress', help='do not print progress bar', default=False)
        verbosity.add_option('--console-title',
@@ -3216,15 +3903,18 @@ def parseOpts():
                        action='store_true', dest='autonumber',
                        help='number downloaded files starting from 00000', default=False)
        filesystem.add_option('-o', '--output',
                        action='store_true', dest='autonumber',
                        help='number downloaded files starting from 00000', default=False)
        filesystem.add_option('-o', '--output',
-                       dest='outtmpl', metavar='TEMPLATE', help='output filename template')
+                       dest='outtmpl', metavar='TEMPLATE', help='output filename template. Use %(stitle)s to get the title, %(uploader)s for the uploader name, %(autonumber)s to get an automatically incremented number, %(ext)s for the filename extension, and %% for a literal percent')
        filesystem.add_option('-a', '--batch-file',
                        dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
        filesystem.add_option('-w', '--no-overwrites',
                        action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
        filesystem.add_option('-c', '--continue',
                        action='store_true', dest='continue_dl', help='resume partially downloaded files', default=False)
        filesystem.add_option('-a', '--batch-file',
                        dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
        filesystem.add_option('-w', '--no-overwrites',
                        action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
        filesystem.add_option('-c', '--continue',
                        action='store_true', dest='continue_dl', help='resume partially downloaded files', default=False)
+       filesystem.add_option('--no-continue',
+                       action='store_false', dest='continue_dl',
+                       help='do not resume partially downloaded files (restart from beginning)')
        filesystem.add_option('--cookies',
        filesystem.add_option('--cookies',
-                       dest='cookiefile', metavar='FILE', help='file to dump cookie jar to')
+                       dest='cookiefile', metavar='FILE', help='file to read cookies from and dump cookie jar in')
        filesystem.add_option('--no-part',
                        action='store_true', dest='nopart', help='do not use .part files', default=False)
        filesystem.add_option('--no-mtime',
        filesystem.add_option('--no-part',
                        action='store_true', dest='nopart', help='do not use .part files', default=False)
        filesystem.add_option('--no-mtime',
@@ -3241,10 +3931,15 @@ def parseOpts():
        postproc.add_option('--extract-audio', action='store_true', dest='extractaudio', default=False,
                        help='convert video files to audio-only files (requires ffmpeg and ffprobe)')
        postproc.add_option('--audio-format', metavar='FORMAT', dest='audioformat', default='best',
        postproc.add_option('--extract-audio', action='store_true', dest='extractaudio', default=False,
                        help='convert video files to audio-only files (requires ffmpeg and ffprobe)')
        postproc.add_option('--audio-format', metavar='FORMAT', dest='audioformat', default='best',
-                       help='"best", "aac" or "mp3"; best by default')
+                       help='"best", "aac", "vorbis" or "mp3"; best by default')
+       postproc.add_option('--audio-quality', metavar='QUALITY', dest='audioquality', default='128K',
+                       help='ffmpeg audio bitrate specification, 128k by default')
+       postproc.add_option('-k', '--keep-video', action='store_true', dest='keepvideo', default=False,
+                       help='keeps the video file on disk after the post-processing; the video is erased by default')
 
 
        parser.add_option_group(general)
 
 
        parser.add_option_group(general)
+       parser.add_option_group(selection)
        parser.add_option_group(filesystem)
        parser.add_option_group(verbosity)
        parser.add_option_group(video_format)
        parser.add_option_group(filesystem)
        parser.add_option_group(verbosity)
        parser.add_option_group(video_format)
@@ -3255,6 +3950,39 @@ def parseOpts():
 
        return parser, opts, args
 
 
        return parser, opts, args
 
+def gen_extractors():
+       """ Return a list of an instance of every supported extractor.
+       The order does matter; the first extractor matched is the one handling the URL.
+       """
+       youtube_ie = YoutubeIE()
+       google_ie = GoogleIE()
+       yahoo_ie = YahooIE()
+       return [
+               YoutubePlaylistIE(youtube_ie),
+               YoutubeUserIE(youtube_ie),
+               YoutubeSearchIE(youtube_ie),
+               youtube_ie,
+               MetacafeIE(youtube_ie),
+               DailymotionIE(),
+               google_ie,
+               GoogleSearchIE(google_ie),
+               PhotobucketIE(),
+               yahoo_ie,
+               YahooSearchIE(yahoo_ie),
+               DepositFilesIE(),
+               FacebookIE(),
+               BlipTVIE(),
+               VimeoIE(),
+               MyVideoIE(),
+               ComedyCentralIE(),
+               EscapistIE(),
+               CollegeHumorIE(),
+               XVideosIE(),
+               SoundcloudIE(),
+
+               GenericIE()
+       ]
+
 def main():
        parser, opts, args = parseOpts()
 
 def main():
        parser, opts, args = parseOpts()
 
@@ -3274,11 +4002,6 @@ def main():
                print std_headers['User-Agent']
                sys.exit(0)
 
                print std_headers['User-Agent']
                sys.exit(0)
 
-       # General configuration
-       cookie_processor = urllib2.HTTPCookieProcessor(jar)
-       urllib2.install_opener(urllib2.build_opener(urllib2.ProxyHandler(), cookie_processor, YoutubeDLHandler()))
-       socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
-
        # Batch file verification
        batchurls = []
        if opts.batchfile is not None:
        # Batch file verification
        batchurls = []
        if opts.batchfile is not None:
@@ -3294,6 +4017,23 @@ def main():
                        sys.exit(u'ERROR: batch file could not be read')
        all_urls = batchurls + args
 
                        sys.exit(u'ERROR: batch file could not be read')
        all_urls = batchurls + args
 
+       # General configuration
+       cookie_processor = urllib2.HTTPCookieProcessor(jar)
+       opener = urllib2.build_opener(urllib2.ProxyHandler(), cookie_processor, YoutubeDLHandler())
+       urllib2.install_opener(opener)
+       socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
+
+       extractors = gen_extractors()
+
+       if opts.list_extractors:
+               for ie in extractors:
+                       print(ie.IE_NAME)
+                       matchedUrls = filter(lambda url: ie.suitable(url), all_urls)
+                       all_urls = filter(lambda url: url not in matchedUrls, all_urls)
+                       for mu in matchedUrls:
+                               print(u'  ' + mu)
+               sys.exit(0)
+
        # Conflicting, missing and erroneous options
        if opts.usenetrc and (opts.username is not None or opts.password is not None):
                parser.error(u'using .netrc conflicts with giving username/password')
        # Conflicting, missing and erroneous options
        if opts.usenetrc and (opts.username is not None or opts.password is not None):
                parser.error(u'using .netrc conflicts with giving username/password')
@@ -3328,41 +4068,26 @@ def main():
        except (TypeError, ValueError), err:
                parser.error(u'invalid playlist end number specified')
        if opts.extractaudio:
        except (TypeError, ValueError), err:
                parser.error(u'invalid playlist end number specified')
        if opts.extractaudio:
-               if opts.audioformat not in ['best', 'aac', 'mp3']:
+               if opts.audioformat not in ['best', 'aac', 'mp3', 'vorbis']:
                        parser.error(u'invalid audio format specified')
 
                        parser.error(u'invalid audio format specified')
 
-       # Information extractors
-       youtube_ie = YoutubeIE()
-       metacafe_ie = MetacafeIE(youtube_ie)
-       dailymotion_ie = DailymotionIE()
-       youtube_pl_ie = YoutubePlaylistIE(youtube_ie)
-       youtube_user_ie = YoutubeUserIE(youtube_ie)
-       youtube_search_ie = YoutubeSearchIE(youtube_ie)
-       google_ie = GoogleIE()
-       google_search_ie = GoogleSearchIE(google_ie)
-       photobucket_ie = PhotobucketIE()
-       yahoo_ie = YahooIE()
-       yahoo_search_ie = YahooSearchIE(yahoo_ie)
-       deposit_files_ie = DepositFilesIE()
-       facebook_ie = FacebookIE()
-       bliptv_ie = BlipTVIE()
-       vimeo_ie = VimeoIE()
-       generic_ie = GenericIE()
-
        # File downloader
        fd = FileDownloader({
                'usenetrc': opts.usenetrc,
                'username': opts.username,
                'password': opts.password,
        # File downloader
        fd = FileDownloader({
                'usenetrc': opts.usenetrc,
                'username': opts.username,
                'password': opts.password,
-               'quiet': (opts.quiet or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename),
+               'quiet': (opts.quiet or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat),
                'forceurl': opts.geturl,
                'forcetitle': opts.gettitle,
                'forcethumbnail': opts.getthumbnail,
                'forcedescription': opts.getdescription,
                'forcefilename': opts.getfilename,
                'forceurl': opts.geturl,
                'forcetitle': opts.gettitle,
                'forcethumbnail': opts.getthumbnail,
                'forcedescription': opts.getdescription,
                'forcefilename': opts.getfilename,
-               'simulate': (opts.simulate or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename),
+               'forceformat': opts.getformat,
+               'simulate': opts.simulate,
+               'skip_download': (opts.skip_download or opts.simulate or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat),
                'format': opts.format,
                'format_limit': opts.format_limit,
                'format': opts.format,
                'format_limit': opts.format_limit,
+               'listformats': opts.listformats,
                'outtmpl': ((opts.outtmpl is not None and opts.outtmpl.decode(preferredencoding()))
                        or (opts.format == '-1' and opts.usetitle and u'%(stitle)s-%(id)s-%(format)s.%(ext)s')
                        or (opts.format == '-1' and opts.useliteral and u'%(title)s-%(id)s-%(format)s.%(ext)s')
                'outtmpl': ((opts.outtmpl is not None and opts.outtmpl.decode(preferredencoding()))
                        or (opts.format == '-1' and opts.usetitle and u'%(stitle)s-%(id)s-%(format)s.%(ext)s')
                        or (opts.format == '-1' and opts.useliteral and u'%(title)s-%(id)s-%(format)s.%(ext)s')
@@ -3387,30 +4112,15 @@ def main():
                'updatetime': opts.updatetime,
                'writedescription': opts.writedescription,
                'writeinfojson': opts.writeinfojson,
                'updatetime': opts.updatetime,
                'writedescription': opts.writedescription,
                'writeinfojson': opts.writeinfojson,
+               'matchtitle': opts.matchtitle,
+               'rejecttitle': opts.rejecttitle,
                })
                })
-       fd.add_info_extractor(youtube_search_ie)
-       fd.add_info_extractor(youtube_pl_ie)
-       fd.add_info_extractor(youtube_user_ie)
-       fd.add_info_extractor(metacafe_ie)
-       fd.add_info_extractor(dailymotion_ie)
-       fd.add_info_extractor(youtube_ie)
-       fd.add_info_extractor(google_ie)
-       fd.add_info_extractor(google_search_ie)
-       fd.add_info_extractor(photobucket_ie)
-       fd.add_info_extractor(yahoo_ie)
-       fd.add_info_extractor(yahoo_search_ie)
-       fd.add_info_extractor(deposit_files_ie)
-       fd.add_info_extractor(facebook_ie)
-       fd.add_info_extractor(bliptv_ie)
-       fd.add_info_extractor(vimeo_ie)
-
-       # This must come last since it's the
-       # fallback if none of the others work
-       fd.add_info_extractor(generic_ie)
+       for extractor in extractors:
+               fd.add_info_extractor(extractor)
 
        # PostProcessors
        if opts.extractaudio:
 
        # PostProcessors
        if opts.extractaudio:
-               fd.add_post_processor(FFmpegExtractAudioPP(preferredcodec=opts.audioformat))
+               fd.add_post_processor(FFmpegExtractAudioPP(preferredcodec=opts.audioformat, preferredquality=opts.audioquality, keepvideo=opts.keepvideo))
 
        # Update version
        if opts.update_self:
 
        # Update version
        if opts.update_self: