Fix PEP8 whitespace issues
[youtube-dl.git] / youtube-dl
index c8c2ea8..36ca6ba 100755 (executable)
@@ -6,6 +6,7 @@
 # Author: Vasyl' Vavrychuk
 # Author: Witold Baryluk
 # Author: Paweł Paprota
+# Author: Gergely Imreh
 # License: Public domain code
 import cookielib
 import ctypes
@@ -37,7 +38,7 @@ except ImportError:
        from cgi import parse_qs
 
 std_headers = {
-       'User-Agent': 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.12) Gecko/20101028 Firefox/3.6.12',
+       'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:5.0.1) Gecko/20100101 Firefox/5.0.1',
        'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
@@ -46,6 +47,7 @@ std_headers = {
 
 simple_title_chars = string.ascii_letters.decode('ascii') + string.digits.decode('ascii')
 
+
 def preferredencoding():
        """Get preferred encoding.
 
@@ -62,6 +64,7 @@ def preferredencoding():
                        yield pref
        return yield_preferredencoding().next()
 
+
 def htmlentity_transform(matchobj):
        """Transforms an HTML entity to a Unicode character.
 
@@ -88,11 +91,13 @@ def htmlentity_transform(matchobj):
        # Unknown entity in name, return its literal representation
        return (u'&%s;' % entity)
 
+
 def sanitize_title(utitle):
        """Sanitizes a video title so it could be used as part of a filename."""
        utitle = re.sub(ur'(?u)&(.+?);', htmlentity_transform, utitle)
        return utitle.replace(unicode(os.sep), u'%')
 
+
 def sanitize_open(filename, open_mode):
        """Try to open the given filename, and slightly tweak it if this fails.
 
@@ -119,13 +124,15 @@ def sanitize_open(filename, open_mode):
                stream = open(filename, open_mode)
                return (stream, filename)
 
+
 def timeconvert(timestr):
-    """Convert RFC 2822 defined time string into system timestamp"""
-    timestamp = None
-    timetuple = email.utils.parsedate_tz(timestr)
-    if timetuple is not None:
-        timestamp = email.utils.mktime_tz(timetuple)
-    return timestamp
+       """Convert RFC 2822 defined time string into system timestamp"""
+       timestamp = None
+       timetuple = email.utils.parsedate_tz(timestr)
+       if timetuple is not None:
+               timestamp = email.utils.mktime_tz(timetuple)
+       return timestamp
+
 
 class DownloadError(Exception):
        """Download Error exception.
@@ -136,6 +143,7 @@ class DownloadError(Exception):
        """
        pass
 
+
 class SameFileError(Exception):
        """Same File exception.
 
@@ -144,6 +152,7 @@ class SameFileError(Exception):
        """
        pass
 
+
 class PostProcessingError(Exception):
        """Post Processing exception.
 
@@ -152,6 +161,7 @@ class PostProcessingError(Exception):
        """
        pass
 
+
 class UnavailableVideoError(Exception):
        """Unavailable Format exception.
 
@@ -160,6 +170,7 @@ class UnavailableVideoError(Exception):
        """
        pass
 
+
 class ContentTooShortError(Exception):
        """Content Too Short exception.
 
@@ -175,6 +186,7 @@ class ContentTooShortError(Exception):
                self.downloaded = downloaded
                self.expected = expected
 
+
 class YoutubeDLHandler(urllib2.HTTPHandler):
        """Handler for HTTP requests and responses.
 
@@ -184,11 +196,11 @@ class YoutubeDLHandler(urllib2.HTTPHandler):
        a particular request, the original request in the program code only has
        to include the HTTP header "Youtubedl-No-Compression", which will be
        removed before making the real request.
-       
+
        Part of this code was copied from:
 
-         http://techknack.net/python-urllib2-handlers/
-         
+       http://techknack.net/python-urllib2-handlers/
+
        Andrew Rowls, the author of that code, agreed to release it to the
        public domain.
        """
@@ -199,7 +211,7 @@ class YoutubeDLHandler(urllib2.HTTPHandler):
                        return zlib.decompress(data, -zlib.MAX_WBITS)
                except zlib.error:
                        return zlib.decompress(data)
-       
+
        @staticmethod
        def addinfourl_wrapper(stream, headers, url, code):
                if hasattr(urllib2.addinfourl, 'getcode'):
@@ -207,7 +219,7 @@ class YoutubeDLHandler(urllib2.HTTPHandler):
                ret = urllib2.addinfourl(stream, headers, url)
                ret.code = code
                return ret
-       
+
        def http_request(self, req):
                for h in std_headers:
                        if h in req.headers:
@@ -233,6 +245,7 @@ class YoutubeDLHandler(urllib2.HTTPHandler):
                        resp.msg = old_resp.msg
                return resp
 
+
 class FileDownloader(object):
        """File Downloader class.
 
@@ -324,7 +337,7 @@ class FileDownloader(object):
                else:
                        exponent = long(math.log(bytes, 1024.0))
                suffix = 'bkMGTPEZY'[exponent]
-               converted = float(bytes) / float(1024**exponent)
+               converted = float(bytes) / float(1024 ** exponent)
                return '%.2f%s' % (converted, suffix)
 
        @staticmethod
@@ -462,7 +475,7 @@ class FileDownloader(object):
                        os.rename(old_filename, new_filename)
                except (IOError, OSError), err:
                        self.trouble(u'ERROR: unable to rename file')
-       
+
        def try_utime(self, filename, last_modified_hdr):
                """Try to set the last-modified time of the given file."""
                if last_modified_hdr is None:
@@ -476,7 +489,7 @@ class FileDownloader(object):
                if filetime is None:
                        return
                try:
-                       os.utime(filename,(time.time(), filetime))
+                       os.utime(filename, (time.time(), filetime))
                except:
                        pass
 
@@ -679,7 +692,7 @@ class FileDownloader(object):
                # Request parameters in case of being able to resume
                if self.params.get('continuedl', False) and resume_len != 0:
                        self.report_resuming_byte(resume_len)
-                       request.add_header('Range','bytes=%d-' % resume_len)
+                       request.add_header('Range', 'bytes=%d-' % resume_len)
                        open_mode = 'ab'
 
                count = 0
@@ -705,7 +718,7 @@ class FileDownloader(object):
                                        else:
                                                # Examine the reported length
                                                if (content_length is not None and
-                                                   (resume_len - 100 < long(content_length) < resume_len + 100)):
+                                                               (resume_len - 100 < long(content_length) < resume_len + 100)):
                                                        # The file had already been fully downloaded.
                                                        # Explanation to the above condition: in issue #175 it was revealed that
                                                        # YouTube sometimes adds or removes a few bytes from the end of the file,
@@ -783,6 +796,7 @@ class FileDownloader(object):
 
                return True
 
+
 class InfoExtractor(object):
        """Information Extractor class.
 
@@ -854,10 +868,11 @@ class InfoExtractor(object):
                """Real extraction process. Redefine in subclasses."""
                pass
 
+
 class YoutubeIE(InfoExtractor):
        """Information extractor for youtube.com."""
 
-       _VALID_URL = r'^((?:https?://)?(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/)(?:(?:(?:v|embed)/)|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=)))?([0-9A-Za-z_-]+)(?(1).+)?$'
+       _VALID_URL = r'^((?:https?://)?(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/)(?:(?:(?:v|embed|e)/)|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=)))?([0-9A-Za-z_-]+)(?(1).+)?$'
        _LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
        _LOGIN_URL = 'https://www.youtube.com/signup?next=/&gl=US&hl=en'
        _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
@@ -1008,7 +1023,7 @@ class YoutubeIE(InfoExtractor):
                self.report_video_info_webpage_download(video_id)
                for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
                        video_info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
-                                          % (video_id, el_type))
+                                       % (video_id, el_type))
                        request = urllib2.Request(video_info_url)
                        try:
                                video_info_webpage = urllib2.urlopen(request).read()
@@ -1055,10 +1070,10 @@ class YoutubeIE(InfoExtractor):
 
                # upload date
                upload_date = u'NA'
-               mobj = re.search(r'id="eow-date".*?>(.*?)</span>', video_webpage, re.DOTALL)
+               mobj = re.search(r'id="eow-date.*?>(.*?)</span>', video_webpage, re.DOTALL)
                if mobj is not None:
                        upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
-                       format_expressions = ['%d %B %Y', '%B %d %Y']
+                       format_expressions = ['%d %B %Y', '%B %d %Y', '%b %d %Y']
                        for expression in format_expressions:
                                try:
                                        upload_date = datetime.datetime.strptime(upload_date, expression).strftime('%Y%m%d')
@@ -1078,8 +1093,10 @@ class YoutubeIE(InfoExtractor):
                # Decide which formats to download
                req_format = self._downloader.params.get('format', None)
 
-               if 'fmt_url_map' in video_info:
-                       url_map = dict(tuple(pair.split('|')) for pair in video_info['fmt_url_map'][0].split(','))
+               if 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1:
+                       url_data_strs = video_info['url_encoded_fmt_stream_map'][0].split(',')
+                       url_data = [dict(pairStr.split('=') for pairStr in uds.split('&')) for uds in url_data_strs]
+                       url_map = dict((ud['itag'], urllib.unquote(ud['url'])) for ud in url_data)
                        format_limit = self._downloader.params.get('format_limit', None)
                        if format_limit is not None and format_limit in self._available_formats:
                                format_list = self._available_formats[self._available_formats.index(format_limit):]
@@ -1368,6 +1385,7 @@ class DailymotionIE(InfoExtractor):
                except UnavailableVideoError:
                        self._downloader.trouble(u'\nERROR: unable to download video')
 
+
 class GoogleIE(InfoExtractor):
        """Information extractor for video.google.com."""
 
@@ -1461,7 +1479,6 @@ class GoogleIE(InfoExtractor):
                else:   # we need something to pass to process_info
                        video_thumbnail = ''
 
-
                try:
                        # Process video information
                        self._downloader.process_info({
@@ -1661,7 +1678,8 @@ class YahooIE(InfoExtractor):
                        self._downloader.trouble(u'ERROR: unable to extract video description')
                        return
                video_description = mobj.group(1).decode('utf-8')
-               if not video_description: video_description = 'No description available.'
+               if not video_description:
+                       video_description = 'No description available.'
 
                # Extract video height and width
                mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage)
@@ -1682,8 +1700,8 @@ class YahooIE(InfoExtractor):
                yv_lg = 'R0xx6idZnW2zlrKP8xxAIR'  # not sure what this represents
                yv_bitrate = '700'  # according to Wikipedia this is hard-coded
                request = urllib2.Request('http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?node_id=' + video_id +
-                                         '&tech=flash&mode=playlist&lg=' + yv_lg + '&bitrate=' + yv_bitrate + '&vidH=' + yv_video_height +
-                                         '&vidW=' + yv_video_width + '&swf=as3&rd=video.yahoo.com&tk=null&adsupported=v1,v2,&eventid=1301797')
+                               '&tech=flash&mode=playlist&lg=' + yv_lg + '&bitrate=' + yv_bitrate + '&vidH=' + yv_video_height +
+                               '&vidW=' + yv_video_width + '&swf=as3&rd=video.yahoo.com&tk=null&adsupported=v1,v2,&eventid=1301797')
                try:
                        self.report_download_webpage(video_id)
                        webpage = urllib2.urlopen(request).read()
@@ -1776,11 +1794,11 @@ class GenericIE(InfoExtractor):
                        return
 
                video_url = urllib.unquote(mobj.group(1))
-               video_id  = os.path.basename(video_url)
+               video_id = os.path.basename(video_url)
 
                # here's a fun little line of code for you:
                video_extension = os.path.splitext(video_id)[1][1:]
-               video_id        = os.path.splitext(video_id)[0]
+               video_id = os.path.splitext(video_id)[0]
 
                # it's tempting to parse this further, but you would
                # have to take into account all the variations like
@@ -1853,7 +1871,7 @@ class YoutubeSearchIE(InfoExtractor):
 
                prefix, query = query.split(':')
                prefix = prefix[8:]
-               query  = query.encode('utf-8')
+               query = query.encode('utf-8')
                if prefix == '':
                        self._download_n_results(query, 1)
                        return
@@ -1867,7 +1885,7 @@ class YoutubeSearchIE(InfoExtractor):
                                        self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
                                        return
                                elif n > self._max_youtube_results:
-                                       self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)'  % (self._max_youtube_results, n))
+                                       self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n))
                                        n = self._max_youtube_results
                                self._download_n_results(query, n)
                                return
@@ -1911,6 +1929,7 @@ class YoutubeSearchIE(InfoExtractor):
 
                        pagenum = pagenum + 1
 
+
 class GoogleSearchIE(InfoExtractor):
        """Information Extractor for Google Video search queries."""
        _VALID_QUERY = r'gvsearch(\d+|all)?:[\s\S]+'
@@ -1944,7 +1963,7 @@ class GoogleSearchIE(InfoExtractor):
 
                prefix, query = query.split(':')
                prefix = prefix[8:]
-               query  = query.encode('utf-8')
+               query = query.encode('utf-8')
                if prefix == '':
                        self._download_n_results(query, 1)
                        return
@@ -1958,7 +1977,7 @@ class GoogleSearchIE(InfoExtractor):
                                        self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
                                        return
                                elif n > self._max_google_results:
-                                       self._downloader.to_stderr(u'WARNING: gvsearch returns max %i results (you requested %i)'  % (self._max_google_results, n))
+                                       self._downloader.to_stderr(u'WARNING: gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n))
                                        n = self._max_google_results
                                self._download_n_results(query, n)
                                return
@@ -2002,6 +2021,7 @@ class GoogleSearchIE(InfoExtractor):
 
                        pagenum = pagenum + 1
 
+
 class YahooSearchIE(InfoExtractor):
        """Information Extractor for Yahoo! Video search queries."""
        _VALID_QUERY = r'yvsearch(\d+|all)?:[\s\S]+'
@@ -2035,7 +2055,7 @@ class YahooSearchIE(InfoExtractor):
 
                prefix, query = query.split(':')
                prefix = prefix[8:]
-               query  = query.encode('utf-8')
+               query = query.encode('utf-8')
                if prefix == '':
                        self._download_n_results(query, 1)
                        return
@@ -2049,7 +2069,7 @@ class YahooSearchIE(InfoExtractor):
                                        self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
                                        return
                                elif n > self._max_yahoo_results:
-                                       self._downloader.to_stderr(u'WARNING: yvsearch returns max %i results (you requested %i)'  % (self._max_yahoo_results, n))
+                                       self._downloader.to_stderr(u'WARNING: yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
                                        n = self._max_yahoo_results
                                self._download_n_results(query, n)
                                return
@@ -2093,11 +2113,12 @@ class YahooSearchIE(InfoExtractor):
 
                        pagenum = pagenum + 1
 
+
 class YoutubePlaylistIE(InfoExtractor):
        """Information Extractor for YouTube playlists."""
 
-       _VALID_URL = r'(?:http://)?(?:\w+\.)?youtube.com/(?:(?:view_play_list|my_playlists)\?.*?p=|user/.*?/user/|p/)([^&]+).*'
-       _TEMPLATE_URL = 'http://www.youtube.com/view_play_list?p=%s&page=%s&gl=US&hl=en'
+       _VALID_URL = r'(?:http://)?(?:\w+\.)?youtube.com/(?:(?:view_play_list|my_playlists|artist)\?.*?(p|a)=|user/.*?/user/|p/|user/.*?#[pg]/c/)([0-9A-Za-z]+)(?:/.*?/([0-9A-Za-z_-]+))?.*'
+       _TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
        _VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
        _MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
        _youtube_ie = None
@@ -2124,14 +2145,26 @@ class YoutubePlaylistIE(InfoExtractor):
                        self._downloader.trouble(u'ERROR: invalid url: %s' % url)
                        return
 
+               # Single video case
+               if mobj.group(3) is not None:
+                       self._youtube_ie.extract(mobj.group(3))
+                       return
+
                # Download playlist pages
-               playlist_id = mobj.group(1)
+               # prefix is 'p' as default for playlists but there are other types that need extra care
+               playlist_prefix = mobj.group(1)
+               if playlist_prefix == 'a':
+                       playlist_access = 'artist'
+               else:
+                       playlist_prefix = 'p'
+                       playlist_access = 'view_play_list'
+               playlist_id = mobj.group(2)
                video_ids = []
                pagenum = 1
 
                while True:
                        self.report_download_page(playlist_id, pagenum)
-                       request = urllib2.Request(self._TEMPLATE_URL % (playlist_id, pagenum))
+                       request = urllib2.Request(self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum))
                        try:
                                page = urllib2.urlopen(request).read()
                        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
@@ -2157,6 +2190,7 @@ class YoutubePlaylistIE(InfoExtractor):
                        self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
                return
 
+
 class YoutubeUserIE(InfoExtractor):
        """Information Extractor for YouTube users."""
 
@@ -2178,7 +2212,7 @@ class YoutubeUserIE(InfoExtractor):
        def report_download_page(self, username, start_index):
                """Report attempt to download user page."""
                self._downloader.to_screen(u'[youtube] user %s: Downloading video ids from %d to %d' %
-                                          (username, start_index, start_index + self._GDATA_PAGE_SIZE))
+                               (username, start_index, start_index + self._GDATA_PAGE_SIZE))
 
        def _real_initialize(self):
                self._youtube_ie.initialize()
@@ -2240,9 +2274,9 @@ class YoutubeUserIE(InfoExtractor):
                        video_ids = video_ids[playliststart:]
                else:
                        video_ids = video_ids[playliststart:playlistend]
-                       
+
                self._downloader.to_screen("[youtube] user %s: Collected %d video ids (downloading %d of them)" %
-                                          (username, all_ids_count, len(video_ids)))
+                               (username, all_ids_count, len(video_ids)))
 
                for video_id in video_ids:
                        self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % video_id)
@@ -2327,6 +2361,231 @@ class DepositFilesIE(InfoExtractor):
                except UnavailableVideoError, err:
                        self._downloader.trouble(u'ERROR: unable to download file')
 
+
+class FacebookIE(InfoExtractor):
+       """Information Extractor for Facebook"""
+
+       _VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook.com/video/video.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
+       _LOGIN_URL = 'https://login.facebook.com/login.php?m&next=http%3A%2F%2Fm.facebook.com%2Fhome.php&'
+       _NETRC_MACHINE = 'facebook'
+       _available_formats = ['highqual', 'lowqual']
+       _video_extensions = {
+               'highqual': 'mp4',
+               'lowqual': 'mp4',
+       }
+
+       def __init__(self, downloader=None):
+               InfoExtractor.__init__(self, downloader)
+
+       @staticmethod
+       def suitable(url):
+               return (re.match(FacebookIE._VALID_URL, url) is not None)
+
+       def _reporter(self, message):
+               """Add header and report message."""
+               self._downloader.to_screen(u'[facebook] %s' % message)
+
+       def report_login(self):
+               """Report attempt to log in."""
+               self._reporter(u'Logging in')
+
+       def report_video_webpage_download(self, video_id):
+               """Report attempt to download video webpage."""
+               self._reporter(u'%s: Downloading video webpage' % video_id)
+
+       def report_information_extraction(self, video_id):
+               """Report attempt to extract video information."""
+               self._reporter(u'%s: Extracting video information' % video_id)
+
+       def _parse_page(self, video_webpage):
+               """Extract video information from page"""
+               # General data
+               data = {'title': r'class="video_title datawrap">(.*?)</',
+                       'description': r'<div class="datawrap">(.*?)</div>',
+                       'owner': r'\("video_owner_name", "(.*?)"\)',
+                       'upload_date': r'data-date="(.*?)"',
+                       'thumbnail':  r'\("thumb_url", "(?P<THUMB>.*?)"\)',
+                       }
+               video_info = {}
+               for piece in data.keys():
+                       mobj = re.search(data[piece], video_webpage)
+                       if mobj is not None:
+                               video_info[piece] = urllib.unquote_plus(mobj.group(1).decode("unicode_escape"))
+
+               # Video urls
+               video_urls = {}
+               for fmt in self._available_formats:
+                       mobj = re.search(r'\("%s_src\", "(.+?)"\)' % fmt, video_webpage)
+                       if mobj is not None:
+                               # URL is in a Javascript segment inside an escaped Unicode format within
+                               # the generally utf-8 page
+                               video_urls[fmt] = urllib.unquote_plus(mobj.group(1).decode("unicode_escape"))
+               video_info['video_urls'] = video_urls
+
+               return video_info
+
+       def _real_initialize(self):
+               if self._downloader is None:
+                       return
+
+               useremail = None
+               password = None
+               downloader_params = self._downloader.params
+
+               # Attempt to use provided username and password or .netrc data
+               if downloader_params.get('username', None) is not None:
+                       useremail = downloader_params['username']
+                       password = downloader_params['password']
+               elif downloader_params.get('usenetrc', False):
+                       try:
+                               info = netrc.netrc().authenticators(self._NETRC_MACHINE)
+                               if info is not None:
+                                       useremail = info[0]
+                                       password = info[2]
+                               else:
+                                       raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
+                       except (IOError, netrc.NetrcParseError), err:
+                               self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % str(err))
+                               return
+
+               if useremail is None:
+                       return
+
+               # Log in
+               login_form = {
+                       'email': useremail,
+                       'pass': password,
+                       'login': 'Log+In'
+                       }
+               request = urllib2.Request(self._LOGIN_URL, urllib.urlencode(login_form))
+               try:
+                       self.report_login()
+                       login_results = urllib2.urlopen(request).read()
+                       if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
+                               self._downloader.to_stderr(u'WARNING: unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.')
+                               return
+               except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                       self._downloader.to_stderr(u'WARNING: unable to log in: %s' % str(err))
+                       return
+
+       def _real_extract(self, url):
+               mobj = re.match(self._VALID_URL, url)
+               if mobj is None:
+                       self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+                       return
+               video_id = mobj.group('ID')
+
+               # Get video webpage
+               self.report_video_webpage_download(video_id)
+               request = urllib2.Request('https://www.facebook.com/video/video.php?v=%s' % video_id)
+               try:
+                       page = urllib2.urlopen(request)
+                       video_webpage = page.read()
+               except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+                       self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
+                       return
+
+               # Start extracting information
+               self.report_information_extraction(video_id)
+
+               # Extract information
+               video_info = self._parse_page(video_webpage)
+
+               # uploader
+               if 'owner' not in video_info:
+                       self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
+                       return
+               video_uploader = video_info['owner']
+
+               # title
+               if 'title' not in video_info:
+                       self._downloader.trouble(u'ERROR: unable to extract video title')
+                       return
+               video_title = video_info['title']
+               video_title = video_title.decode('utf-8')
+               video_title = sanitize_title(video_title)
+
+               # simplified title
+               simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
+               simple_title = simple_title.strip(ur'_')
+
+               # thumbnail image
+               if 'thumbnail' not in video_info:
+                       self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
+                       video_thumbnail = ''
+               else:
+                       video_thumbnail = video_info['thumbnail']
+
+               # upload date
+               upload_date = u'NA'
+               if 'upload_date' in video_info:
+                       upload_time = video_info['upload_date']
+                       timetuple = email.utils.parsedate_tz(upload_time)
+                       if timetuple is not None:
+                               try:
+                                       upload_date = time.strftime('%Y%m%d', timetuple[0:9])
+                               except:
+                                       pass
+
+               # description
+               video_description = 'No description available.'
+               if (self._downloader.params.get('forcedescription', False) and
+                       'description' in video_info):
+                       video_description = video_info['description']
+
+               url_map = video_info['video_urls']
+               if len(url_map.keys()) > 0:
+                       # Decide which formats to download
+                       req_format = self._downloader.params.get('format', None)
+                       format_limit = self._downloader.params.get('format_limit', None)
+
+                       if format_limit is not None and format_limit in self._available_formats:
+                               format_list = self._available_formats[self._available_formats.index(format_limit):]
+                       else:
+                               format_list = self._available_formats
+                       existing_formats = [x for x in format_list if x in url_map]
+                       if len(existing_formats) == 0:
+                               self._downloader.trouble(u'ERROR: no known formats available for video')
+                               return
+                       if req_format is None:
+                               video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
+                       elif req_format == '-1':
+                               video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
+                       else:
+                               # Specific format
+                               if req_format not in url_map:
+                                       self._downloader.trouble(u'ERROR: requested format not available')
+                                       return
+                               video_url_list = [(req_format, url_map[req_format])] # Specific format
+
+               for format_param, video_real_url in video_url_list:
+
+                       # At this point we have a new video
+                       self._downloader.increment_downloads()
+
+                       # Extension
+                       video_extension = self._video_extensions.get(format_param, 'mp4')
+
+                       # Find the video URL in fmt_url_map or conn paramters
+                       try:
+                               # Process video information
+                               self._downloader.process_info({
+                                       'id':           video_id.decode('utf-8'),
+                                       'url':          video_real_url.decode('utf-8'),
+                                       'uploader':     video_uploader.decode('utf-8'),
+                                       'upload_date':  upload_date,
+                                       'title':        video_title,
+                                       'stitle':       simple_title,
+                                       'ext':          video_extension.decode('utf-8'),
+                                       'format':       (format_param is None and u'NA' or format_param.decode('utf-8')),
+                                       'thumbnail':    video_thumbnail.decode('utf-8'),
+                                       'description':  video_description.decode('utf-8'),
+                                       'player_url':   None,
+                               })
+                       except UnavailableVideoError, err:
+                               self._downloader.trouble(u'\nERROR: unable to download video')
+
+
 class PostProcessor(object):
        """Post Processor class.
 
@@ -2373,6 +2632,89 @@ class PostProcessor(object):
                """
                return information # by default, do nothing
 
+
+class FFmpegExtractAudioPP(PostProcessor):
+	"""Post processor that converts a downloaded video file into an
+	audio-only file using the external ffmpeg and ffprobe programs.
+
+	The preferred codec may be 'best' (keep the existing audio stream
+	losslessly when it is aac or mp3, otherwise transcode to mp3),
+	'aac' or 'mp3'.
+	"""
+
+	def __init__(self, downloader=None, preferredcodec=None):
+		PostProcessor.__init__(self, downloader)
+		# Default to 'best': keep the source audio codec when possible.
+		if preferredcodec is None:
+			preferredcodec = 'best'
+		self._preferredcodec = preferredcodec
+
+	@staticmethod
+	def get_audio_codec(path):
+		"""Return the audio codec name of the file at *path*, or None.
+
+		Runs ffprobe and scans its stream listing; returns None if
+		ffprobe is unavailable, exits non-zero, or reports no audio
+		stream.
+		"""
+		try:
+			cmd = ['ffprobe', '-show_streams', '--', path]
+			handle = subprocess.Popen(cmd, stderr=file(os.path.devnull, 'w'), stdout=subprocess.PIPE)
+			output = handle.communicate()[0]
+			if handle.wait() != 0:
+				return None
+		except (IOError, OSError):
+			return None
+		audio_codec = None
+		# ffprobe lists codec_name= before codec_type= within each
+		# stream section, so remember the last codec_name seen and
+		# return it once a stream is confirmed to be audio.
+		for line in output.split('\n'):
+			if line.startswith('codec_name='):
+				audio_codec = line.split('=')[1].strip()
+			elif line.strip() == 'codec_type=audio' and audio_codec is not None:
+				return audio_codec
+		return None
+
+	@staticmethod
+	def run_ffmpeg(path, out_path, codec, more_opts):
+		"""Extract the audio stream of *path* into *out_path*.
+
+		Returns True on success, False if ffmpeg is unavailable or
+		exits with a non-zero status. ffmpeg's own output is discarded.
+		"""
+		try:
+			# -vn drops the video stream; -y overwrites out_path.
+			cmd = ['ffmpeg', '-y', '-i', path, '-vn', '-acodec', codec] + more_opts + ['--', out_path]
+			ret = subprocess.call(cmd, stdout=file(os.path.devnull, 'w'), stderr=subprocess.STDOUT)
+			return (ret == 0)
+		except (IOError, OSError):
+			return False
+
+	def run(self, information):
+		"""Convert the downloaded file to audio and remove the original.
+
+		Returns the updated *information* dict (with 'filepath'
+		pointing at the new audio file) on success, or None on any
+		failure (warnings are reported via the downloader).
+		"""
+		path = information['filepath']
+
+		filecodec = self.get_audio_codec(path)
+		if filecodec is None:
+			self._downloader.to_stderr(u'WARNING: unable to obtain file audio codec with ffprobe')
+			return None
+
+		more_opts = []
+		if self._preferredcodec == 'best' or self._preferredcodec == filecodec:
+			if filecodec == 'aac' or filecodec == 'mp3':
+				# Lossless if possible
+				acodec = 'copy'
+				extension = filecodec
+				if filecodec == 'aac':
+					# Raw AAC must be wrapped in an ADTS container
+					# to produce a playable standalone file.
+					more_opts = ['-f', 'adts']
+			else:
+				# MP3 otherwise.
+				acodec = 'libmp3lame'
+				extension = 'mp3'
+				more_opts = ['-ab', '128k']
+		else:
+			# We convert the audio (lossy)
+			acodec = {'mp3': 'libmp3lame', 'aac': 'aac'}[self._preferredcodec]
+			extension = self._preferredcodec
+			more_opts = ['-ab', '128k']
+			if self._preferredcodec == 'aac':
+				more_opts += ['-f', 'adts']
+
+		# Replace the video extension with the chosen audio extension.
+		(prefix, ext) = os.path.splitext(path)
+		new_path = prefix + '.' + extension
+		self._downloader.to_screen(u'[ffmpeg] Destination: %s' % new_path)
+		status = self.run_ffmpeg(path, new_path, acodec, more_opts)
+
+		if not status:
+			self._downloader.to_stderr(u'WARNING: error running ffmpeg')
+			return None
+
+		# Delete the source video; if that fails, report and abort so
+		# the user is not left guessing which file is which.
+		try:
+			os.remove(path)
+		except (IOError, OSError):
+			self._downloader.to_stderr(u'WARNING: Unable to remove downloaded video file')
+			return None
+
+		information['filepath'] = new_path
+		return information
+
 ### MAIN PROGRAM ###
 if __name__ == '__main__':
        try:
@@ -2405,7 +2747,7 @@ if __name__ == '__main__':
                # Parse command line
                parser = optparse.OptionParser(
                        usage='Usage: %prog [options] url...',
-                       version='2010.12.09',
+                       version='2011.08.04',
                        conflict_handler='resolve',
                )
 
@@ -2497,6 +2839,13 @@ if __name__ == '__main__':
                                help='do not use the Last-modified header to set the file modification time', default=True)
                parser.add_option_group(filesystem)
 
+               postproc = optparse.OptionGroup(parser, 'Post-processing Options')
+               postproc.add_option('--extract-audio', action='store_true', dest='extractaudio', default=False,
+                               help='convert video files to audio-only files (requires ffmpeg and ffprobe)')
+               postproc.add_option('--audio-format', metavar='FORMAT', dest='audioformat', default='best',
+                               help='"best", "aac" or "mp3"; best by default')
+               parser.add_option_group(postproc)
+
                (opts, args) = parser.parse_args()
 
                # Open appropriate CookieJar
@@ -2568,6 +2917,9 @@ if __name__ == '__main__':
                                raise ValueError
                except (TypeError, ValueError), err:
                        parser.error(u'invalid playlist end number specified')
+               if opts.extractaudio:
+                       if opts.audioformat not in ['best', 'aac', 'mp3']:
+                               parser.error(u'invalid audio format specified')
 
                # Information extractors
                youtube_ie = YoutubeIE()
@@ -2582,6 +2934,7 @@ if __name__ == '__main__':
                yahoo_ie = YahooIE()
                yahoo_search_ie = YahooSearchIE(yahoo_ie)
                deposit_files_ie = DepositFilesIE()
+               facebook_ie = FacebookIE()
                generic_ie = GenericIE()
 
                # File downloader
@@ -2633,11 +2986,16 @@ if __name__ == '__main__':
                fd.add_info_extractor(yahoo_ie)
                fd.add_info_extractor(yahoo_search_ie)
                fd.add_info_extractor(deposit_files_ie)
+               fd.add_info_extractor(facebook_ie)
 
                # This must come last since it's the
                # fallback if none of the others work
                fd.add_info_extractor(generic_ie)
 
+               # PostProcessors
+               if opts.extractaudio:
+                       fd.add_post_processor(FFmpegExtractAudioPP(preferredcodec=opts.audioformat))
+
                # Update version
                if opts.update_self:
                        update_self(fd, sys.argv[0])