/feedparser.py
Python | 4013 lines | 3681 code | 150 blank | 182 comment | 237 complexity | 1cd2e1c4a1372515454a48a1105d69e9 MD5 | raw file
Large files are truncated, but you can click here to view the full file
"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit https://code.google.com/p/feedparser/ for the latest version
Visit http://packages.python.org/feedparser/ for the latest documentation
Required: Python 2.4 or later
Recommended: iconv_codec <http://cjkpython.i18n.org/>
"""

__version__ = "5.1.3"
__license__ = """
Copyright (c) 2010-2012 Kurt McKee <contactme@kurtmckee.org>
Copyright (c) 2002-2008 Mark Pilgrim
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
                    "John Beimler <http://john.beimler.org/>",
                    "Fazal Majid <http://www.majid.info/mylos/weblog/>",
                    "Aaron Swartz <http://aaronsw.com/>",
                    "Kevin Marks <http://epeus.blogspot.com/>",
                    "Sam Ruby <http://intertwingly.net/>",
                    "Ade Oshineye <http://blog.oshineye.com/>",
                    "Martin Pool <http://sourcefrog.net/>",
                    "Kurt McKee <http://kurtmckee.org/>",
                    "Bernd Schlapsi <https://github.com/brot>",]

# Identifies feedparser to servers when downloading feeds.  Applications
# embedding feedparser should replace this with their own name and URL.
USER_AGENT = "UniversalFeedParser/%s +https://code.google.com/p/feedparser/" % __version__

# Sent as the HTTP "Accept" header when downloading feeds; set to None to
# omit the header entirely.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"

# SAX driver names to try first; if none is installed, Python falls back to
# its own list of pre-installed parsers that support everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]

# Set to 1 to run HTML markup through HTML Tidy.  Requires mxTidy
# <http://www.egenix.com/files/python/mxTidy.html> or utidylib
# <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0

# Python interfaces for HTML Tidy, most preferred first.  Only consulted
# when TIDY_MARKUP is 1.
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]

# Feature switches (1 = on, 0 = off):
RESOLVE_RELATIVE_URIS = 1   # resolve all relative URIs automatically
SANITIZE_HTML = 1           # sanitize potentially unsafe HTML content
PARSE_MICROFORMATS = 1      # parse microformat content embedded in entries
# ---------- Python 3 modules (make it work if possible) ----------
try:
    import rfc822
except ImportError:
    # Python 3 dropped rfc822; email._parseaddr carries the same
    # date-parsing helpers (parsedate, parsedate_tz).
    from email import _parseaddr as rfc822

try:
    # Python 3.1 introduces bytes.maketrans and simultaneously
    # deprecates string.maketrans; use bytes.maketrans if possible
    _maketrans = bytes.maketrans
except (NameError, AttributeError):
    import string
    _maketrans = string.maketrans

# base64 support for Atom feeds that contain embedded binary data
try:
    import base64, binascii
except ImportError:
    base64 = binascii = None
else:
    # Python 3.1 deprecates decodestring in favor of decodebytes, and
    # Python 3.9 removes decodestring entirely.  The `or` keeps the
    # lookup of the legacy name lazy: the old form
    # getattr(base64, 'decodebytes', base64.decodestring) evaluated the
    # default eagerly and raised AttributeError on Python 3.9+.
    _base64decode = getattr(base64, 'decodebytes', None) or base64.decodestring

# _s2bytes: convert a UTF-8 str to bytes if the interpreter is Python 3
# _l2bytes: convert a list of ints to bytes if the interpreter is Python 3
try:
    if bytes is str:
        # In Python 2.5 and below, bytes doesn't exist (NameError)
        # In Python 2.6 and above, bytes and str are the same type
        raise NameError
except NameError:
    # Python 2
    def _s2bytes(s):
        return s
    def _l2bytes(l):
        return ''.join(map(chr, l))
else:
    # Python 3
    def _s2bytes(s):
        return bytes(s, 'utf8')
    def _l2bytes(l):
        return bytes(l)
# If you want feedparser to allow all URL schemes, set this to ()
# List culled from Python's urlparse documentation at:
# http://docs.python.org/library/urlparse.html
# as well as from "URI scheme" at Wikipedia:
# https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme
# Many more will likely need to be added!
# NOTE: the original listed 'mms' and 'svn' twice; the duplicates have been
# removed -- membership tests are unchanged.
ACCEPTABLE_URI_SCHEMES = (
    'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'magnet',
    'mailto', 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu',
    'sftp', 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet',
    'wais',
    # Additional common-but-unofficial schemes
    'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs',
    'irc6', 'itms', 'msnim', 'skype', 'ssh', 'smb', 'ymsg',
)
#ACCEPTABLE_URI_SCHEMES = ()
# ---------- required modules (should come with any Python distribution) ----------
import cgi
import codecs
import copy
import datetime
import re
import struct
import time
import types
import urllib
import urllib2
import urlparse
import warnings
from htmlentitydefs import name2codepoint, codepoint2name, entitydefs
# Prefer BytesIO (Python 3 / modern Python 2: feedparser handles byte
# strings internally), then the C and pure-Python StringIO fallbacks.
try:
    from io import BytesIO as _StringIO
except ImportError:
    try:
        from cStringIO import StringIO as _StringIO
    except ImportError:
        from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------

# gzip ships with most Python distributions but may be missing from
# hand-compiled interpreters; degrade gracefully to None in that case.
try:
    import gzip
except ImportError:
    gzip = None

# zlib is likewise optional; None signals "no deflate support".
try:
    import zlib
except ImportError:
    zlib = None
# If a real XML parser is available, feedparser will attempt to use it.  feedparser has
# been tested with the built-in SAX parser and libxml2.  On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
    import xml.sax
    from xml.sax.saxutils import escape as _xmlescape
except ImportError:
    _XML_AVAILABLE = 0
    def _xmlescape(data, entities={}):
        """Minimal stand-in for xml.sax.saxutils.escape.

        The ampersand must be replaced first so that the entities produced
        for '>' and '<' are not themselves re-escaped.
        """
        data = data.replace('&', '&amp;')
        data = data.replace('>', '&gt;')
        data = data.replace('<', '&lt;')
        # Iterate key/value pairs explicitly; iterating the dict directly
        # would yield only the keys and fail to unpack.
        for char, entity in entities.items():
            data = data.replace(char, entity)
        return data
else:
    try:
        xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
    except xml.sax.SAXReaderNotAvailable:
        _XML_AVAILABLE = 0
    else:
        _XML_AVAILABLE = 1
# sgmllib is not available by default in Python 3; if the end user doesn't have
# it available then we'll lose illformed XML parsing, content sanitizing, and
# microformat support (at least while feedparser depends on BeautifulSoup).
try:
    import sgmllib
except ImportError:
    # This is probably Python 3, which doesn't include sgmllib anymore
    _SGML_AVAILABLE = 0
    # Mock sgmllib enough to allow subclassing later on
    class sgmllib(object):
        class SGMLParser(object):
            def goahead(self, i):
                pass
            def parse_starttag(self, i):
                pass
else:
    _SGML_AVAILABLE = 1
    # sgmllib defines a number of module-level regular expressions that are
    # insufficient for the XML parsing feedparser needs. Rather than modify
    # the variables directly in sgmllib, they're defined here using the same
    # names, and the compiled code objects of several sgmllib.SGMLParser
    # methods are copied into _BaseHTMLProcessor so that they execute in
    # feedparser's scope instead of sgmllib's scope.
    charref = re.compile('&#(\d+|[xX][0-9a-fA-F]+);')
    tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
    attrfind = re.compile(
        r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)[$]?(\s*=\s*'
        r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?'
    )
    # Unfortunately, these must be copied over to prevent NameError exceptions
    entityref = sgmllib.entityref
    incomplete = sgmllib.incomplete
    interesting = sgmllib.interesting
    shorttag = sgmllib.shorttag
    shorttagopen = sgmllib.shorttagopen
    starttagopen = sgmllib.starttagopen

    class _EndBracketRegEx:
        # Drop-in replacement for sgmllib's endbracket pattern object; it
        # only needs to expose a search() returning an object with start().
        def __init__(self):
            # Overriding the built-in sgmllib.endbracket regex allows the
            # parser to find angle brackets embedded in element attributes.
            self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
        def search(self, target, index=0):
            match = self.endbracket.match(target, index)
            if match is not None:
                # Returning a new object in the calling thread's context
                # resolves a thread-safety issue.
                return EndBracketMatch(match)
            return None

    class EndBracketMatch:
        # Thin wrapper whose start() reports the *end* of the match, which
        # is what the sgmllib callers expect from endbracket.search().
        def __init__(self, match):
            self.match = match
        def start(self, n):
            return self.match.end(n)

    endbracket = _EndBracketRegEx()
# iconv_codec provides support for more character encodings.
# It's available from http://cjkpython.i18n.org/
# Importing it registers the codecs as a side effect; absence is harmless.
try:
    import iconv_codec
except ImportError:
    pass

# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
    import chardet
except ImportError:
    chardet = None

# BeautifulSoup is used to extract microformat content from HTML
# feedparser is tested using BeautifulSoup 3.2.0
# http://www.crummy.com/software/BeautifulSoup/
try:
    import BeautifulSoup
except ImportError:
    BeautifulSoup = None
    # Microformat parsing depends on BeautifulSoup; force it off without it.
    PARSE_MICROFORMATS = False
# ---------- don't touch these ----------

# Informational exceptions collected in the result's bozo_exception field.
class ThingsNobodyCaresAboutButMe(Exception):
    pass

class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe):
    pass

class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe):
    pass

class NonXMLContentType(ThingsNobodyCaresAboutButMe):
    pass

class UndeclaredNamespace(Exception):
    pass

# Maps internal version identifiers to human-readable feed format names.
SUPPORTED_VERSIONS = {
    '': u'unknown',
    'rss090': u'RSS 0.90',
    'rss091n': u'RSS 0.91 (Netscape)',
    'rss091u': u'RSS 0.91 (Userland)',
    'rss092': u'RSS 0.92',
    'rss093': u'RSS 0.93',
    'rss094': u'RSS 0.94',
    'rss20': u'RSS 2.0',
    'rss10': u'RSS 1.0',
    'rss': u'RSS (unknown version)',
    'atom01': u'Atom 0.1',
    'atom02': u'Atom 0.2',
    'atom03': u'Atom 0.3',
    'atom10': u'Atom 1.0',
    'atom': u'Atom (unknown version)',
    'cdf': u'CDF',
}
class FeedParserDict(dict):
    """Dictionary with attribute access and legacy key aliasing.

    Element names from older feed formats / older feedparser releases
    (e.g. 'channel', 'items', 'modified') are transparently mapped to
    their modern equivalents, and every key is also readable as an
    attribute (d.feed == d['feed']).

    The only code change from the original is the raise syntax: the
    Python 2-only `raise X, msg` statements are now call-form raises,
    which behave identically on Python 2 and are valid on Python 3.
    """
    # Legacy name -> current name; a list value means "first present wins".
    keymap = {'channel': 'feed',
              'items': 'entries',
              'guid': 'id',
              'date': 'updated',
              'date_parsed': 'updated_parsed',
              'description': ['summary', 'subtitle'],
              'description_detail': ['summary_detail', 'subtitle_detail'],
              'url': ['href'],
              'modified': 'updated',
              'modified_parsed': 'updated_parsed',
              'issued': 'published',
              'issued_parsed': 'published_parsed',
              'copyright': 'rights',
              'copyright_detail': 'rights_detail',
              'tagline': 'subtitle',
              'tagline_detail': 'subtitle_detail'}

    def __getitem__(self, key):
        """Look up `key`, handling computed keys and legacy aliases."""
        if key == 'category':
            # Legacy 'category' is the first tag's term.
            try:
                return dict.__getitem__(self, 'tags')[0]['term']
            except IndexError:
                raise KeyError("object doesn't have key 'category'")
        elif key == 'enclosures':
            # Enclosures are links with rel='enclosure', minus the rel key.
            norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel'])
            return [norel(link) for link in dict.__getitem__(self, 'links') if link['rel']==u'enclosure']
        elif key == 'license':
            # First license link's href; falls through to None if absent.
            for link in dict.__getitem__(self, 'links'):
                if link['rel']==u'license' and 'href' in link:
                    return link['href']
        elif key == 'updated':
            # Temporarily help developers out by keeping the old
            # broken behavior that was reported in issue 310.
            # This fix was proposed in issue 328.
            if not dict.__contains__(self, 'updated') and \
                dict.__contains__(self, 'published'):
                warnings.warn("To avoid breaking existing software while "
                    "fixing issue 310, a temporary mapping has been created "
                    "from `updated` to `published` if `updated` doesn't "
                    "exist. This fallback will be removed in a future version "
                    "of feedparser.", DeprecationWarning)
                return dict.__getitem__(self, 'published')
            return dict.__getitem__(self, 'updated')
        elif key == 'updated_parsed':
            if not dict.__contains__(self, 'updated_parsed') and \
                dict.__contains__(self, 'published_parsed'):
                warnings.warn("To avoid breaking existing software while "
                    "fixing issue 310, a temporary mapping has been created "
                    "from `updated_parsed` to `published_parsed` if "
                    "`updated_parsed` doesn't exist. This fallback will be "
                    "removed in a future version of feedparser.",
                    DeprecationWarning)
                return dict.__getitem__(self, 'published_parsed')
            return dict.__getitem__(self, 'updated_parsed')
        else:
            realkey = self.keymap.get(key, key)
            if isinstance(realkey, list):
                for k in realkey:
                    if dict.__contains__(self, k):
                        return dict.__getitem__(self, k)
            elif dict.__contains__(self, realkey):
                return dict.__getitem__(self, realkey)
        return dict.__getitem__(self, key)

    def __contains__(self, key):
        if key in ('updated', 'updated_parsed'):
            # Temporarily help developers out by keeping the old
            # broken behavior that was reported in issue 310.
            # This fix was proposed in issue 328.
            return dict.__contains__(self, key)
        try:
            self.__getitem__(key)
        except KeyError:
            return False
        else:
            return True

    # Python 2 API compatibility alias.
    has_key = __contains__

    def get(self, key, default=None):
        """Like dict.get, but routed through the aliasing __getitem__."""
        try:
            return self.__getitem__(key)
        except KeyError:
            return default

    def __setitem__(self, key, value):
        # Store under the canonical name; for list aliases use the first.
        key = self.keymap.get(key, key)
        if isinstance(key, list):
            key = key[0]
        return dict.__setitem__(self, key, value)

    def setdefault(self, key, value):
        if key not in self:
            self[key] = value
            return value
        return self[key]

    def __getattr__(self, key):
        # __getattribute__() is called first; this will be called
        # only if an attribute was not already found
        try:
            return self.__getitem__(key)
        except KeyError:
            raise AttributeError("object has no attribute '%s'" % key)

    def __hash__(self):
        # Identity hash: instances are mutable dicts but still usable as
        # keys in the property_depth_map bookkeeping.
        return id(self)
# Maps the C1 control range (0x80-0x9F) to the characters Windows-1252
# assigns there; many feeds labeled ISO-8859-1 actually contain CP1252 text.
# Unicode escape literals replace the original unichr() calls: identical
# values on Python 2, and valid on Python 3 where unichr doesn't exist.
_cp1252 = {
    128: u'\u20ac', # euro sign
    130: u'\u201a', # single low-9 quotation mark
    131: u'\u0192', # latin small letter f with hook
    132: u'\u201e', # double low-9 quotation mark
    133: u'\u2026', # horizontal ellipsis
    134: u'\u2020', # dagger
    135: u'\u2021', # double dagger
    136: u'\u02c6', # modifier letter circumflex accent
    137: u'\u2030', # per mille sign
    138: u'\u0160', # latin capital letter s with caron
    139: u'\u2039', # single left-pointing angle quotation mark
    140: u'\u0152', # latin capital ligature oe
    142: u'\u017d', # latin capital letter z with caron
    145: u'\u2018', # left single quotation mark
    146: u'\u2019', # right single quotation mark
    147: u'\u201c', # left double quotation mark
    148: u'\u201d', # right double quotation mark
    149: u'\u2022', # bullet
    150: u'\u2013', # en dash
    151: u'\u2014', # em dash
    152: u'\u02dc', # small tilde
    153: u'\u2122', # trade mark sign
    154: u'\u0161', # latin small letter s with caron
    155: u'\u203a', # single right-pointing angle quotation mark
    156: u'\u0153', # latin small ligature oe
    158: u'\u017e', # latin small letter z with caron
    159: u'\u0178', # latin capital letter y with diaeresis
}
# Matches "scheme://" plus any run of extra leading slashes after it, so
# malformed URIs like "http:////host/path" can be collapsed.
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
    # Resolve `uri` against `base`, always returning a unicode string so
    # callers never see a str/unicode mix.
    uri = _urifixer.sub(r'\1\3', uri)
    #try:
    if not isinstance(uri, unicode):
        uri = uri.decode('utf-8', 'ignore')
    uri = urlparse.urljoin(base, uri)
    if not isinstance(uri, unicode):
        return uri.decode('utf-8', 'ignore')
    return uri
    #except:
    #    uri = urlparse.urlunparse([urllib.quote(part) for part in urlparse.urlparse(uri)])
    #    return urlparse.urljoin(base, uri)
class _FeedParserMixin:
    """Shared element-handling logic used by both the strict (SAX-based)
    and loose (sgmllib-based) feed parsers."""
    # Maps known namespace URIs to the internal prefix feedparser uses for
    # them; the empty-string prefix marks the core feed vocabularies.
    namespaces = {
        '': '',
        'http://backend.userland.com/rss': '',
        'http://blogs.law.harvard.edu/tech/rss': '',
        'http://purl.org/rss/1.0/': '',
        'http://my.netscape.com/rdf/simple/0.9/': '',
        'http://example.com/newformat#': '',
        'http://example.com/necho': '',
        'http://purl.org/echo/': '',
        'uri/of/echo/namespace#': '',
        'http://purl.org/pie/': '',
        'http://purl.org/atom/ns#': '',
        'http://www.w3.org/2005/Atom': '',
        'http://purl.org/rss/1.0/modules/rss091#': '',
        'http://webns.net/mvcb/': 'admin',
        'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
        'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
        'http://media.tangent.org/rss/1.0/': 'audio',
        'http://backend.userland.com/blogChannelModule': 'blogChannel',
        'http://web.resource.org/cc/': 'cc',
        'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
        'http://purl.org/rss/1.0/modules/company': 'co',
        'http://purl.org/rss/1.0/modules/content/': 'content',
        'http://my.theinfo.org/changed/1.0/rss/': 'cp',
        'http://purl.org/dc/elements/1.1/': 'dc',
        'http://purl.org/dc/terms/': 'dcterms',
        'http://purl.org/rss/1.0/modules/email/': 'email',
        'http://purl.org/rss/1.0/modules/event/': 'ev',
        'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
        'http://freshmeat.net/rss/fm/': 'fm',
        'http://xmlns.com/foaf/0.1/': 'foaf',
        'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
        'http://postneo.com/icbm/': 'icbm',
        'http://purl.org/rss/1.0/modules/image/': 'image',
        'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
        'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
        'http://purl.org/rss/1.0/modules/link/': 'l',
        'http://search.yahoo.com/mrss': 'media',
        # Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace
        'http://search.yahoo.com/mrss/': 'media',
        'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
        'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
        'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
        'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
        'http://purl.org/rss/1.0/modules/reference/': 'ref',
        'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
        'http://purl.org/rss/1.0/modules/search/': 'search',
        'http://purl.org/rss/1.0/modules/slash/': 'slash',
        'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
        'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
        'http://hacks.benhammersley.com/rss/streaming/': 'str',
        'http://purl.org/rss/1.0/modules/subscription/': 'sub',
        'http://purl.org/rss/1.0/modules/syndication/': 'sy',
        'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf',
        'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
        'http://purl.org/rss/1.0/modules/threading/': 'thr',
        'http://purl.org/rss/1.0/modules/textinput/': 'ti',
        'http://madskills.com/public/xml/rss/module/trackback/': 'trackback',
        'http://wellformedweb.org/commentAPI/': 'wfw',
        'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
        'http://www.w3.org/1999/xhtml': 'xhtml',
        'http://www.w3.org/1999/xlink': 'xlink',
        'http://www.w3.org/XML/1998/namespace': 'xml',
    }
    # Lowercased mirror of `namespaces`, populated lazily in __init__.
    _matchnamespaces = {}
    # Elements whose character data may itself be a relative URI to resolve.
    can_be_relative_uri = set(['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo'])
    # Elements whose embedded markup may contain relative URIs.
    can_contain_relative_uris = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
    # Elements whose embedded markup is sanitized when SANITIZE_HTML is on.
    can_contain_dangerous_markup = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
    # Content types treated as HTML-ish for resolving/sanitizing purposes.
    html_types = [u'text/html', u'application/xhtml+xml']
    def __init__(self, baseuri=None, baselang=None, encoding=u'utf-8'):
        """Initialize parser state.

        baseuri  -- base URI used to resolve relative links
        baselang -- default language (xml:lang fallback) for the feed
        encoding -- character encoding of the document being parsed
        """
        if not self._matchnamespaces:
            # Populate the shared lowercase namespace table on first use.
            for k, v in self.namespaces.items():
                self._matchnamespaces[k.lower()] = v
        self.feeddata = FeedParserDict() # feed-level data
        self.encoding = encoding # character encoding
        self.entries = [] # list of entry-level data
        self.version = u'' # feed type/version, see SUPPORTED_VERSIONS
        self.namespacesInUse = {} # dictionary of namespaces defined by the feed
        # the following are used internally to track state;
        # this is really out of control and should be refactored
        self.infeed = 0
        self.inentry = 0
        self.incontent = 0
        self.intextinput = 0
        self.inimage = 0
        self.inauthor = 0
        self.incontributor = 0
        self.inpublisher = 0
        self.insource = 0
        self.sourcedata = FeedParserDict()
        self.contentparams = FeedParserDict()
        self._summaryKey = None
        self.namespacemap = {}
        self.elementstack = []
        self.basestack = []
        self.langstack = []
        self.baseuri = baseuri or u''
        self.lang = baselang or None
        self.svgOK = 0
        self.title_depth = -1
        self.depth = 0
        if baselang:
            # RFC-style language tags use '-', not '_'.
            self.feeddata['language'] = baselang.replace('_','-')
        # A map of the following form:
        #     {
        #       object_that_value_is_set_on: {
        #         property_name: depth_of_node_property_was_extracted_from,
        #         other_property: depth_of_node_property_was_extracted_from,
        #       },
        #     }
        self.property_depth_map = {}
- def _normalize_attributes(self, kv):
- k = kv[0].lower()
- v = k in ('rel', 'type') and kv[1].lower() or kv[1]
- # the sgml parser doesn't handle entities in attributes, nor
- # does it pass the attribute values through as unicode, while
- # strict xml parsers do -- account for this difference
- if isinstance(self, _LooseFeedParser):
- v = v.replace('&', '&')
- if not isinstance(v, unicode):
- v = v.decode('utf-8')
- return (k, v)
- def unknown_starttag(self, tag, attrs):
- # increment depth counter
- self.depth += 1
- # normalize attrs
- attrs = map(self._normalize_attributes, attrs)
- # track xml:base and xml:lang
- attrsD = dict(attrs)
- baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
- if not isinstance(baseuri, unicode):
- baseuri = baseuri.decode(self.encoding, 'ignore')
- # ensure that self.baseuri is always an absolute URI that
- # uses a whitelisted URI scheme (e.g. not `javscript:`)
- if self.baseuri:
- self.baseuri = _makeSafeAbsoluteURI(self.baseuri, baseuri) or self.baseuri
- else:
- self.baseuri = _urljoin(self.baseuri, baseuri)
- lang = attrsD.get('xml:lang', attrsD.get('lang'))
- if lang == '':
- # xml:lang could be explicitly set to '', we need to capture that
- lang = None
- elif lang is None:
- # if no xml:lang is specified, use parent lang
- lang = self.lang
- if lang:
- if tag in ('feed', 'rss', 'rdf:RDF'):
- self.feeddata['language'] = lang.replace('_','-')
- self.lang = lang
- self.basestack.append(self.baseuri)
- self.langstack.append(lang)
- # track namespaces
- for prefix, uri in attrs:
- if prefix.startswith('xmlns:'):
- self.trackNamespace(prefix[6:], uri)
- elif prefix == 'xmlns':
- self.trackNamespace(None, uri)
- # track inline content
- if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
- if tag in ('xhtml:div', 'div'):
- return # typepad does this 10/2007
- # element declared itself as escaped markup, but it isn't really
- self.contentparams['type'] = u'application/xhtml+xml'
- if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
- if tag.find(':') <> -1:
- prefix, tag = tag.split(':', 1)
- namespace = self.namespacesInUse.get(prefix, '')
- if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
- attrs.append(('xmlns',namespace))
- if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
- attrs.append(('xmlns',namespace))
- if tag == 'svg':
- self.svgOK += 1
- return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)
- # match namespaces
- if tag.find(':') <> -1:
- prefix, suffix = tag.split(':', 1)
- else:
- prefix, suffix = '', tag
- prefix = self.namespacemap.get(prefix, prefix)
- if prefix:
- prefix = prefix + '_'
- # special hack for better tracking of empty textinput/image elements in illformed feeds
- if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
- self.intextinput = 0
- if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
- self.inimage = 0
- # call special handler (if defined) or default handler
- methodname = '_start_' + prefix + suffix
- try:
- method = getattr(self, methodname)
- return method(attrsD)
- except AttributeError:
- # Since there's no handler or something has gone wrong we explicitly add the element and its attributes
- unknown_tag = prefix + suffix
- if len(attrsD) == 0:
- # No attributes so merge it into the encosing dictionary
- return self.push(unknown_tag, 1)
- else:
- # Has attributes so create it in its own dictionary
- context = self._getContext()
- context[unknown_tag] = attrsD
- def unknown_endtag(self, tag):
- # match namespaces
- if tag.find(':') <> -1:
- prefix, suffix = tag.split(':', 1)
- else:
- prefix, suffix = '', tag
- prefix = self.namespacemap.get(prefix, prefix)
- if prefix:
- prefix = prefix + '_'
- if suffix == 'svg' and self.svgOK:
- self.svgOK -= 1
- # call special handler (if defined) or default handler
- methodname = '_end_' + prefix + suffix
- try:
- if self.svgOK:
- raise AttributeError()
- method = getattr(self, methodname)
- method()
- except AttributeError:
- self.pop(prefix + suffix)
- # track inline content
- if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
- # element declared itself as escaped markup, but it isn't really
- if tag in ('xhtml:div', 'div'):
- return # typepad does this 10/2007
- self.contentparams['type'] = u'application/xhtml+xml'
- if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
- tag = tag.split(':')[-1]
- self.handle_data('</%s>' % tag, escape=0)
- # track xml:base and xml:lang going out of scope
- if self.basestack:
- self.basestack.pop()
- if self.basestack and self.basestack[-1]:
- self.baseuri = self.basestack[-1]
- if self.langstack:
- self.langstack.pop()
- if self.langstack: # and (self.langstack[-1] is not None):
- self.lang = self.langstack[-1]
- self.depth -= 1
- def handle_charref(self, ref):
- # called for each character reference, e.g. for ' ', ref will be '160'
- if not self.elementstack:
- return
- ref = ref.lower()
- if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
- text = '&#%s;' % ref
- else:
- if ref[0] == 'x':
- c = int(ref[1:], 16)
- else:
- c = int(ref)
- text = unichr(c).encode('utf-8')
- self.elementstack[-1][2].append(text)
- def handle_entityref(self, ref):
- # called for each entity reference, e.g. for '©', ref will be 'copy'
- if not self.elementstack:
- return
- if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
- text = '&%s;' % ref
- elif ref in self.entities:
- text = self.entities[ref]
- if text.startswith('&#') and text.endswith(';'):
- return self.handle_entityref(text)
- else:
- try:
- name2codepoint[ref]
- except KeyError:
- text = '&%s;' % ref
- else:
- text = unichr(name2codepoint[ref]).encode('utf-8')
- self.elementstack[-1][2].append(text)
    def handle_data(self, text, escape=1):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        if not self.elementstack:
            return
        if escape and self.contentparams.get('type') == u'application/xhtml+xml':
            # inside inline XHTML the accumulated text must stay well-formed
            text = _xmlescape(text)
        self.elementstack[-1][2].append(text)
    def handle_comment(self, text):
        # called for each comment, e.g. <!-- insert message here -->
        # comments are deliberately discarded
        pass
    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # processing instructions are deliberately ignored
        pass
    def handle_decl(self, text):
        # called for each declaration; deliberately ignored
        pass
    def parse_declaration(self, i):
        # override internal declaration handler to handle CDATA blocks;
        # returns the index of the first character past the declaration
        if self.rawdata[i:i+9] == '<![CDATA[':
            k = self.rawdata.find(']]>', i)
            if k == -1:
                # CDATA block began but didn't finish; consume the rest
                # of the buffer
                k = len(self.rawdata)
                return k
            self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
            return k+3
        else:
            k = self.rawdata.find('>', i)
            if k >= 0:
                return k+1
            else:
                # No '>' found: the declaration is incomplete, so k is -1
                # here.  (The original comment called this an incomplete
                # CDATA block, but this is the non-CDATA branch.)
                return k
- def mapContentType(self, contentType):
- contentType = contentType.lower()
- if contentType == 'text' or contentType == 'plain':
- contentType = u'text/plain'
- elif contentType == 'html':
- contentType = u'text/html'
- elif contentType == 'xhtml':
- contentType = u'application/xhtml+xml'
- return contentType
- def trackNamespace(self, prefix, uri):
- loweruri = uri.lower()
- if not self.version:
- if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/'):
- self.version = u'rss090'
- elif loweruri == 'http://purl.org/rss/1.0/':
- self.version = u'rss10'
- elif loweruri == 'http://www.w3.org/2005/atom':
- self.version = u'atom10'
- if loweruri.find(u'backend.userland.com/rss') <> -1:
- # match any backend.userland.com namespace
- uri = u'http://backend.userland.com/rss'
- loweruri = uri
- if loweruri in self._matchnamespaces:
- self.namespacemap[prefix] = self._matchnamespaces[loweruri]
- self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
- else:
- self.namespacesInUse[prefix or ''] = uri
    def resolveURI(self, uri):
        # Resolve a possibly-relative URI against the current xml:base.
        return _urljoin(self.baseuri or u'', uri)
    def decodeEntities(self, element, data):
        # Hook for subclasses (the loose parser overrides this); the strict
        # parser leaves the data untouched.
        return data
- def strattrs(self, attrs):
- return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'"'})) for t in attrs])
    def push(self, element, expectingText):
        # Begin accumulating character data for `element`; the trailing
        # list collects the text pieces until pop() joins them.
        self.elementstack.append([element, expectingText, []])
def pop(self, element, stripWhitespace=1):
    """Close `element`: pop its frame from elementstack and return its text.

    Joins the frame's accumulated pieces into one unicode string, runs the
    result through the post-processing pipeline visible below (base64
    decoding, relative-URI resolution, entity decoding, content-type
    guessing, microformat extraction, HTML sanitizing, mojibake repair,
    cp1252 mapping) and stores it on the current entry or feed context.
    Returns the processed string, or None when the stack is empty or its
    top frame belongs to a different element.
    """
    # Guard clauses: nothing to pop, or a mismatched close tag.
    if not self.elementstack:
        return
    if self.elementstack[-1][0] != element:
        return
    element, expectingText, pieces = self.elementstack.pop()

    if self.version == u'atom10' and self.contentparams.get('type', u'text') == u'application/xhtml+xml':
        # remove enclosing child element, but only if it is a <div> and
        # only if all the remaining content is nested underneath it.
        # This means that the divs would be retained in the following:
        #    <div>foo</div><div>bar</div>
        while pieces and len(pieces)>1 and not pieces[-1].strip():
            del pieces[-1]
        while pieces and len(pieces)>1 and not pieces[0].strip():
            del pieces[0]
        if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
            # Track nesting depth across the pieces; the for-else only
            # strips the outer <div>...</div> when the loop was never
            # broken, i.e. the opening div closes at the very end.
            depth = 0
            for piece in pieces[:-1]:
                if piece.startswith('</'):
                    depth -= 1
                    if depth == 0:
                        break
                elif piece.startswith('<') and not piece.endswith('/>'):
                    depth += 1
            else:
                pieces = pieces[1:-1]

    # Ensure each piece is a str for Python 3
    for (i, v) in enumerate(pieces):
        if not isinstance(v, unicode):
            pieces[i] = v.decode('utf-8')

    output = u''.join(pieces)
    if stripWhitespace:
        output = output.strip()
    # Frames pushed with expectingText=0 get no further processing.
    if not expectingText:
        return output

    # decode base64 content
    if base64 and self.contentparams.get('base64', 0):
        try:
            output = _base64decode(output)
        except binascii.Error:
            pass
        except binascii.Incomplete:
            pass
        except TypeError:
            # In Python 3, base64 takes and outputs bytes, not str
            # This may not be the most correct way to accomplish this
            output = _base64decode(output.encode('utf-8')).decode('utf-8')

    # resolve relative URIs
    if (element in self.can_be_relative_uri) and output:
        output = self.resolveURI(output)

    # decode entities within embedded markup
    if not self.contentparams.get('base64', 0):
        output = self.decodeEntities(element, output)

    # some feed formats require consumers to guess
    # whether the content is html or plain text
    if not self.version.startswith(u'atom') and self.contentparams.get('type') == u'text/plain':
        if self.lookslikehtml(output):
            self.contentparams['type'] = u'text/html'

    # remove temporary cruft from contentparams
    try:
        del self.contentparams['mode']
    except KeyError:
        pass
    try:
        del self.contentparams['base64']
    except KeyError:
        pass

    is_htmlish = self.mapContentType(self.contentparams.get('type', u'text/html')) in self.html_types
    # resolve relative URIs within embedded markup
    if is_htmlish and RESOLVE_RELATIVE_URIS:
        if element in self.can_contain_relative_uris:
            output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', u'text/html'))

    # parse microformats
    # (must do this before sanitizing because some microformats
    # rely on elements that we sanitize)
    if PARSE_MICROFORMATS and is_htmlish and element in ['content', 'description', 'summary']:
        mfresults = _parseMicroformats(output, self.baseuri, self.encoding)
        if mfresults:
            for tag in mfresults.get('tags', []):
                self._addTag(tag['term'], tag['scheme'], tag['label'])
            for enclosure in mfresults.get('enclosures', []):
                self._start_enclosure(enclosure)
            for xfn in mfresults.get('xfn', []):
                self._addXFN(xfn['relationships'], xfn['href'], xfn['name'])
            vcard = mfresults.get('vcard')
            if vcard:
                self._getContext()['vcard'] = vcard

    # sanitize embedded markup
    if is_htmlish and SANITIZE_HTML:
        if element in self.can_contain_dangerous_markup:
            output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', u'text/html'))

    if self.encoding and not isinstance(output, unicode):
        output = output.decode(self.encoding, 'ignore')

    # address common error where people take data that is already
    # utf-8, presume that it is iso-8859-1, and re-encode it.
    if self.encoding in (u'utf-8', u'utf-8_INVALID_PYTHON_3') and isinstance(output, unicode):
        try:
            output = output.encode('iso-8859-1').decode('utf-8')
        except (UnicodeEncodeError, UnicodeDecodeError):
            pass

    # map win-1252 extensions to the proper code points
    if isinstance(output, unicode):
        output = output.translate(_cp1252)

    # categories/tags/keywords/whatever are handled in _end_category
    if element == 'category':
        return output

    # Only the shallowest title wins; deeper titles are ignored.
    if element == 'title' and -1 < self.title_depth <= self.depth:
        return output

    # store output in appropriate place(s)
    if self.inentry and not self.insource:
        if element == 'content':
            self.entries[-1].setdefault(element, [])
            contentparams = copy.deepcopy(self.contentparams)
            contentparams['value'] = output
            self.entries[-1][element].append(contentparams)
        elif element == 'link':
            if not self.inimage:
                # query variables in urls in link elements are improperly
                # converted from `?a=1&b=2` to `?a=1&b;=2` as if they're
                # unhandled character references. fix this special case.
                output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
                self.entries[-1][element] = output
                if output:
                    self.entries[-1]['links'][-1]['href'] = output
        else:
            if element == 'description':
                element = 'summary'
            # Only overwrite an existing value when this occurrence is at
            # the same depth or shallower than the one that set it.
            old_value_depth = self.property_depth_map.setdefault(self.entries[-1], {}).get(element)
            if old_value_depth is None or self.depth <= old_value_depth:
                self.property_depth_map[self.entries[-1]][element] = self.depth
                self.entries[-1][element] = output
            if self.incontent:
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                self.entries[-1][element + '_detail'] = contentparams
    elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
        context = self._getContext()
        if element == 'description':
            element = 'subtitle'
        context[element] = output
        if element == 'link':
            # fix query variables; see above for the explanation
            output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
            context[element] = output
            context['links'][-1]['href'] = output
        elif self.incontent:
            contentparams = copy.deepcopy(self.contentparams)
            contentparams['value'] = output
            context[element + '_detail'] = contentparams
    return output
def pushContent(self, tag, attrsD, defaultContentType, expectingText):
    """Open a content-bearing element and record its content parameters.

    Builds self.contentparams (type/language/base/base64 flag) from the
    element's attributes, then pushes a frame for `tag`.
    """
    self.incontent += 1
    # Normalize RFC-style language tags: underscores become hyphens.
    if self.lang:
        self.lang = self.lang.replace('_', '-')
    params = FeedParserDict({
        'type': self.mapContentType(attrsD.get('type', defaultContentType)),
        'language': self.lang,
        'base': self.baseuri,
    })
    # _isBase64 consults self.contentparams['type'], so assign first.
    self.contentparams = params
    params['base64'] = self._isBase64(attrsD, params)
    self.push(tag, expectingText)
def popContent(self, tag):
    """Close a content-bearing element opened by pushContent().

    Returns the processed text produced by pop() and resets the
    content-tracking state.
    """
    value = self.pop(tag)
    self.incontent -= 1
    # contentparams described only the block just closed; clear it so it
    # cannot leak into unrelated elements.
    self.contentparams.clear()
    return value
# a number of elements in a number of RSS variants are nominally plain
# text, but this is routinely ignored.  This is an attempt to detect
# the most common cases.  As false positives often result in silent
# data loss, this function errs on the conservative side.
@staticmethod
def lookslikehtml(s):
    """Return 1 if `s` conservatively looks like HTML, else None.

    Requires at least one close tag or entity reference; every tag must
    be an acceptable sanitizer element and every named entity must be a
    known HTML entity.
    """
    # must have a close tag or an entity reference to qualify
    if not (re.search(r'</(\w+)>', s) or re.search(r'&#?\w+;', s)):
        return
    # all tags must be in a restricted subset of valid HTML tags.
    # NOTE: the original used `if filter(...)`, which is always truthy in
    # Python 3 (filter returns a lazy iterator there); any() is correct
    # under both Python 2 and 3.
    if any(t.lower() not in _HTMLSanitizer.acceptable_elements
           for t in re.findall(r'</?(\w+)', s)):
        return
    # all entities must have been defined as valid HTML entities
    if any(e not in entitydefs for e in re.findall(r'&(\w+);', s)):
        return
    return 1
- def _mapToStandardPrefix(self, name):
- colonpos = name.find(':')
- if colonpos <> -1:
- prefix = name[:colonpos]
- suffix = name[colonpos+1:]
- prefix = self.namespacemap.get(prefix, prefix)
- name = prefix + ':' + suffix
- return name
- def _getAttribute(self, attrsD, name):
- return attrsD.get(self._mapToStandardPrefix(name))
- def _isBase64(self, attrsD, contentparams):
- if attrsD.get('mode', '') == 'base64':
- return 1
- if self.contentparams['type'].startswith(u'text/'):
- return 0
- if self.contentparams['type'].endswith(u'+xml'):
- return 0
- if self.contentparams['type'].endswith(u'/xml'):
- return 0
- return 1
- def _itsAnHrefDamnIt(self, attrsD):
- href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
- if href:
- try:
- del attrsD['url']
- except KeyError:
- pass
- try:
- del attrsD['uri']
- except KeyError:
- pass
- attrsD['href'] = href
- return attrsD
- def _save(self, key, value, overwrite=False):
- context = self._getContext()
- if overwrite:
- context[key] = value
- else:
- context.setdefault(key, value)
- def _start_rss(self, attrsD):
- versionmap = {'0.91': u'rss091u',
- '0.92': u'rss092',
- '0.93': u'rss093',
- '0.94': u'rss094'}
- #If we're here then this is an RSS feed.
- #If we don't have a version or have a version that starts with something
- #other than RSS then there's been a mistake. Correct it.
- if not self.version or not self.version.startswith(u'rss'):
- attr_version = attrsD.get('version', '')
- version = versionmap.get(attr_version)
- if version:
- self.version = version
- elif attr_version.startswith('2.'):
- self.version = u'rss20'
- else:
- self.version = u'rss'
def _start_channel(self, attrsD):
    """Open the feed-level <channel> element and handle CDF attributes."""
    self.infeed = 1
    self._cdf_common(attrsD)
def _cdf_common(self, attrsD):
    """Handle CDF attributes shared by channel/item elements.

    CDF expresses the modification date ('lastmod') and the link ('href')
    as attributes rather than child elements; synthesize the equivalent
    start / data / end calls so the normal handlers store them.
    """
    if 'lastmod' in attrsD:
        self._start_modified({})
        # Inject the attribute value as the accumulated text of the frame
        # presumably pushed by _start_modified (matches push()'s
        # [element, expectingText, pieces] layout) -- then close it.
        self.elementstack[-1][-1] = attrsD['lastmod']
        self._end_modified()
    if 'href' in attrsD:
        self._start_link({})
        self.elementstack[-1][-1] = attrsD['href']
        self._end_link()
- def _start_feed(self, attrsD):
- self.infeed = 1
- versionmap = {'0.1': u'atom01',
- '0.2': u'atom02',
- '0.3': u'atom03'}
- if not self.version:
- attr_version = attrsD.get('version')
- version = versionmap.get(attr_version)
- if version:
- self.version = version
- else:
- self.version = u'atom'
def _end_channel(self):
    """Close the feed-level element: leave feed scope."""
    self.infeed = 0
# Atom's </feed> ends feed scope exactly like RSS's </channel>.
_end_feed = _end_channel
def _start_image(self, attrsD):
    """Open an <image> element; feed-level images get a context dict."""
    context = self._getContext()
    if not self.inentry:
        context.setdefault('image', FeedParserDict())
    self.inimage = 1
    # Reset title tracking; title_depth is compared against self.depth
    # in pop() when deciding whether to store a <title>.
    self.title_depth = -1
    # expectingText=0: pop() returns the raw joined pieces unprocessed.
    self.push('image', 0)
def _end_image(self):
    """Close an <image> element: pop its frame and leave image scope."""
    self.pop('image')
    self.inimage = 0
def _start_textinput(self, attrsD):
    """Open a <textinput> element: prepare its dict on the context."""
    context = self._getContext()
    context.setdefault('textinput', FeedParserDict())
    self.intextinput = 1
    # Reset title tracking (compared against self.depth in pop()).
    self.title_depth = -1
    self.push('textinput', 0)
# CDF spells the element <textInput>.
_start_textInput = _start_textinput
def _end_textinput(self):
    """Close a <textinput> element and leave textinput scope."""
    self.pop('textinput')
    self.intextinput = 0
# CDF spells the element <textInput>.
_end_textInput = _end_textinput
def _start_author(self, attrsD):
    """Open an author-carrying element (author/managingEditor/dc:creator/
    itunes:author): enter author scope and start a fresh authors entry."""
    self.inauthor = 1
    self.push('author', 1)
    # Append a new FeedParserDict when expecting an author.
    self._getContext().setdefault('authors', []).append(FeedParserDict())
_start_managingeditor = _start_author
_start_dc_author = _start_author
_start_dc_creator = _start_author
_start_itunes_author = _start_author
def _end_author(self):
    """Close an author-carrying element and normalize the author detail."""
    self.pop('author')
    self.inauthor = 0
    self._sync_author_detail()
_end_managingeditor = _end_author
_end_dc_author = _end_author
_end_dc_creator = _end_author
_end_itunes_author = _end_author
def _start_itunes_owner(self, attrsD):
    """Open <itunes:owner>: tracked as the feed's publisher."""
    self.inpublisher = 1
    self.push('publisher', 0)
def _end_itunes_owner(self):
    """Close <itunes:owner> and normalize the publisher detail."""
    self.pop('publisher')
    self.inpublisher = 0
    self._sync_author_detail('publisher')
def _start_contributor(self, attrsD):
    """Open a <contributor> element: start a fresh contributors entry."""
    self.incontributor = 1
    self._getContext().setdefault('contributors', []).append(FeedParserDict())
    self.push('contributor', 0)
def _end_contributor(self):
    """Close a <contributor> element and leave contributor scope."""
    self.pop('contributor')
    self.incontributor = 0
def _start_dc_contributor(self, attrsD):
    """Open <dc:contributor>: start a contributors entry.

    Unlike _start_contributor, the element's text is the contributor's
    name, so a 'name' frame is pushed instead of 'contributor'.
    """
    self.incontributor = 1
    self._getContext().setdefault('contributors', []).append(FeedParserDict())
    self.push('name', 0)
def _end_dc_contributor(self):
    """Close <dc:contributor>; its text was pushed as a 'name' frame."""
    self._end_name()
    self.incontributor = 0
def _start_name(self, attrsD):
    """Open a <name> element (expectingText=0: raw pieces only)."""
    self.push('name', 0)
- _start_itunes_n…
Large files files are truncated, but you can click here to view the full file