/wiki/web/widgets/rss/feedparser.py
Python | 2860 lines | 2744 code | 48 blank | 68 comment | 102 complexity | cd573160f063f3ab691efaa196a575ff MD5 | raw file
Large files are truncated, but you can click here to view the full file
#!/usr/bin/env python
"""Universal feed parser

Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds

Visit http://feedparser.org/ for the latest version
Visit http://feedparser.org/docs/ for the latest documentation

Required: Python 2.1 or later
Recommended: Python 2.3 or later
Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/>
"""

__version__ = "4.1"# + "$Revision: 1.92 $"[11:15] + "-cvs"
__license__ = """Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
                    "John Beimler <http://john.beimler.org/>",
                    "Fazal Majid <http://www.majid.info/mylos/weblog/>",
                    "Aaron Swartz <http://aaronsw.com/>",
                    "Kevin Marks <http://epeus.blogspot.com/>"]

# Set to 1 to emit parser tracing on stderr while debugging.
_debug = 0

# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__

# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"

# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]

# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
# or utidylib <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0

# List of Python interfaces for HTML Tidy, in order of preference. Only useful
# if TIDY_MARKUP = 1
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
- # ---------- required modules (should come with any Python distribution) ----------
- import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi, urllib, urllib2
- try:
- from cStringIO import StringIO as _StringIO
- except:
- from StringIO import StringIO as _StringIO
- # ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
- # gzip is included with most Python distributions, but may not be available if you compiled your own
- try:
- import gzip
- except:
- gzip = None
- try:
- import zlib
- except:
- zlib = None
- # If a real XML parser is available, feedparser will attempt to use it. feedparser has
- # been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the
- # Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
- # versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
- try:
- import xml.sax
- xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
- from xml.sax.saxutils import escape as _xmlescape
- _XML_AVAILABLE = 1
- except:
- _XML_AVAILABLE = 0
- def _xmlescape(data):
- data = data.replace('&', '&')
- data = data.replace('>', '>')
- data = data.replace('<', '<')
- return data
- # base64 support for Atom feeds that contain embedded binary data
- try:
- import base64, binascii
- except:
- base64 = binascii = None
- # cjkcodecs and iconv_codec provide support for more character encodings.
- # Both are available from http://cjkpython.i18n.org/
- try:
- import cjkcodecs.aliases
- except:
- pass
- try:
- import iconv_codec
- except:
- pass
- # chardet library auto-detects character encodings
- # Download from http://chardet.feedparser.org/
- try:
- import chardet
- if _debug:
- import chardet.constants
- chardet.constants._debug = 1
- except:
- chardet = None
# ---------- don't touch these ----------
# Internal exception hierarchy used to signal encoding oddities discovered
# while sniffing the document; callers see them reflected in result['bozo'].
class ThingsNobodyCaresAboutButMe(Exception): pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
class UndeclaredNamespace(Exception): pass

# Monkey-patch sgmllib's lexer patterns:
# - tagfind: allow ':' and '.' so namespaced tag names are recognized
# - special: treat any '<!' as a declaration (handled by parse_declaration)
# - charref: accept hexadecimal character references ('&#x..;')
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
sgmllib.special = re.compile('<!')
sgmllib.charref = re.compile('&#(x?[0-9A-Fa-f]+)[^0-9A-Fa-f]')
# Maps the internal version token (stored in result['version']) to a
# human-readable feed format name.  The empty key is used when the feed
# format could not be determined at all.
SUPPORTED_VERSIONS = {'': 'unknown',
                      'rss090': 'RSS 0.90',
                      'rss091n': 'RSS 0.91 (Netscape)',
                      'rss091u': 'RSS 0.91 (Userland)',
                      'rss092': 'RSS 0.92',
                      'rss093': 'RSS 0.93',
                      'rss094': 'RSS 0.94',
                      'rss20': 'RSS 2.0',
                      'rss10': 'RSS 1.0',
                      'rss': 'RSS (unknown version)',
                      'atom01': 'Atom 0.1',
                      'atom02': 'Atom 0.2',
                      'atom03': 'Atom 0.3',
                      'atom10': 'Atom 1.0',
                      'atom': 'Atom (unknown version)',
                      'cdf': 'CDF',
                      'hotrss': 'Hot RSS'
                      }
# Compatibility shim: on Python 2.2+ FeedParserDict can build on the real
# dict type; Python 2.1 lacks dict entirely, so fall back to UserDict and
# provide a minimal dict() constructor (pairs-list form only, which is all
# this module uses).
try:
    UserDict = dict
except NameError:
    # Python 2.1 does not have dict
    from UserDict import UserDict
    def dict(aList):
        rc = {}
        for k, v in aList:
            rc[k] = v
        return rc
class FeedParserDict(UserDict):
    """Dictionary holding parse results, with convenience aliasing.

    Values are reachable both as items (d['feed']) and as attributes
    (d.feed).  keymap aliases legacy element names to their modern
    equivalents, so e.g. d['modified'] reads and writes d['updated'].
    """
    # legacy key -> canonical key; a list value means "first of these
    # candidates that is present"
    keymap = {'channel': 'feed',
              'items': 'entries',
              'guid': 'id',
              'date': 'updated',
              'date_parsed': 'updated_parsed',
              'description': ['subtitle', 'summary'],
              'url': ['href'],
              'modified': 'updated',
              'modified_parsed': 'updated_parsed',
              'issued': 'published',
              'issued_parsed': 'published_parsed',
              'copyright': 'rights',
              'copyright_detail': 'rights_detail',
              'tagline': 'subtitle',
              'tagline_detail': 'subtitle_detail'}
    def __getitem__(self, key):
        # 'category'/'categories' are synthesized from the 'tags' list
        if key == 'category':
            return UserDict.__getitem__(self, 'tags')[0]['term']
        if key == 'categories':
            return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 'tags')]
        realkey = self.keymap.get(key, key)
        if type(realkey) == types.ListType:
            for k in realkey:
                if UserDict.has_key(self, k):
                    return UserDict.__getitem__(self, k)
        # an exact match wins over the alias target
        if UserDict.has_key(self, key):
            return UserDict.__getitem__(self, key)
        return UserDict.__getitem__(self, realkey)
    def __setitem__(self, key, value):
        # writes go through the alias map too, so d['modified'] = x
        # actually stores d['updated']
        for k in self.keymap.keys():
            if key == k:
                key = self.keymap[k]
                if type(key) == types.ListType:
                    key = key[0]
        return UserDict.__setitem__(self, key, value)
    def get(self, key, default=None):
        # reimplemented so lookups honor keymap aliases via has_key/__getitem__
        if self.has_key(key):
            return self[key]
        else:
            return default
    def setdefault(self, key, value):
        if not self.has_key(key):
            self[key] = value
        return self[key]
    def has_key(self, key):
        # an attribute of the same name also counts as "present"
        try:
            return hasattr(self, key) or UserDict.has_key(self, key)
        except AttributeError:
            return False
    def __getattr__(self, key):
        # attribute access falls back to item access for non-private names
        try:
            return self.__dict__[key]
        except KeyError:
            pass
        try:
            assert not key.startswith('_')
            return self.__getitem__(key)
        except:
            raise AttributeError, "object has no attribute '%s'" % key
    def __setattr__(self, key, value):
        # private attrs and UserDict's 'data' slot stay real attributes;
        # everything else is stored as an item
        if key.startswith('_') or key == 'data':
            self.__dict__[key] = value
        else:
            return self.__setitem__(key, value)
    def __contains__(self, key):
        return self.has_key(key)
def zopeCompatibilityHack():
    """Swap FeedParserDict for a plain-dict factory.

    Zope cannot cope with FeedParserDict's attribute-access tricks, so
    this replaces the class with a factory function returning ordinary
    dictionaries.  Call it once, before parsing anything.
    """
    global FeedParserDict
    del FeedParserDict
    def FeedParserDict(aDict=None):
        result = {}
        if aDict:
            result.update(aDict)
        return result
# lazily-built 256-byte translation table, cached after first use
_ebcdic_to_ascii_map = None
def _ebcdic_to_ascii(s):
    """Translate an EBCDIC-encoded byte string to its ASCII equivalent.

    The translation table is constructed on first call and cached in the
    module-level _ebcdic_to_ascii_map.
    """
    global _ebcdic_to_ascii_map
    if not _ebcdic_to_ascii_map:
        # EBCDIC code point -> ASCII code point, one entry per byte value
        emap = (
            0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
            16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
            128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
            144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
            32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
            38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
            45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
            186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
            195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201,
            202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208,
            209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215,
            216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,
            123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237,
            125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243,
            92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249,
            48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255
            )
        import string
        _ebcdic_to_ascii_map = string.maketrans( \
            ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
    return s.translate(_ebcdic_to_ascii_map)
- _urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
- def _urljoin(base, uri):
- uri = _urifixer.sub(r'\1\3', uri)
- return urlparse.urljoin(base, uri)
- class _FeedParserMixin:
    # Map of XML namespace URI -> canonical element prefix.  An empty
    # prefix means elements in that namespace are treated as core
    # RSS/Atom elements; all other namespaces get 'prefix_' prepended to
    # the element name when dispatching to _start_*/_end_* handlers.
    namespaces = {'': '',
                  'http://backend.userland.com/rss': '',
                  'http://blogs.law.harvard.edu/tech/rss': '',
                  'http://purl.org/rss/1.0/': '',
                  'http://my.netscape.com/rdf/simple/0.9/': '',
                  'http://example.com/newformat#': '',
                  'http://example.com/necho': '',
                  'http://purl.org/echo/': '',
                  'uri/of/echo/namespace#': '',
                  'http://purl.org/pie/': '',
                  'http://purl.org/atom/ns#': '',
                  'http://www.w3.org/2005/Atom': '',
                  'http://purl.org/rss/1.0/modules/rss091#': '',
                  'http://webns.net/mvcb/': 'admin',
                  'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
                  'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
                  'http://media.tangent.org/rss/1.0/': 'audio',
                  'http://backend.userland.com/blogChannelModule': 'blogChannel',
                  'http://web.resource.org/cc/': 'cc',
                  'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
                  'http://purl.org/rss/1.0/modules/company': 'co',
                  'http://purl.org/rss/1.0/modules/content/': 'content',
                  'http://my.theinfo.org/changed/1.0/rss/': 'cp',
                  'http://purl.org/dc/elements/1.1/': 'dc',
                  'http://purl.org/dc/terms/': 'dcterms',
                  'http://purl.org/rss/1.0/modules/email/': 'email',
                  'http://purl.org/rss/1.0/modules/event/': 'ev',
                  'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
                  'http://freshmeat.net/rss/fm/': 'fm',
                  'http://xmlns.com/foaf/0.1/': 'foaf',
                  'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
                  'http://postneo.com/icbm/': 'icbm',
                  'http://purl.org/rss/1.0/modules/image/': 'image',
                  'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
                  'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
                  'http://purl.org/rss/1.0/modules/link/': 'l',
                  'http://search.yahoo.com/mrss': 'media',
                  'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
                  'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
                  'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
                  'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
                  'http://purl.org/rss/1.0/modules/reference/': 'ref',
                  'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
                  'http://purl.org/rss/1.0/modules/search/': 'search',
                  'http://purl.org/rss/1.0/modules/slash/': 'slash',
                  'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
                  'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
                  'http://hacks.benhammersley.com/rss/streaming/': 'str',
                  'http://purl.org/rss/1.0/modules/subscription/': 'sub',
                  'http://purl.org/rss/1.0/modules/syndication/': 'sy',
                  'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
                  'http://purl.org/rss/1.0/modules/threading/': 'thr',
                  'http://purl.org/rss/1.0/modules/textinput/': 'ti',
                  'http://madskills.com/public/xml/rss/module/trackback/':'trackback',
                  'http://wellformedweb.org/commentAPI/': 'wfw',
                  'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
                  'http://www.w3.org/1999/xhtml': 'xhtml',
                  'http://www.w3.org/XML/1998/namespace': 'xml',
                  'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf'
                  }
    # lowercased-URI index of namespaces; built lazily in __init__ and
    # intentionally shared at class level so it is only built once
    _matchnamespaces = {}
    # elements whose text content may be a relative URI needing resolution
    can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'license', 'icon', 'logo']
    # elements whose embedded markup may contain relative URIs
    can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
    # elements whose embedded markup must be sanitized before output
    can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
    # content types treated as HTML for URI resolution and sanitizing
    html_types = ['text/html', 'application/xhtml+xml']
    def __init__(self, baseuri=None, baselang=None, encoding='utf-8'):
        """Set up per-document parser state.

        baseuri  -- base URI for resolving relative links
        baselang -- document language inherited from the transport layer
        encoding -- character encoding of the document being parsed
        """
        if _debug: sys.stderr.write('initializing FeedParser\n')
        if not self._matchnamespaces:
            # build the lowercase namespace lookup table once per process
            # (stored on the class, so later instances reuse it)
            for k, v in self.namespaces.items():
                self._matchnamespaces[k.lower()] = v
        self.feeddata = FeedParserDict() # feed-level data
        self.encoding = encoding # character encoding
        self.entries = [] # list of entry-level data
        self.version = '' # feed type/version, see SUPPORTED_VERSIONS
        self.namespacesInUse = {} # dictionary of namespaces defined by the feed
        # the following are used internally to track state;
        # this is really out of control and should be refactored
        self.infeed = 0
        self.inentry = 0
        self.incontent = 0
        self.intextinput = 0
        self.inimage = 0
        self.inauthor = 0
        self.incontributor = 0
        self.inpublisher = 0
        self.insource = 0
        self.sourcedata = FeedParserDict()
        self.contentparams = FeedParserDict()
        self._summaryKey = None
        self.namespacemap = {}
        self.elementstack = []
        self.basestack = []
        self.langstack = []
        self.baseuri = baseuri or ''
        self.lang = baselang or None
        if baselang:
            self.feeddata['language'] = baselang
    def unknown_starttag(self, tag, attrs):
        """Dispatch an opening tag to its _start_* handler.

        Also maintains xml:base/xml:lang scoping, records namespace
        declarations, and, when inside inline XHTML content, re-emits the
        tag as escaped character data instead of dispatching it.
        """
        if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs))
        # normalize attrs
        attrs = [(k.lower(), v) for k, v in attrs]
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        # track xml:base and xml:lang
        attrsD = dict(attrs)
        baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
        self.baseuri = _urljoin(self.baseuri, baseuri)
        lang = attrsD.get('xml:lang', attrsD.get('lang'))
        if lang == '':
            # xml:lang could be explicitly set to '', we need to capture that
            lang = None
        elif lang is None:
            # if no xml:lang is specified, use parent lang
            lang = self.lang
        if lang:
            if tag in ('feed', 'rss', 'rdf:RDF'):
                self.feeddata['language'] = lang
        self.lang = lang
        self.basestack.append(self.baseuri)
        self.langstack.append(lang)
        # track namespaces
        for prefix, uri in attrs:
            if prefix.startswith('xmlns:'):
                self.trackNamespace(prefix[6:], uri)
            elif prefix == 'xmlns':
                self.trackNamespace(None, uri)
        # track inline content
        if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # element declared itself as escaped markup, but it isn't really
            self.contentparams['type'] = 'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
            # Note: probably shouldn't simply recreate localname here, but
            # our namespace handling isn't actually 100% correct in cases where
            # the feed redefines the default namespace (which is actually
            # the usual case for inline content, thanks Sam), so here we
            # cheat and just reconstruct the element based on localname
            # because that compensates for the bugs in our namespace handling.
            # This will horribly munge inline content with non-empty qnames,
            # but nobody actually does that, so I'm not fixing it.
            tag = tag.split(':')[-1]
            return self.handle_data('<%s%s>' % (tag, ''.join([' %s="%s"' % t for t in attrs])), escape=0)
        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'
        # special hack for better tracking of empty textinput/image elements in illformed feeds
        if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
            self.intextinput = 0
        if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
            self.inimage = 0
        # call special handler (if defined) or default handler
        methodname = '_start_' + prefix + suffix
        try:
            method = getattr(self, methodname)
            return method(attrsD)
        except AttributeError:
            return self.push(prefix + suffix, 1)
    def unknown_endtag(self, tag):
        """Dispatch a closing tag to its _end_* handler (or pop the
        element by default) and unwind xml:base/xml:lang scoping."""
        if _debug: sys.stderr.write('end %s\n' % tag)
        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'
        # call special handler (if defined) or default handler
        methodname = '_end_' + prefix + suffix
        try:
            method = getattr(self, methodname)
            method()
        except AttributeError:
            self.pop(prefix + suffix)
        # track inline content
        if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # element declared itself as escaped markup, but it isn't really
            self.contentparams['type'] = 'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
            # inside inline XHTML: re-emit the close tag as character data
            tag = tag.split(':')[-1]
            self.handle_data('</%s>' % tag, escape=0)
        # track xml:base and xml:lang going out of scope
        if self.basestack:
            self.basestack.pop()
            if self.basestack and self.basestack[-1]:
                self.baseuri = self.basestack[-1]
        if self.langstack:
            self.langstack.pop()
            if self.langstack: # and (self.langstack[-1] is not None):
                self.lang = self.langstack[-1]
    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        if not self.elementstack: return
        ref = ref.lower()
        if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
            # markup-significant characters are kept as references so that
            # embedded markup survives re-serialization
            text = '&#%s;' % ref
        else:
            # everything else is decoded to the actual character (UTF-8)
            if ref[0] == 'x':
                c = int(ref[1:], 16)
            else:
                c = int(ref)
            text = unichr(c).encode('utf-8')
        self.elementstack[-1][2].append(text)
    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        if not self.elementstack: return
        if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref)
        if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
            # markup-significant entities pass through unexpanded
            text = '&%s;' % ref
        else:
            # entity resolution graciously donated by Aaron Swartz
            def name2cp(k):
                import htmlentitydefs
                if hasattr(htmlentitydefs, 'name2codepoint'): # requires Python 2.3
                    return htmlentitydefs.name2codepoint[k]
                k = htmlentitydefs.entitydefs[k]
                if k.startswith('&#') and k.endswith(';'):
                    return int(k[2:-1]) # not in latin-1
                return ord(k)
            # unknown entities are passed through unexpanded
            try: name2cp(ref)
            except KeyError: text = '&%s;' % ref
            else: text = unichr(name2cp(ref)).encode('utf-8')
        self.elementstack[-1][2].append(text)
- def handle_data(self, text, escape=1):
- # called for each block of plain text, i.e. outside of any tag and
- # not containing any character or entity references
- if not self.elementstack: return
- if escape and self.contentparams.get('type') == 'application/xhtml+xml':
- text = _xmlescape(text)
- self.elementstack[-1][2].append(text)
    def handle_comment(self, text):
        # called for each comment, e.g. <!-- insert message here -->;
        # comments contribute nothing to the parsed feed
        pass
    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>;
        # processing instructions are ignored
        pass
    def handle_decl(self, text):
        # called for doctype declarations; ignored here
        pass
    def parse_declaration(self, i):
        """Override sgmllib's declaration handler to treat CDATA blocks
        as (escaped) character data.

        i is the offset of '<!' in self.rawdata; returns the offset of
        the first character past the declaration.
        """
        if _debug: sys.stderr.write('entering parse_declaration\n')
        if self.rawdata[i:i+9] == '<![CDATA[':
            k = self.rawdata.find(']]>', i)
            # unterminated CDATA section: consume the rest of the input
            if k == -1: k = len(self.rawdata)
            self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
            return k+3
        else:
            # any other declaration is skipped wholesale
            k = self.rawdata.find('>', i)
            return k+1
- def mapContentType(self, contentType):
- contentType = contentType.lower()
- if contentType == 'text':
- contentType = 'text/plain'
- elif contentType == 'html':
- contentType = 'text/html'
- elif contentType == 'xhtml':
- contentType = 'application/xhtml+xml'
- return contentType
    def trackNamespace(self, prefix, uri):
        """Record a namespace declaration and sniff the feed version
        from well-known namespace URIs (first declaration wins)."""
        loweruri = uri.lower()
        if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version:
            self.version = 'rss090'
        if loweruri == 'http://purl.org/rss/1.0/' and not self.version:
            self.version = 'rss10'
        if loweruri == 'http://www.w3.org/2005/atom' and not self.version:
            self.version = 'atom10'
        if loweruri.find('backend.userland.com/rss') <> -1:
            # match any backend.userland.com namespace
            uri = 'http://backend.userland.com/rss'
            loweruri = uri
        if self._matchnamespaces.has_key(loweruri):
            # known namespace: map the declared prefix to our canonical one
            self.namespacemap[prefix] = self._matchnamespaces[loweruri]
            self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
        else:
            self.namespacesInUse[prefix or ''] = uri
- def resolveURI(self, uri):
- return _urljoin(self.baseuri or '', uri)
- def decodeEntities(self, element, data):
- return data
- def push(self, element, expectingText):
- self.elementstack.append([element, expectingText, []])
    def pop(self, element, stripWhitespace=1):
        """Close the innermost element frame and store its accumulated
        text in the appropriate place (entry, feed, or source context).

        Returns the processed text, after base64 decoding, relative-URI
        resolution, entity decoding, sanitizing, and unicode conversion
        as applicable.
        """
        if not self.elementstack: return
        # guard against mismatched close tags
        if self.elementstack[-1][0] != element: return
        element, expectingText, pieces = self.elementstack.pop()
        output = ''.join(pieces)
        if stripWhitespace:
            output = output.strip()
        if not expectingText: return output
        # decode base64 content
        if base64 and self.contentparams.get('base64', 0):
            try:
                output = base64.decodestring(output)
            except binascii.Error:
                pass
            except binascii.Incomplete:
                pass
        # resolve relative URIs
        if (element in self.can_be_relative_uri) and output:
            output = self.resolveURI(output)
        # decode entities within embedded markup
        if not self.contentparams.get('base64', 0):
            output = self.decodeEntities(element, output)
        # remove temporary cruft from contentparams
        try:
            del self.contentparams['mode']
        except KeyError:
            pass
        try:
            del self.contentparams['base64']
        except KeyError:
            pass
        # resolve relative URIs within embedded markup
        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
            if element in self.can_contain_relative_uris:
                output = _resolveRelativeURIs(output, self.baseuri, self.encoding)
        # sanitize embedded markup
        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
            if element in self.can_contain_dangerous_markup:
                output = _sanitizeHTML(output, self.encoding)
        # convert to unicode using the document encoding (best-effort)
        if self.encoding and type(output) != type(u''):
            try:
                output = unicode(output, self.encoding)
            except:
                pass
        # categories/tags/keywords/whatever are handled in _end_category
        if element == 'category':
            return output
        # store output in appropriate place(s)
        if self.inentry and not self.insource:
            if element == 'content':
                # entries may have multiple content elements; keep them all
                self.entries[-1].setdefault(element, [])
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                self.entries[-1][element].append(contentparams)
            elif element == 'link':
                self.entries[-1][element] = output
                if output:
                    self.entries[-1]['links'][-1]['href'] = output
            else:
                if element == 'description':
                    element = 'summary'
                self.entries[-1][element] = output
                if self.incontent:
                    contentparams = copy.deepcopy(self.contentparams)
                    contentparams['value'] = output
                    self.entries[-1][element + '_detail'] = contentparams
        elif (self.infeed or self.insource) and (not self.intextinput) and (not self.inimage):
            context = self._getContext()
            if element == 'description':
                element = 'subtitle'
            context[element] = output
            if element == 'link':
                context['links'][-1]['href'] = output
            elif self.incontent:
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                context[element + '_detail'] = contentparams
        return output
    def pushContent(self, tag, attrsD, defaultContentType, expectingText):
        """Open a content-bearing element and record its content params
        (type, language, base URI, base64 flag)."""
        self.incontent += 1
        self.contentparams = FeedParserDict({
            'type': self.mapContentType(attrsD.get('type', defaultContentType)),
            'language': self.lang,
            'base': self.baseuri})
        # must run after contentparams is set: _isBase64 inspects its 'type'
        self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
        self.push(tag, expectingText)
- def popContent(self, tag):
- value = self.pop(tag)
- self.incontent -= 1
- self.contentparams.clear()
- return value
- def _mapToStandardPrefix(self, name):
- colonpos = name.find(':')
- if colonpos <> -1:
- prefix = name[:colonpos]
- suffix = name[colonpos+1:]
- prefix = self.namespacemap.get(prefix, prefix)
- name = prefix + ':' + suffix
- return name
- def _getAttribute(self, attrsD, name):
- return attrsD.get(self._mapToStandardPrefix(name))
- def _isBase64(self, attrsD, contentparams):
- if attrsD.get('mode', '') == 'base64':
- return 1
- if self.contentparams['type'].startswith('text/'):
- return 0
- if self.contentparams['type'].endswith('+xml'):
- return 0
- if self.contentparams['type'].endswith('/xml'):
- return 0
- return 1
- def _itsAnHrefDamnIt(self, attrsD):
- href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
- if href:
- try:
- del attrsD['url']
- except KeyError:
- pass
- try:
- del attrsD['uri']
- except KeyError:
- pass
- attrsD['href'] = href
- return attrsD
- def _save(self, key, value):
- context = self._getContext()
- context.setdefault(key, value)
- def _start_rss(self, attrsD):
- versionmap = {'0.91': 'rss091u',
- '0.92': 'rss092',
- '0.93': 'rss093',
- '0.94': 'rss094'}
- if not self.version:
- attr_version = attrsD.get('version', '')
- version = versionmap.get(attr_version)
- if version:
- self.version = version
- elif attr_version.startswith('2.'):
- self.version = 'rss20'
- else:
- self.version = 'rss'
    def _start_dlhottitles(self, attrsD):
        # <dlhottitles> only appears in Netscape's "Hot RSS" format
        self.version = 'hotrss'
    def _start_channel(self, attrsD):
        # feed-level container for RSS and CDF
        self.infeed = 1
        self._cdf_common(attrsD)
    _start_feedinfo = _start_channel
    def _cdf_common(self, attrsD):
        # CDF carries the modification date and link as attributes rather
        # than child elements; simulate the usual start/data/end sequence
        # by writing directly into the open element frame's text slot
        if attrsD.has_key('lastmod'):
            self._start_modified({})
            self.elementstack[-1][-1] = attrsD['lastmod']
            self._end_modified()
        if attrsD.has_key('href'):
            self._start_link({})
            self.elementstack[-1][-1] = attrsD['href']
            self._end_link()
- def _start_feed(self, attrsD):
- self.infeed = 1
- versionmap = {'0.1': 'atom01',
- '0.2': 'atom02',
- '0.3': 'atom03'}
- if not self.version:
- attr_version = attrsD.get('version')
- version = versionmap.get(attr_version)
- if version:
- self.version = version
- else:
- self.version = 'atom'
- def _end_channel(self):
- self.infeed = 0
- _end_feed = _end_channel
- def _start_image(self, attrsD):
- self.inimage = 1
- self.push('image', 0)
- context = self._getContext()
- context.setdefault('image', FeedParserDict())
- def _end_image(self):
- self.pop('image')
- self.inimage = 0
- def _start_textinput(self, attrsD):
- self.intextinput = 1
- self.push('textinput', 0)
- context = self._getContext()
- context.setdefault('textinput', FeedParserDict())
- _start_textInput = _start_textinput
- def _end_textinput(self):
- self.pop('textinput')
- self.intextinput = 0
- _end_textInput = _end_textinput
    def _start_author(self, attrsD):
        # author-ish elements from several vocabularies share one handler
        self.inauthor = 1
        self.push('author', 1)
    _start_managingeditor = _start_author
    _start_dc_author = _start_author
    _start_dc_creator = _start_author
    _start_itunes_author = _start_author
    def _end_author(self):
        self.pop('author')
        self.inauthor = 0
        # keep the 'author' string and 'author_detail' dict consistent
        self._sync_author_detail()
    _end_managingeditor = _end_author
    _end_dc_author = _end_author
    _end_dc_creator = _end_author
    _end_itunes_author = _end_author
    def _start_itunes_owner(self, attrsD):
        # itunes:owner is a person construct stored under 'publisher'
        self.inpublisher = 1
        self.push('publisher', 0)
    def _end_itunes_owner(self):
        self.pop('publisher')
        self.inpublisher = 0
        # keep 'publisher' and 'publisher_detail' consistent
        self._sync_author_detail('publisher')
    def _start_contributor(self, attrsD):
        # each contributor element appends a fresh dict to 'contributors'
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('contributor', 0)
    def _end_contributor(self):
        self.pop('contributor')
        self.incontributor = 0
    def _start_dc_contributor(self, attrsD):
        # dc:contributor has no child elements; its text is the name,
        # so the element itself is pushed as 'name'
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('name', 0)
    def _end_dc_contributor(self):
        self._end_name()
        self.incontributor = 0
- def _start_name(self, attrsD):
- self.push('name', 0)
- _start_itunes_name = _start_name
- def _end_name(self):
- value = self.pop('name')
- if self.inpublisher:
- self._save_author('name', value, 'publisher')
- elif self.inauthor:
- self._save_author('name', value)
- elif self.incontributor:
- self._save_contributor('name', value)
- elif self.intextinput:
- context = self._getContext()
- context['textinput']['name'] = value
- _end_itunes_name = _end_name
- def _start_width(self, attrsD):
- self.push('width', 0)
- def _end_width(self):
- value = self.pop('width')
- try:
- value = int(value)
- except:
- value = 0
- if self.inimage:
- context = self._getContext()
- context['image']['width'] = value
- def _start_height(self, attrsD):
- self.push('height', 0)
- def _end_height(self):
- value = self.pop('height')
- try:
- value = int(value)
- except:
- value = 0
- if self.inimage:
- context = self._getContext()
- context['image']['height'] = value
- def _start_url(self, attrsD):
- self.push('href', 1)
- _start_homepage = _start_url
- _start_uri = _start_url
- def _end_url(self):
- value = self.pop('href')
- if self.inauthor:
- self._save_author('href', value)
- elif self.incontributor:
- self._save_contributor('href', value)
- elif self.inimage:
- context = self._getContext()
- context['image']['href'] = value
- elif self.intextinput:
- context = self._getContext()
- context['textinput']['link'] = value
- _end_homepage = _end_url
- _end_uri = _end_url
- def _start_email(self, attrsD):
- self.push('email', 0)
- _start_itunes_email = _start_email
- def _end_email(self):
- value = self.pop('email')
- if self.inpublisher:
- self._save_author('email', value, 'publisher')
- elif self.inauthor:
- self._save_author('email', value)
- elif self.incontributor:
- self._save_contributor('email', value)
- _end_itunes_email = _end_email
- def _getContext(self):
- if self.insource:
- context = self.sourcedata
- elif self.inentry:
- context = self.entries[-1]
- else:
- context = self.feeddata
- return context
- def _save_author(self, key, value, prefix='author'):
- context = self._getContext()
- context.setdefault(prefix + '_detail', FeedParserDict())
- context[prefix + '_detail'][key] = value
- self._sync_author_detail()
- def _save_contributor(self, key, value):
- context = self._getContext()
- context.setdefault('contributors', [FeedParserDict()])
- context['contributors'][-1][key] = value
    def _sync_author_detail(self, key='author'):
        """Keep context[key] and context[key + '_detail'] consistent.

        If a structured detail dict exists, rebuild the display string
        from it.  Otherwise, try to split a display string of the form
        'Name (email)' (or similar) into name and email parts.
        """
        context = self._getContext()
        detail = context.get('%s_detail' % key)
        if detail:
            name = detail.get('name')
            email = detail.get('email')
            if name and email:
                context[key] = '%s (%s)' % (name, email)
            elif name:
                context[key] = name
            elif email:
                context[key] = email
        else:
            author = context.get(key)
            if not author: return
            emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))''', author)
            if not emailmatch: return
            email = emailmatch.group(0)
            # probably a better way to do the following, but it passes all the tests
            author = author.replace(email, '')
            author = author.replace('()', '')
            author = author.strip()
            if author and (author[0] == '('):
                author = author[1:]
            if author and (author[-1] == ')'):
                author = author[:-1]
            author = author.strip()
            context.setdefault('%s_detail' % key, FeedParserDict())
            context['%s_detail' % key]['name'] = author
            context['%s_detail' % key]['email'] = email
    def _start_subtitle(self, attrsD):
        """Open subtitle/tagline: collect plain-text content as 'subtitle'."""
        self.pushContent('subtitle', attrsD, 'text/plain', 1)
    _start_tagline = _start_subtitle
    _start_itunes_subtitle = _start_subtitle
    def _end_subtitle(self):
        """Close subtitle/tagline content."""
        self.popContent('subtitle')
    _end_tagline = _end_subtitle
    _end_itunes_subtitle = _end_subtitle
    def _start_rights(self, attrsD):
        """Open rights/copyright: collect plain-text content as 'rights'."""
        self.pushContent('rights', attrsD, 'text/plain', 1)
    _start_dc_rights = _start_rights
    _start_copyright = _start_rights
    def _end_rights(self):
        """Close rights/copyright content."""
        self.popContent('rights')
    _end_dc_rights = _end_rights
    _end_copyright = _end_rights
- def _start_item(self, attrsD):
- self.entries.append(FeedParserDict())
- self.push('item', 0)
- self.inentry = 1
- self.guidislink = 0
- id = self._getAttribute(attrsD, 'rdf:about')
- if id:
- context = self._getContext()
- context['id'] = id
- self._cdf_common(attrsD)
- _start_entry = _start_item
- _start_product = _start_item
    def _end_item(self):
        """Close an item/entry and clear the in-entry flag."""
        self.pop('item')
        self.inentry = 0
    _end_entry = _end_item
    def _start_dc_language(self, attrsD):
        """Open dc:language/<language>: collect the language code."""
        self.push('language', 1)
    _start_language = _start_dc_language
    def _end_dc_language(self):
        """Close the language element and remember it as the current lang."""
        self.lang = self.pop('language')
    _end_language = _end_dc_language
    def _start_dc_publisher(self, attrsD):
        """Open dc:publisher/webMaster: collect the publisher string."""
        self.push('publisher', 1)
    _start_webmaster = _start_dc_publisher
    def _end_dc_publisher(self):
        """Close publisher and sync the publisher_detail structure."""
        self.pop('publisher')
        self._sync_author_detail('publisher')
    _end_webmaster = _end_dc_publisher
    def _start_published(self, attrsD):
        """Open published/issued: collect the publication date string."""
        self.push('published', 1)
    _start_dcterms_issued = _start_published
    _start_issued = _start_published
    def _end_published(self):
        """Close published/issued: store both the raw date string (via pop)
        and its parsed form as 'published_parsed'."""
        value = self.pop('published')
        self._save('published_parsed', _parse_date(value))
    _end_dcterms_issued = _end_published
    _end_issued = _end_published
    def _start_updated(self, attrsD):
        """Open updated/modified/pubDate/dc:date: collect the date string."""
        self.push('updated', 1)
    _start_modified = _start_updated
    _start_dcterms_modified = _start_updated
    _start_pubdate = _start_updated
    _start_dc_date = _start_updated
- def _end_updated(self):
- value = self.pop('updated')
- parsed_value = _parse_date(value)
- self._save('updated_parsed', parsed_value)
- _end_modified = _end_updated
- _end_dcterms_modified = _end_updated
- _end_pubdate = _end_updated
- _end_dc_date = _end_updated
    def _start_created(self, attrsD):
        """Open created/dcterms:created: collect the creation date string."""
        self.push('created', 1)
    _start_dcterms_created = _start_created
    def _end_created(self):
        """Close created: store the raw string and its parsed form."""
        value = self.pop('created')
        self._save('created_parsed', _parse_date(value))
    _end_dcterms_created = _end_created
    def _start_expirationdate(self, attrsD):
        """Open <expirationDate>: collect the expiry date string."""
        self.push('expired', 1)
- def _end_expirationdate(self):
- self._save('expired_parsed', _parse_date(self.pop('expired')))
    def _start_cc_license(self, attrsD):
        """Handle <cc:license/>: the license URL lives in the rdf:resource
        attribute, so inject it as element text and close the 'license'
        field immediately (the element itself is empty)."""
        self.push('license', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('license')
    def _start_creativecommons_license(self, attrsD):
        """Open <creativeCommons:license>: the URL is the element text."""
        self.push('license', 1)
    def _end_creativecommons_license(self):
        """Close <creativeCommons:license>."""
        self.pop('license')
- def _addTag(self, term, scheme, label):
- context = self._getContext()
- tags = context.setdefault('tags', [])
- if (not term) and (not scheme) and (not label): return
- value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
- if value not in tags:
- tags.append(FeedParserDict({'term': term, 'scheme': scheme, 'label': label}))
    def _start_category(self, attrsD):
        """Open a category element: record term/scheme/label as a tag
        (the RSS 'domain' attribute doubles as the scheme), then start
        collecting the element text."""
        if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD))
        term = attrsD.get('term')
        scheme = attrsD.get('scheme', attrsD.get('domain'))
        label = attrsD.get('label')
        self._addTag(term, scheme, label)
        self.push('category', 1)
    _start_dc_subject = _start_category
    _start_keywords = _start_category
- def _end_itunes_keywords(self):
- for term in self.pop('itunes_keywords').split():
- self._addTag(term, 'http://www.itunes.com/', None)
    def _start_itunes_category(self, attrsD):
        """Open <itunes:category>: the name is in the 'text' attribute."""
        self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
        self.push('category', 1)
    def _end_category(self):
        """Close a category element.  If the opening tag produced a
        term-less tag (attribute-free form), fill in its term from the
        element text; otherwise add a new text-only tag.
        Relies on _addTag having created context['tags'] at open time."""
        value = self.pop('category')
        if not value: return
        context = self._getContext()
        tags = context['tags']
        if value and len(tags) and not tags[-1]['term']:
            tags[-1]['term'] = value
        else:
            self._addTag(value, None, None)
    _end_dc_subject = _end_category
    _end_keywords = _end_category
    _end_itunes_category = _end_category
    def _start_cloud(self, attrsD):
        """Handle RSS <cloud/>: store its attributes verbatim."""
        self._getContext()['cloud'] = FeedParserDict(attrsD)
    def _start_link(self, attrsD):
        """Handle <link>/<atom:link>: apply rel/type defaults, resolve the
        href against the base URI, record the link in context['links'],
        treat rel='enclosure' links as enclosures, and promote an
        'alternate' HTML link to context['link']."""
        attrsD.setdefault('rel', 'alternate')
        attrsD.setdefault('type', 'text/html')
        attrsD = self._itsAnHrefDamnIt(attrsD)
        if attrsD.has_key('href'):
            attrsD['href'] = self.resolveURI(attrsD['href'])
        expectingText = self.infeed or self.inentry or self.insource
        context = self._getContext()
        context.setdefault('links', [])
        context['links'].append(FeedParserDict(attrsD))
        if attrsD['rel'] == 'enclosure':
            self._start_enclosure(attrsD)
        if attrsD.has_key('href'):
            # a link element with an href attribute carries no element text
            expectingText = 0
            if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
                context['link'] = attrsD['href']
        else:
            self.push('link', expectingText)
    _start_producturl = _start_link
    def _end_link(self):
        """Close a <link> element; textinput and image links are stored on
        their own sub-dicts (note: the two ifs are independent, not elif)."""
        value = self.pop('link')
        context = self._getContext()
        if self.intextinput:
            context['textinput']['link'] = value
        if self.inimage:
            context['image']['link'] = value
    _end_producturl = _end_link
- def _start_guid(self, attrsD):
- self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
- self.push('id', 1)
    def _end_guid(self):
        """Close <guid>: save the id, and mirror it into 'link' when the
        guid is flagged as a permalink and no explicit link exists yet."""
        value = self.pop('id')
        self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
        if self.guidislink:
            # guid acts as link, but only if 'ispermalink' is not present or is 'true',
            # and only if the item doesn't already have a link element
            self._save('link', value)
    def _start_title(self, attrsD):
        """Open a title element; text content is only expected inside a
        feed, entry, or source context."""
        self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
    _start_dc_title = _start_title
    _start_media_title = _start_title
    def _end_title(self):
        """Close a title element, routing it to the open textinput or
        image sub-dict when applicable."""
        value = self.popContent('title')
        context = self._getContext()
        if self.intextinput:
            context['textinput']['title'] = value
        elif self.inimage:
            context['image']['title'] = value
    _end_dc_title = _end_title
    _end_media_title = _end_title
    def _start_description(self, attrsD):
        """Open <description>.  If a summary was already captured for this
        context, treat the description as full content instead of
        overwriting the summary."""
        context = self._getContext()
        if context.has_key('summary'):
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)
    def _start_abstract(self, attrsD):
        """Open <abstract>: stored as 'description', but plain text."""
        self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
    def _end_description(self):
        """Close description/abstract, honoring the summary-vs-content
        redirection set up by _start_description; also routes the value to
        an open textinput or image sub-dict."""
        if self._summaryKey == 'content':
            self._end_content()
        else:
            value = self.popContent('description')
            context = self._getContext()
            if self.intextinput:
                context['textinput']['description'] = value
            elif self.inimage:
                context['image']['description'] = value
        self._summaryKey = None
    _end_abstract = _end_description
    def _start_info(self, attrsD):
        """Open <info>/feedburner:browserFriendly: plain-text content."""
        self.pushContent('info', attrsD, 'text/plain', 1)
    _start_feedburner_browserfriendly = _start_info
    def _end_info(self):
        """Close <info> content."""
        self.popContent('info')
    _end_feedburner_browserfriendly = _end_info
    def _start_generator(self, attrsD):
        """Open <generator>: when attributes are present, resolve a
        url/uri-style attribute to 'href' and store the attribute dict as
        generator_detail; always collect the element text."""
        if attrsD:
            attrsD = self._itsAnHrefDamnIt(attrsD)
            if attrsD.has_key('href'):
                attrsD['href'] = self.resolveURI(attrsD['href'])
            self._getContext()['generator_detail'] = FeedParserDict(attrsD)
        self.push('generator', 1)
    def _end_generator(self):
        """Close <generator>: the element text becomes the generator name
        in generator_detail (when that dict was created at open time)."""
        value = self.pop('generator')
        context = self._getContext()
        if context.has_key('generator_detail'):
            context['generator_detail']['name'] = value
    def _start_admin_generatoragent(self, attrsD):
        """Handle <admin:generatorAgent/>: the agent URL lives in the
        rdf:resource attribute; inject it as element text, close the
        field, and record it as generator_detail['href']."""
        self.push('generator', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('generator')
        self._getContext()['generator_detail'] = FeedParserDict({'href': value})
    def _start_admin_errorreportsto(self, attrsD):
        """Handle <admin:errorReportsTo/>: the URL lives in the
        rdf:resource attribute; inject it as element text and close."""
        self.push('errorreportsto', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('errorreportsto')
    def _start_summary(self, attrsD):
        """Open a summary element.  If a summary already exists for this
        context, treat this one as full content instead; the choice is
        remembered in self._summaryKey for the matching end handler."""
        context = self._getContext()
        if context.has_key('summary'):
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self._summaryKey = 'summary'
            self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
    _start_itunes_summary = _start_summary
    def _end_summary(self):
        """Close a summary element, popping whichever key
        (_summaryKey: 'content' or 'summary') the start handler chose."""
        if self._summaryKey == 'content':
            self._end_content()
        else:
            self.popContent(self._summaryKey or 'summary')
        self._summaryKey = None
    _end_itunes_summary = _end_summary
- def _start_enclosure(self, attrsD):
- attrsD = self._itsAnHrefDamnIt(attrsD)
- self._getContext().setdefault('enclosures', []).append(FeedParserDict(attrsD))…
Large files files are truncated, but you can click here to view the full file