
/gluon/contrib/feedparser.py

https://code.google.com/p/web2py/
  1. """Universal feed parser
  2. Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
  3. Visit https://code.google.com/p/feedparser/ for the latest version
  4. Visit http://packages.python.org/feedparser/ for the latest documentation
  5. Required: Python 2.4 or later
  6. Recommended: iconv_codec <http://cjkpython.i18n.org/>
  7. """
  8. __version__ = "5.1.2"
  9. __license__ = """
  10. Copyright (c) 2010-2012 Kurt McKee <contactme@kurtmckee.org>
  11. Copyright (c) 2002-2008 Mark Pilgrim
  12. All rights reserved.
  13. Redistribution and use in source and binary forms, with or without modification,
  14. are permitted provided that the following conditions are met:
  15. * Redistributions of source code must retain the above copyright notice,
  16. this list of conditions and the following disclaimer.
  17. * Redistributions in binary form must reproduce the above copyright notice,
  18. this list of conditions and the following disclaimer in the documentation
  19. and/or other materials provided with the distribution.
  20. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
  21. AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  22. IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  23. ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  24. LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  25. CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  26. SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  27. INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  28. CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  29. ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  30. POSSIBILITY OF SUCH DAMAGE."""
  31. __author__ = "Mark Pilgrim <http://diveintomark.org/>"
  32. __contributors__ = ["Jason Diamond <http://injektilo.org/>",
  33. "John Beimler <http://john.beimler.org/>",
  34. "Fazal Majid <http://www.majid.info/mylos/weblog/>",
  35. "Aaron Swartz <http://aaronsw.com/>",
  36. "Kevin Marks <http://epeus.blogspot.com/>",
  37. "Sam Ruby <http://intertwingly.net/>",
  38. "Ade Oshineye <http://blog.oshineye.com/>",
  39. "Martin Pool <http://sourcefrog.net/>",
  40. "Kurt McKee <http://kurtmckee.org/>"]
  41. # HTTP "User-Agent" header to send to servers when downloading feeds.
  42. # If you are embedding feedparser in a larger application, you should
  43. # change this to your application name and URL.
  44. USER_AGENT = "UniversalFeedParser/%s +https://code.google.com/p/feedparser/" % __version__
  45. # HTTP "Accept" header to send to servers when downloading feeds. If you don't
  46. # want to send an Accept header, set this to None.
  47. ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
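# A minimal sketch (hypothetical application name and URL) of how an
# embedding application might override these two knobs before fetching
# anything; both are plain module-level strings, so reassignment is enough:
#
#     import feedparser
#     feedparser.USER_AGENT = "MyAggregator/1.0 +http://example.com/myaggregator"
#     feedparser.ACCEPT_HEADER = None  # suppress the Accept header entirely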
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]

# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
# or utidylib <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0

# List of Python interfaces for HTML Tidy, in order of preference. Only useful
# if TIDY_MARKUP = 1
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]

# If you want feedparser to automatically resolve all relative URIs, set this
# to 1.
RESOLVE_RELATIVE_URIS = 1

# If you want feedparser to automatically sanitize all potentially unsafe
# HTML content, set this to 1.
SANITIZE_HTML = 1

# If you want feedparser to automatically parse microformat content embedded
# in entry contents, set this to 1
PARSE_MICROFORMATS = 1
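# A sketch of how a caller that fully trusts its input might relax these
# flags at runtime; they are ordinary module globals consulted during each
# parse, so reassignment is all it takes:
#
#     import feedparser
#     feedparser.RESOLVE_RELATIVE_URIS = 0  # keep relative URIs untouched
#     feedparser.SANITIZE_HTML = 0          # keep embedded markup verbatim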
# ---------- Python 3 modules (make it work if possible) ----------
try:
    import rfc822
except ImportError:
    from email import _parseaddr as rfc822

try:
    # Python 3.1 introduces bytes.maketrans and simultaneously
    # deprecates string.maketrans; use bytes.maketrans if possible
    _maketrans = bytes.maketrans
except (NameError, AttributeError):
    import string
    _maketrans = string.maketrans

# base64 support for Atom feeds that contain embedded binary data
try:
    import base64, binascii
except ImportError:
    base64 = binascii = None
else:
    # Python 3.1 deprecates decodestring in favor of decodebytes
    _base64decode = getattr(base64, 'decodebytes', base64.decodestring)

# _s2bytes: convert a UTF-8 str to bytes if the interpreter is Python 3
# _l2bytes: convert a list of ints to bytes if the interpreter is Python 3
try:
    if bytes is str:
        # In Python 2.5 and below, bytes doesn't exist (NameError)
        # In Python 2.6 and above, bytes and str are the same type
        raise NameError
except NameError:
    # Python 2
    def _s2bytes(s):
        return s
    def _l2bytes(l):
        return ''.join(map(chr, l))
else:
    # Python 3
    def _s2bytes(s):
        return bytes(s, 'utf8')
    def _l2bytes(l):
        return bytes(l)
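# For illustration, the two helpers behave like this (results shown for
# Python 3; under Python 2 both simply return the equivalent byte string):
#
#     _s2bytes('abc')       # -> b'abc'
#     _l2bytes([104, 105])  # -> b'hi'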
# If you want feedparser to allow all URL schemes, set this to ()
# List culled from Python's urlparse documentation at:
# http://docs.python.org/library/urlparse.html
# as well as from "URI scheme" at Wikipedia:
# https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme
# Many more will likely need to be added!
ACCEPTABLE_URI_SCHEMES = (
    'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'magnet',
    'mailto', 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu',
    'sftp', 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet',
    'wais',
    # Additional common-but-unofficial schemes
    'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs',
    'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg',
)
#ACCEPTABLE_URI_SCHEMES = ()
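# This whitelist is consulted when URIs found in feed content are made safe
# and absolute (see _makeSafeAbsoluteURI, used later in this file); the net
# effect is roughly:
#
#     'http'       in the whitelist  -> 'http://example.com/' is kept
#     'javascript' not in whitelist  -> 'javascript:alert(1)' is dropped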
# ---------- required modules (should come with any Python distribution) ----------
import cgi
import codecs
import copy
import datetime
import re
import struct
import time
import types
import urllib
import urllib2
import urlparse
import warnings

from htmlentitydefs import name2codepoint, codepoint2name, entitydefs

try:
    from io import BytesIO as _StringIO
except ImportError:
    try:
        from cStringIO import StringIO as _StringIO
    except ImportError:
        from StringIO import StringIO as _StringIO

# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------

# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
    import gzip
except ImportError:
    gzip = None
try:
    import zlib
except ImportError:
    zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
    import xml.sax
    from xml.sax.saxutils import escape as _xmlescape
except ImportError:
    _XML_AVAILABLE = 0
    def _xmlescape(data, entities={}):
        data = data.replace('&', '&amp;')
        data = data.replace('>', '&gt;')
        data = data.replace('<', '&lt;')
        # iterate over (character, entity) pairs; iterating the dict
        # directly would yield only its keys
        for char, entity in entities.items():
            data = data.replace(char, entity)
        return data
else:
    try:
        xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
    except xml.sax.SAXReaderNotAvailable:
        _XML_AVAILABLE = 0
    else:
        _XML_AVAILABLE = 1
# sgmllib is not available by default in Python 3; if the end user doesn't have
# it available then we'll lose illformed XML parsing, content sanitizing, and
# microformat support (at least while feedparser depends on BeautifulSoup).
try:
    import sgmllib
except ImportError:
    # This is probably Python 3, which doesn't include sgmllib anymore
    _SGML_AVAILABLE = 0

    # Mock sgmllib enough to allow subclassing later on
    class sgmllib(object):
        class SGMLParser(object):
            def goahead(self, i):
                pass
            def parse_starttag(self, i):
                pass
else:
    _SGML_AVAILABLE = 1
    # sgmllib defines a number of module-level regular expressions that are
    # insufficient for the XML parsing feedparser needs. Rather than modify
    # the variables directly in sgmllib, they're defined here using the same
    # names, and the compiled code objects of several sgmllib.SGMLParser
    # methods are copied into _BaseHTMLProcessor so that they execute in
    # feedparser's scope instead of sgmllib's scope.
    charref = re.compile('&#(\d+|[xX][0-9a-fA-F]+);')
    tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
    attrfind = re.compile(
        r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)[$]?(\s*=\s*'
        r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?'
    )

    # Unfortunately, these must be copied over to prevent NameError exceptions
    entityref = sgmllib.entityref
    incomplete = sgmllib.incomplete
    interesting = sgmllib.interesting
    shorttag = sgmllib.shorttag
    shorttagopen = sgmllib.shorttagopen
    starttagopen = sgmllib.starttagopen
    class _EndBracketRegEx:
        def __init__(self):
            # Overriding the built-in sgmllib.endbracket regex allows the
            # parser to find angle brackets embedded in element attributes.
            self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
        def search(self, target, index=0):
            match = self.endbracket.match(target, index)
            if match is not None:
                # Returning a new object in the calling thread's context
                # resolves a thread-safety issue.
                return EndBracketMatch(match)
            return None

    class EndBracketMatch:
        def __init__(self, match):
            self.match = match
        def start(self, n):
            return self.match.end(n)

    endbracket = _EndBracketRegEx()
# iconv_codec provides support for more character encodings.
# It's available from http://cjkpython.i18n.org/
try:
    import iconv_codec
except ImportError:
    pass

# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
    import chardet
except ImportError:
    chardet = None

# BeautifulSoup is used to extract microformat content from HTML
# feedparser is tested using BeautifulSoup 3.2.0
# http://www.crummy.com/software/BeautifulSoup/
try:
    import BeautifulSoup
except ImportError:
    BeautifulSoup = None
    PARSE_MICROFORMATS = False
# ---------- don't touch these ----------
class ThingsNobodyCaresAboutButMe(Exception): pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
class UndeclaredNamespace(Exception): pass

SUPPORTED_VERSIONS = {'': u'unknown',
                      'rss090': u'RSS 0.90',
                      'rss091n': u'RSS 0.91 (Netscape)',
                      'rss091u': u'RSS 0.91 (Userland)',
                      'rss092': u'RSS 0.92',
                      'rss093': u'RSS 0.93',
                      'rss094': u'RSS 0.94',
                      'rss20': u'RSS 2.0',
                      'rss10': u'RSS 1.0',
                      'rss': u'RSS (unknown version)',
                      'atom01': u'Atom 0.1',
                      'atom02': u'Atom 0.2',
                      'atom03': u'Atom 0.3',
                      'atom10': u'Atom 1.0',
                      'atom': u'Atom (unknown version)',
                      'cdf': u'CDF',
                      }
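# After a successful parse, the detected dialect shows up as one of the keys
# above on the result's `version` attribute, e.g. (illustrative URL):
#
#     d = feedparser.parse('http://example.com/index.rss')
#     d.version                      # -> u'rss20'
#     SUPPORTED_VERSIONS[d.version]  # -> u'RSS 2.0'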
class FeedParserDict(dict):
    keymap = {'channel': 'feed',
              'items': 'entries',
              'guid': 'id',
              'date': 'updated',
              'date_parsed': 'updated_parsed',
              'description': ['summary', 'subtitle'],
              'description_detail': ['summary_detail', 'subtitle_detail'],
              'url': ['href'],
              'modified': 'updated',
              'modified_parsed': 'updated_parsed',
              'issued': 'published',
              'issued_parsed': 'published_parsed',
              'copyright': 'rights',
              'copyright_detail': 'rights_detail',
              'tagline': 'subtitle',
              'tagline_detail': 'subtitle_detail'}

    def __getitem__(self, key):
        if key == 'category':
            try:
                return dict.__getitem__(self, 'tags')[0]['term']
            except IndexError:
                raise KeyError, "object doesn't have key 'category'"
        elif key == 'enclosures':
            norel = lambda link: FeedParserDict([(name, value) for (name, value) in link.items() if name != 'rel'])
            return [norel(link) for link in dict.__getitem__(self, 'links') if link['rel'] == u'enclosure']
        elif key == 'license':
            for link in dict.__getitem__(self, 'links'):
                if link['rel'] == u'license' and 'href' in link:
                    return link['href']
        elif key == 'updated':
            # Temporarily help developers out by keeping the old
            # broken behavior that was reported in issue 310.
            # This fix was proposed in issue 328.
            if not dict.__contains__(self, 'updated') and \
               dict.__contains__(self, 'published'):
                warnings.warn("To avoid breaking existing software while "
                              "fixing issue 310, a temporary mapping has been created "
                              "from `updated` to `published` if `updated` doesn't "
                              "exist. This fallback will be removed in a future version "
                              "of feedparser.", DeprecationWarning)
                return dict.__getitem__(self, 'published')
            return dict.__getitem__(self, 'updated')
        elif key == 'updated_parsed':
            if not dict.__contains__(self, 'updated_parsed') and \
               dict.__contains__(self, 'published_parsed'):
                warnings.warn("To avoid breaking existing software while "
                              "fixing issue 310, a temporary mapping has been created "
                              "from `updated_parsed` to `published_parsed` if "
                              "`updated_parsed` doesn't exist. This fallback will be "
                              "removed in a future version of feedparser.",
                              DeprecationWarning)
                return dict.__getitem__(self, 'published_parsed')
            return dict.__getitem__(self, 'updated_parsed')
        else:
            realkey = self.keymap.get(key, key)
            if isinstance(realkey, list):
                for k in realkey:
                    if dict.__contains__(self, k):
                        return dict.__getitem__(self, k)
            elif dict.__contains__(self, realkey):
                return dict.__getitem__(self, realkey)
        return dict.__getitem__(self, key)

    def __contains__(self, key):
        if key in ('updated', 'updated_parsed'):
            # Temporarily help developers out by keeping the old
            # broken behavior that was reported in issue 310.
            # This fix was proposed in issue 328.
            return dict.__contains__(self, key)
        try:
            self.__getitem__(key)
        except KeyError:
            return False
        else:
            return True

    has_key = __contains__

    def get(self, key, default=None):
        try:
            return self.__getitem__(key)
        except KeyError:
            return default

    def __setitem__(self, key, value):
        key = self.keymap.get(key, key)
        if isinstance(key, list):
            key = key[0]
        return dict.__setitem__(self, key, value)

    def setdefault(self, key, value):
        if key not in self:
            self[key] = value
            return value
        return self[key]

    def __getattr__(self, key):
        # __getattribute__() is called first; this will be called
        # only if an attribute was not already found
        try:
            return self.__getitem__(key)
        except KeyError:
            raise AttributeError, "object has no attribute '%s'" % key

    def __hash__(self):
        return id(self)
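# The keymap above keeps old and new key names interchangeable; as a quick
# illustration:
#
#     d = FeedParserDict()
#     d['tagline'] = u'example'  # __setitem__ stores it under 'subtitle'
#     d['subtitle']              # -> u'example'
#     d.subtitle                 # attribute access works too (__getattr__)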
_cp1252 = {
    128: unichr(8364), # euro sign
    130: unichr(8218), # single low-9 quotation mark
    131: unichr( 402), # latin small letter f with hook
    132: unichr(8222), # double low-9 quotation mark
    133: unichr(8230), # horizontal ellipsis
    134: unichr(8224), # dagger
    135: unichr(8225), # double dagger
    136: unichr( 710), # modifier letter circumflex accent
    137: unichr(8240), # per mille sign
    138: unichr( 352), # latin capital letter s with caron
    139: unichr(8249), # single left-pointing angle quotation mark
    140: unichr( 338), # latin capital ligature oe
    142: unichr( 381), # latin capital letter z with caron
    145: unichr(8216), # left single quotation mark
    146: unichr(8217), # right single quotation mark
    147: unichr(8220), # left double quotation mark
    148: unichr(8221), # right double quotation mark
    149: unichr(8226), # bullet
    150: unichr(8211), # en dash
    151: unichr(8212), # em dash
    152: unichr( 732), # small tilde
    153: unichr(8482), # trade mark sign
    154: unichr( 353), # latin small letter s with caron
    155: unichr(8250), # single right-pointing angle quotation mark
    156: unichr( 339), # latin small ligature oe
    158: unichr( 382), # latin small letter z with caron
    159: unichr( 376), # latin capital letter y with diaeresis
}
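# This table is fed to unicode.translate() to repair text that was decoded
# as latin-1 but actually used windows-1252's 0x80-0x9f range, e.g.:
#
#     u'\x93quoted\x94'.translate(_cp1252)  # -> u'\u201cquoted\u201d'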
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
    uri = _urifixer.sub(r'\1\3', uri)
    #try:
    if not isinstance(uri, unicode):
        uri = uri.decode('utf-8', 'ignore')
    uri = urlparse.urljoin(base, uri)
    if not isinstance(uri, unicode):
        return uri.decode('utf-8', 'ignore')
    return uri
    #except:
    #    uri = urlparse.urlunparse([urllib.quote(part) for part in urlparse.urlparse(uri)])
    #    return urlparse.urljoin(base, uri)
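# _urifixer strips spurious extra slashes that follow the scheme before the
# standard join is applied; roughly (illustrative URLs):
#
#     _urljoin(u'http://example.com/a/', u'b')  # -> u'http://example.com/a/b'
#     _urljoin(u'', u'http:////example.com/')   # extra slashes removed first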
class _FeedParserMixin:
    namespaces = {
        '': '',
        'http://backend.userland.com/rss': '',
        'http://blogs.law.harvard.edu/tech/rss': '',
        'http://purl.org/rss/1.0/': '',
        'http://my.netscape.com/rdf/simple/0.9/': '',
        'http://example.com/newformat#': '',
        'http://example.com/necho': '',
        'http://purl.org/echo/': '',
        'uri/of/echo/namespace#': '',
        'http://purl.org/pie/': '',
        'http://purl.org/atom/ns#': '',
        'http://www.w3.org/2005/Atom': '',
        'http://purl.org/rss/1.0/modules/rss091#': '',
        'http://webns.net/mvcb/': 'admin',
        'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
        'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
        'http://media.tangent.org/rss/1.0/': 'audio',
        'http://backend.userland.com/blogChannelModule': 'blogChannel',
        'http://web.resource.org/cc/': 'cc',
        'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
        'http://purl.org/rss/1.0/modules/company': 'co',
        'http://purl.org/rss/1.0/modules/content/': 'content',
        'http://my.theinfo.org/changed/1.0/rss/': 'cp',
        'http://purl.org/dc/elements/1.1/': 'dc',
        'http://purl.org/dc/terms/': 'dcterms',
        'http://purl.org/rss/1.0/modules/email/': 'email',
        'http://purl.org/rss/1.0/modules/event/': 'ev',
        'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
        'http://freshmeat.net/rss/fm/': 'fm',
        'http://xmlns.com/foaf/0.1/': 'foaf',
        'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
        'http://postneo.com/icbm/': 'icbm',
        'http://purl.org/rss/1.0/modules/image/': 'image',
        'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
        'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
        'http://purl.org/rss/1.0/modules/link/': 'l',
        'http://search.yahoo.com/mrss': 'media',
        # Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace
        'http://search.yahoo.com/mrss/': 'media',
        'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
        'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
        'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
        'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
        'http://purl.org/rss/1.0/modules/reference/': 'ref',
        'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
        'http://purl.org/rss/1.0/modules/search/': 'search',
        'http://purl.org/rss/1.0/modules/slash/': 'slash',
        'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
        'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
        'http://hacks.benhammersley.com/rss/streaming/': 'str',
        'http://purl.org/rss/1.0/modules/subscription/': 'sub',
        'http://purl.org/rss/1.0/modules/syndication/': 'sy',
        'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf',
        'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
        'http://purl.org/rss/1.0/modules/threading/': 'thr',
        'http://purl.org/rss/1.0/modules/textinput/': 'ti',
        'http://madskills.com/public/xml/rss/module/trackback/': 'trackback',
        'http://wellformedweb.org/commentAPI/': 'wfw',
        'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
        'http://www.w3.org/1999/xhtml': 'xhtml',
        'http://www.w3.org/1999/xlink': 'xlink',
        'http://www.w3.org/XML/1998/namespace': 'xml',
    }
    _matchnamespaces = {}

    can_be_relative_uri = set(['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo'])
    can_contain_relative_uris = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
    can_contain_dangerous_markup = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
    html_types = [u'text/html', u'application/xhtml+xml']

    def __init__(self, baseuri=None, baselang=None, encoding=u'utf-8'):
        if not self._matchnamespaces:
            for k, v in self.namespaces.items():
                self._matchnamespaces[k.lower()] = v
        self.feeddata = FeedParserDict() # feed-level data
        self.encoding = encoding # character encoding
        self.entries = [] # list of entry-level data
        self.version = u'' # feed type/version, see SUPPORTED_VERSIONS
        self.namespacesInUse = {} # dictionary of namespaces defined by the feed

        # the following are used internally to track state;
        # this is really out of control and should be refactored
        self.infeed = 0
        self.inentry = 0
        self.incontent = 0
        self.intextinput = 0
        self.inimage = 0
        self.inauthor = 0
        self.incontributor = 0
        self.inpublisher = 0
        self.insource = 0
        self.sourcedata = FeedParserDict()
        self.contentparams = FeedParserDict()
        self._summaryKey = None
        self.namespacemap = {}
        self.elementstack = []
        self.basestack = []
        self.langstack = []
        self.baseuri = baseuri or u''
        self.lang = baselang or None
        self.svgOK = 0
        self.title_depth = -1
        self.depth = 0
        if baselang:
            self.feeddata['language'] = baselang.replace('_','-')

        # A map of the following form:
        #     {
        #         object_that_value_is_set_on: {
        #             property_name: depth_of_node_property_was_extracted_from,
        #             other_property: depth_of_node_property_was_extracted_from,
        #         },
        #     }
        self.property_depth_map = {}
    def _normalize_attributes(self, kv):
        k = kv[0].lower()
        v = k in ('rel', 'type') and kv[1].lower() or kv[1]
        # the sgml parser doesn't handle entities in attributes, nor
        # does it pass the attribute values through as unicode, while
        # strict xml parsers do -- account for this difference
        if isinstance(self, _LooseFeedParser):
            v = v.replace('&amp;', '&')
            if not isinstance(v, unicode):
                v = v.decode('utf-8')
        return (k, v)
    def unknown_starttag(self, tag, attrs):
        # increment depth counter
        self.depth += 1

        # normalize attrs
        attrs = map(self._normalize_attributes, attrs)

        # track xml:base and xml:lang
        attrsD = dict(attrs)
        baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
        if not isinstance(baseuri, unicode):
            baseuri = baseuri.decode(self.encoding, 'ignore')
        # ensure that self.baseuri is always an absolute URI that
        # uses a whitelisted URI scheme (e.g. not `javascript:`)
        if self.baseuri:
            self.baseuri = _makeSafeAbsoluteURI(self.baseuri, baseuri) or self.baseuri
        else:
            self.baseuri = _urljoin(self.baseuri, baseuri)
        lang = attrsD.get('xml:lang', attrsD.get('lang'))
        if lang == '':
            # xml:lang could be explicitly set to '', we need to capture that
            lang = None
        elif lang is None:
            # if no xml:lang is specified, use parent lang
            lang = self.lang
        if lang:
            if tag in ('feed', 'rss', 'rdf:RDF'):
                self.feeddata['language'] = lang.replace('_','-')
        self.lang = lang
        self.basestack.append(self.baseuri)
        self.langstack.append(lang)

        # track namespaces
        for prefix, uri in attrs:
            if prefix.startswith('xmlns:'):
                self.trackNamespace(prefix[6:], uri)
            elif prefix == 'xmlns':
                self.trackNamespace(None, uri)

        # track inline content
        if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
            if tag in ('xhtml:div', 'div'):
                return # typepad does this 10/2007
            # element declared itself as escaped markup, but it isn't really
            self.contentparams['type'] = u'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
            if tag.find(':') <> -1:
                prefix, tag = tag.split(':', 1)
                namespace = self.namespacesInUse.get(prefix, '')
                if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
                    attrs.append(('xmlns',namespace))
                if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
                    attrs.append(('xmlns',namespace))
            if tag == 'svg':
                self.svgOK += 1
            return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)

        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'

        # special hack for better tracking of empty textinput/image elements in illformed feeds
        if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
            self.intextinput = 0
        if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
            self.inimage = 0

        # call special handler (if defined) or default handler
        methodname = '_start_' + prefix + suffix
        try:
            method = getattr(self, methodname)
            return method(attrsD)
        except AttributeError:
            # Since there's no handler or something has gone wrong we explicitly add the element and its attributes
            unknown_tag = prefix + suffix
            if len(attrsD) == 0:
                # No attributes so merge it into the enclosing dictionary
                return self.push(unknown_tag, 1)
            else:
                # Has attributes so create it in its own dictionary
                context = self._getContext()
                context[unknown_tag] = attrsD
    def unknown_endtag(self, tag):
        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'
        if suffix == 'svg' and self.svgOK:
            self.svgOK -= 1

        # call special handler (if defined) or default handler
        methodname = '_end_' + prefix + suffix
        try:
            if self.svgOK:
                raise AttributeError()
            method = getattr(self, methodname)
            method()
        except AttributeError:
            self.pop(prefix + suffix)

        # track inline content
        if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
            # element declared itself as escaped markup, but it isn't really
            if tag in ('xhtml:div', 'div'):
                return # typepad does this 10/2007
            self.contentparams['type'] = u'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
            tag = tag.split(':')[-1]
            self.handle_data('</%s>' % tag, escape=0)

        # track xml:base and xml:lang going out of scope
        if self.basestack:
            self.basestack.pop()
            if self.basestack and self.basestack[-1]:
                self.baseuri = self.basestack[-1]
        if self.langstack:
            self.langstack.pop()
            if self.langstack: # and (self.langstack[-1] is not None):
                self.lang = self.langstack[-1]

        self.depth -= 1
    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        if not self.elementstack:
            return
        ref = ref.lower()
        if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
            text = '&#%s;' % ref
        else:
            if ref[0] == 'x':
                c = int(ref[1:], 16)
            else:
                c = int(ref)
            text = unichr(c).encode('utf-8')
        self.elementstack[-1][2].append(text)

    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        if not self.elementstack:
            return
        if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
            text = '&%s;' % ref
        elif ref in self.entities:
            text = self.entities[ref]
            if text.startswith('&#') and text.endswith(';'):
                return self.handle_entityref(text)
        else:
            try:
                name2codepoint[ref]
            except KeyError:
                text = '&%s;' % ref
            else:
                text = unichr(name2codepoint[ref]).encode('utf-8')
        self.elementstack[-1][2].append(text)

    def handle_data(self, text, escape=1):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        if not self.elementstack:
            return
        if escape and self.contentparams.get('type') == u'application/xhtml+xml':
            text = _xmlescape(text)
        self.elementstack[-1][2].append(text)

    def handle_comment(self, text):
        # called for each comment, e.g. <!-- insert message here -->
        pass

    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        pass

    def handle_decl(self, text):
        pass
    def parse_declaration(self, i):
        # override internal declaration handler to handle CDATA blocks
        if self.rawdata[i:i+9] == '<![CDATA[':
            k = self.rawdata.find(']]>', i)
            if k == -1:
                # CDATA block began but didn't finish
                k = len(self.rawdata)
                return k
            self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
            return k+3
        else:
            k = self.rawdata.find('>', i)
            if k >= 0:
                return k+1
            else:
                # We have an incomplete declaration.
                return k
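    # As an illustration of the override above: given raw input such as
    #
    #     <![CDATA[5 < 6]]>
    #
    # the CDATA payload is re-escaped and forwarded to handle_data() as
    # '5 &lt; 6', and the returned index points just past the closing ']]>'.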
    def mapContentType(self, contentType):
        contentType = contentType.lower()
        if contentType == 'text' or contentType == 'plain':
            contentType = u'text/plain'
        elif contentType == 'html':
            contentType = u'text/html'
        elif contentType == 'xhtml':
            contentType = u'application/xhtml+xml'
        return contentType

    def trackNamespace(self, prefix, uri):
        loweruri = uri.lower()
        if not self.version:
            if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/'):
                self.version = u'rss090'
            elif loweruri == 'http://purl.org/rss/1.0/':
                self.version = u'rss10'
            elif loweruri == 'http://www.w3.org/2005/atom':
                self.version = u'atom10'
        if loweruri.find(u'backend.userland.com/rss') <> -1:
            # match any backend.userland.com namespace
            uri = u'http://backend.userland.com/rss'
            loweruri = uri
        if loweruri in self._matchnamespaces:
            self.namespacemap[prefix] = self._matchnamespaces[loweruri]
            self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
        else:
            self.namespacesInUse[prefix or ''] = uri

    def resolveURI(self, uri):
        return _urljoin(self.baseuri or u'', uri)

    def decodeEntities(self, element, data):
        return data

    def strattrs(self, attrs):
        return ''.join([' %s="%s"' % (t[0], _xmlescape(t[1], {'"': '&quot;'})) for t in attrs])

    def push(self, element, expectingText):
        self.elementstack.append([element, expectingText, []])

    def pop(self, element, stripWhitespace=1):
        if not self.elementstack:
            return
        if self.elementstack[-1][0] != element:
            return

        element, expectingText, pieces = self.elementstack.pop()

        if self.version == u'atom10' and self.contentparams.get('type', u'text') == u'application/xhtml+xml':
            # remove enclosing child element, but only if it is a <div> and
            # only if all the remaining content is nested underneath it.
            # This means that the divs would be retained in the following:
            #     <div>foo</div><div>bar</div>
            while pieces and len(pieces)>1 and not pieces[-1].strip():
                del pieces[-1]
            while pieces and len(pieces)>1 and not pieces[0].strip():
                del pieces[0]
            if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
                depth = 0
                for piece in pieces[:-1]:
                    if piece.startswith('</'):
                        depth -= 1
                        if depth == 0:
                            break
                    elif piece.startswith('<') and not piece.endswith('/>'):
                        depth += 1
                else:
                    pieces = pieces[1:-1]

        # Ensure each piece is a str for Python 3
        for (i, v) in enumerate(pieces):
            if not isinstance(v, unicode):
                pieces[i] = v.decode('utf-8')

        output = u''.join(pieces)
        if stripWhitespace:
            output = output.strip()
        if not expectingText:
            return output

        # decode base64 content
        if base64 and self.contentparams.get('base64', 0):
            try:
                output = _base64decode(output)
            except binascii.Error:
                pass
            except binascii.Incomplete:
                pass
            except TypeError:
                # In Python 3, base64 takes and outputs bytes, not str
                # This may not be the most correct way to accomplish this
                output = _base64decode(output.encode('utf-8')).decode('utf-8')

        # resolve relative URIs
        if (element in self.can_be_relative_uri) and output:
            output = self.resolveURI(output)

        # decode entities within embedded markup
        if not self.contentparams.get('base64', 0):
            output = self.decodeEntities(element, output)

        # some feed formats require consumers to guess
        # whether the content is html or plain text
        if not self.version.startswith(u'atom') and self.contentparams.get('type') == u'text/plain':
            if self.lookslikehtml(output):
                self.contentparams['type'] = u'text/html'

        # remove temporary cruft from contentparams
        try:
            del self.contentparams['mode']
        except KeyError:
            pass
        try:
            del self.contentparams['base64']
        except KeyError:
            pass

        is_htmlish = self.mapContentType(self.contentparams.get('type', u'text/html')) in self.html_types
        # resolve relative URIs within embedded markup
        if is_htmlish and RESOLVE_RELATIVE_URIS:
            if element in self.can_contain_relative_uris:
                output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', u'text/html'))

        # parse microformats
        # (must do this before sanitizing because some microformats
        # rely on elements that we sanitize)
        if PARSE_MICROFORMATS and is_htmlish and element in ['content', 'description', 'summary']:
            mfresults = _parseMicroformats(output, self.baseuri, self.encoding)
            if mfresults:
                for tag in mfresults.get('tags', []):
                    self._addTag(tag['term'], tag['scheme'], tag['label'])
                for enclosure in mfresults.get('enclosures', []):
                    self._start_enclosure(enclosure)
                for xfn in mfresults.get('xfn', []):
                    self._addXFN(xfn['relationships'], xfn['href'], xfn['name'])
                vcard = mfresults.get('vcard')
                if vcard:
                    self._getContext()['vcard'] = vcard

        # sanitize embedded markup
        if is_htmlish and SANITIZE_HTML:
            if element in self.can_contain_dangerous_markup:
                output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', u'text/html'))

        if self.encoding and not isinstance(output, unicode):
            output = output.decode(self.encoding, 'ignore')

        # address common error where people take data that is already
        # utf-8, presume that it is iso-8859-1, and re-encode it.
        if self.encoding in (u'utf-8', u'utf-8_INVALID_PYTHON_3') and isinstance(output, unicode):
            try:
                output = output.encode('iso-8859-1').decode('utf-8')
            except (UnicodeEncodeError, UnicodeDecodeError):
                pass

        # map win-1252 extensions to the proper code points
        if isinstance(output, unicode):
            output = output.translate(_cp1252)

        # categories/tags/keywords/whatever are handled in _end_category
        if element == 'category':
            return output

        if element == 'title' and -1 < self.title_depth <= self.depth:
            return output

        # store output in appropriate place(s)
        if self.inentry and not self.insource:
            if element == 'content':
                self.entries[-1].setdefault(element, [])
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                self.entries[-1][element].append(contentparams)
            elif element == 'link':
                if not self.inimage:
                    # query variables in urls in link elements are improperly
                    # converted from `?a=1&b=2` to `?a=1&b;=2` as if they're
                    # unhandled character references. fix this special case.
                    output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
                    self.entries[-1][element] = output
                    if output:
                        self.entries[-1]['links'][-1]['href'] = output
            else:
                if element == 'description':
                    element = 'summary'
                old_value_depth = self.property_depth_map.setdefault(self.entries[-1], {}).get(element)
                if old_value_depth is None or self.depth <= old_value_depth:
                    self.property_depth_map[self.entries[-1]][element] = self.depth
                    self.entries[-1][element] = output
                if self.incontent:
                    contentparams = copy.deepcopy(self.contentparams)
                    contentparams['value'] = output
                    self.entries[-1][element + '_detail'] = contentparams
        elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
            context = self._getContext()
            if element == 'description':
                element = 'subtitle'
            context[element] = output
            if element == 'link':
                # fix query variables; see above for the explanation
                output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
                context[element] = output
                context['links'][-1]['href'] = output
            elif self.incontent:
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                context[element + '_detail'] = contentparams
        return output

    def pushContent(self, tag, attrsD, defaultContentType, expectingText):
        self.incontent += 1
        if self.lang:
            self.lang = self.lang.replace('_','-')
        self.contentparams = FeedParserDict({
            'type': self.mapContentType(attrsD.get('type', defaultContentType)),
            'language': self.lang,
            'base': self.baseuri})
        self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
        self.push(tag, expectingText)

    def popContent(self, tag):
        value = self.pop(tag)
        self.incontent -= 1
        self.contentparams.clear()
        return value

    # a number of elements in a number of RSS variants are nominally plain
    # text, but this is routinely ignored. This is an attempt to detect
    # the most common cases. As false positives often result in silent
    # data loss, this function errs on the conservative side.
    @staticmethod
    def lookslikehtml(s):
        # must have a close tag or an entity reference to qualify
        if not (re.search(r'</(\w+)>', s) or re.search("&#?\w+;", s)):
            return
        # all tags must be in a restricted subset of valid HTML tags
        if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements,
                  re.findall(r'</?(\w+)', s)):
            return
        # all entities must have been defined as valid HTML entities
        if filter(lambda e: e not in entitydefs.keys(), re.findall(r'&(\w+);', s)):
            return
        return 1
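    # Illustrative inputs (assuming the default sanitizer whitelist):
    #
    #     lookslikehtml(u'foo <b>bar</b>')   # -> 1 (close tag, whitelisted element)
    #     lookslikehtml(u'AT&amp;T')         # -> 1 (known entity)
    #     lookslikehtml(u'if a<b then c')    # -> None (no close tag or entity)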
    def _mapToStandardPrefix(self, name):
        colonpos = name.find(':')
        if colonpos <> -1:
            prefix = name[:colonpos]
            suffix = name[colonpos+1:]
            prefix = self.namespacemap.get(prefix, prefix)
            name = prefix + ':' + suffix
        return name

    def _getAttribute(self, attrsD, name):
        return attrsD.get(self._mapToStandardPrefix(name))

    def _isBase64(self, attrsD, contentparams):
        if attrsD.get('mode', '') == 'base64':
            return 1
        if self.contentparams['type'].startswith(u'text/'):
            return 0
        if self.contentparams['type'].endswith(u'+xml'):
            return 0
        if self.contentparams['type'].endswith(u'/xml'):
            return 0
        return 1

    def _itsAnHrefDamnIt(self, attrsD):
        href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
        if href:
            try:
                del attrsD['url']
            except KeyError:
                pass
            try:
                del attrsD['uri']
            except KeyError:
                pass
            attrsD['href'] = href
        return attrsD

    def _save(self, key, value, overwrite=False):
        context = self._getContext()
        if overwrite:
            context[key] = value
        else:
            context.setdefault(key, value)

    def _start_rss(self, attrsD):
        versionmap = {'0.91': u'rss091u',
                      '0.92': u'rss092',
                      '0.93': u'rss093',
                      '0.94': u'rss094'}
        # If we're here then this is an RSS feed.
        # If we don't have a version or have a version that starts with something
        # other than RSS then there's been a mistake. Correct it.
        if not self.version or not self.version.startswith(u'rss'):
            attr_version = attrsD.get('version', '')
            version = versionmap.get(attr_version)
            if version:
                self.version = version
            elif attr_version.startswith('2.'):
                self.version = u'rss20'
            else:
                self.version = u'rss'

    def _start_channel(self, attrsD):
        self.infeed = 1
        self._cdf_common(attrsD)

    def _cdf_common(self, attrsD):
        if 'lastmod' in attrsD:
            self._start_modified({})
            self.elementstack[-1][-1] = attrsD['lastmod']
            self._end_modified()
        if 'href' in attrsD:
            self._start_link({})
            self.elementstack[-1][-1] = attrsD['href']
            self._end_link()

    def _start_feed(self, attrsD):
        self.infeed = 1
        versionmap = {'0.1': u'atom01',
                      '0.2': u'atom02',
                      '0.3': u'atom03'}
        if not self.version:
            attr_version = attrsD.get('version')
            version = versionmap.get(attr_version)
            if version:
                self.version = version
            else:
                self.version = u'atom'

    def _end_channel(self):
        self.infeed = 0
    _end_feed = _end_channel

    def _start_image(self, attrsD):
        context = self._getContext()
        if not self.inentry:
            context.setdefault('image', FeedParserDict())
        self.inimage = 1
        self.title_depth = -1
        self.push('image', 0)

    def _end_image(self):
        self.pop('image')
        self.inimage = 0

    def _start_textinput(self, attrsD):
        context = self._getContext()
        context.setdefault('textinput', FeedParserDict())
        self.intextinput = 1
        self.title_depth = -1
        self.push('textinput', 0)
    _start_textInput = _start_textinput

    def _end_textinput(self):
        self.pop('textinput')
        self.intextinput = 0
    _end_textInput = _end_textinput

    def _start_author(self, attrsD):
        self.inauthor = 1
        self.push('author', 1)
        # Append a new FeedParserDict when expecting an author
        context = self._getContext()
        context.setdefault('authors', [])
        context['authors'].append(FeedParserDict())
    _start_managingeditor = _start_author
    _start_dc_author = _start_author
    _start_dc_creator = _start_author
    _start_itunes_author = _start_author

    def _end_author(self):
        self.pop('author')
        self.inauthor = 0
        self._sync_author_detail()
    _end_managingeditor = _end_author
    _end_dc_author = _end_author
    _end_dc_creator = _end_author
    _end_itunes_author = _end_author

    def _start_itunes_owner(self, attrsD):
        self.inpublisher = 1
        self.push('publisher', 0)

    def _end_itunes_owner(self):
        self.pop('publisher')
        self.inpublisher = 0
        self._sync_author_detail('publisher')

    def _start_contributor(self, attrsD):
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('contributor', 0)

    def _end_contributor(self):
        self.pop('contributor')
        self.incontributor = 0

    def _start_dc_contributor(self, attrsD):
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('name', 0)

    def _end_dc_contributor(self):
        self._end_name()
        self.incontributor = 0

    def _start_name(self, attrsD):
        self.push('name', 0)
    _start_itunes_name = _start_name

    def _end_name(self):
        value = self.pop('name')
        if self.inpublisher:
            self._save_author('name', value, 'publisher')
        elif self.inauthor:
            self._save_author('name', value)
        elif self.incontributor:
            self._save_contributor('name', value)
        elif self.intextinput:
            context = self._getContext()
            context['name'] = value
    _end_itunes_name = _end_name

    def _start_width(self, attrsD):
        self.push('width', 0)

    def _end_width(self):
        value = self.pop('width')
        try:
            value = int(value)
        except ValueError:
            value = 0
        if self.inimage:
            context = self._getContext()
            context['width'] = value

    def _start_height(self, attrsD):
        self.push('height', 0)

    def _end_height(self):
        value = self.pop('height')
        try:
            value = int(value)
        except ValueError:
            value = 0
        if self.inimage:
            context = self._getContext()
            context['height'] = value

    def _start_url(self, attrsD):
        self.push('href', 1)
    _start_homepage = _start_url
    _start_uri = _start_url

    def _end_url(self):
        value = self.pop('href')
        if self.inauthor:
            self._save_author('href', value)
        elif self.incontributor:
            self._save_contributor('href', value)
    _end_homepage = _end_url
    _end_uri = _end_url

    def _start_email(self, attrsD):
        self.push('email', 0)
    _start_itunes_email = _start_email

    def _end_email(self):
        value = self.pop('email')
        if self.inpublisher:
            self._save_author('email', value, 'publisher')
        elif self.inauthor:
            self._save_author('email', value)
        elif self.incontributor:
            self._save_contributor('email', value)
    _end_itunes_email = _end_email

    def _getContext(self):
        if self.insource:
            context = self.sourcedata
        elif self.inimage and 'image' in self.feeddata:
            context = self.feeddata['image']
        elif self.intextinput:
            context = self.feeddata['textinput']
        elif self.inentry:
            context = self.entries[-1]
        else:
            context = self.feeddata
        return context

    def _save_author(self, key, value, prefix='author'):
        context = self._getContext()
        context.setdefault(prefix + '_detail', FeedParserDict())
        context[prefix + '_detail'][key] = value
        self._sync_author_detail()
        context.setdefault('authors', [FeedParserDict()])
        context['authors'][-1][key] = value

    def _save_contributor(self, key, value):
        context = self._getContext()
        context.setdefault('contributors', [FeedParserDict()])
        context['contributors'][-1][key] = value

    def _sync_author_detail(self, key='author'):
        context = self._getContext()
        detail = context.get('%s_detail' % key)
        if detail:
            name = detail.get('name')
            email = detail.get('email')
            if name and email:
                context[key] = u'%s (%s)' % (name, email)
            elif name:
                context[key] = name
            elif email:
                context[key] = email
        else:
            author, email = context.get(key), None
            if not author:
                return
            emailmatch = re.search(ur'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
            if emailmatch:
                email = emailmatch.group(0)
                # probably a better way to do the following, but it passes all the tests
                author = author.replace(email, u'')
                author = author.replace(u'()', u'')
                author = author.replace(u'<>', u'')
                author = author.replace(u'&lt;&gt;', u'')
                author = author.strip()
                if author and (author[0] == u'('):
                    author = author[1:]
                if author and (author[-1] == u')'):
                    author = author[:-1]
                author = author.strip()
            if author or email:
                context.setdefault('%s_detail' % key, FeedParserDict())
            if author:
                context['%s_detail' % key]['name'] = author
            if email:
                context['%s_detail' % key]['email'] = email
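    # For illustration, given a feed-level value such as
    #     <author>John Doe (jd@example.com)</author>
    # the fallback branch above back-fills author_detail so that afterwards:
    #     context['author_detail']['name']  == u'John Doe'
    #     context['author_detail']['email'] == u'jd@example.com'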
  1208. def _start_subtitle(self, attrsD):
  1209. self.pushContent('subtitle', attrsD, u'text/plain', 1)
  1210. _start_tagline = _start_subtitle
  1211. _start_itunes_subtitle = _start_subtitle
  1212. def _end_subtitle(self):
  1213. self.popContent('subtitle')
  1214. _end_tagline = _end_subtitle
  1215. _end_itunes_subtitle = _end_subtitle
  1216. def _start_rights(self, attrsD):
  1217. self.pushContent('rights', attrsD, u'text/plain', 1)
  1218. _start_dc_rights = _start_rights
  1219. _start_copyright = _start_rights
  1220. def _end_rights(self):
  1221. self.popContent('rights')
  1222. _end_dc_rights = _end_rights
  1223. _end_copyright = _end_rights
  1224. def _start_item(self, attrsD):
  1225. self.entries.append(FeedParserDict())
  1226. self.push('item', 0)
  1227. self.inentry = 1
  1228. self.guidislink = 0
  1229. self.title_depth = -1
  1230. id = self._getAttribute(attrsD, 'rdf:about')
  1231. if id:
  1232. context = self._getContext()
  1233. context['id'] = id
  1234. self._cdf_common(attrsD)
  1235. _start_entry = _start_item
  1236. def _end_item(self):
  1237. self.pop('item')
  1238. self.inentry = 0
  1239. _end_entry = _end_item
  1240. def _start_dc_language(self, attrsD):
  1241. self.push('language', 1)
  1242. _start_language = _start_dc_language
  1243. def _end_dc_language(self):
  1244. self.lang = self.pop('language')
  1245. _end_language = _end_dc_language
  1246. def _start_dc_publisher(self, attrsD):
  1247. self.push('publisher', 1)
  1248. _start_webmaster = _start_dc_publisher
  1249. def _end_dc_publisher(self):
  1250. self.pop('publisher')
  1251. self._sync_author_detail('publisher')
  1252. _end_webmaster = _end_dc_publisher
  1253. def _start_published(self, attrsD):
  1254. self.push('published', 1)
  1255. _start_dcterms_issued = _start_published
  1256. _start_issued = _start_published
  1257. _start_pubdate = _start_published
  1258. def _end_published(self):
  1259. value = self.pop('published')
  1260. self._save('published_parsed', _parse_date(value), overwrite=True)
  1261. _end_dcterms_issued = _end_published
  1262. _end_issued = _end_published
  1263. _end_pubdate = _end_published
  1264. def _start_updated(self, attrsD):
  1265. self.push('updated', 1)
  1266. _start_modified = _start_updated
  1267. _start_dcterms_modified = _start_updated
  1268. _start_dc_date = _start_updated
  1269. _start_lastbuilddate = _start_updated
  1270. def _end_updated(self):
  1271. value = self.pop('updated')
  1272. parsed_value = _parse_date(value)
  1273. self._save('updated_parsed', parsed_value, overwrite=True)
  1274. _end_modified = _end_updated
  1275. _end_dcterms_modified = _end_updated
  1276. _end_dc_date = _end_updated
  1277. _end_lastbuilddate = _end_updated
  1278. def _start_created(self, attrsD):
  1279. self.push('created', 1)
  1280. _start_dcterms_created = _start_created
  1281. def _end_created(self):
  1282. value = self.pop('created')
  1283. self._save('created_parsed', _parse_date(value), overwrite=True)
  1284. _end_dcterms_created = _end_created
  1285. def _start_expirationdate(self, attrsD):
  1286. self.push('expired', 1)
  1287. def _end_expirationdate(self):
  1288. self._save('expired_parsed', _parse_date(self.pop('expired')), overwrite=True)
  1289. def _start_cc_license(self, attrsD):
  1290. context = self._getContext()
  1291. value = self._getAttribute(attrsD, 'rdf:resource')
  1292. attrsD = FeedParserDict()
  1293. attrsD['rel'] = u'license'
  1294. if value:
  1295. attrsD['href']=value
  1296. context.setdefault('links', []).append(attrsD)
  1297. def _start_creativecommons_license(self, attrsD):
  1298. self.push('license', 1)
  1299. _start_creativeCommons_license = _start_creativecommons_license
  1300. def _end_creativecommons_license(self):
  1301. value = self.pop('license')
  1302. context = self._getContext()
  1303. attrsD = FeedParserDict()
  1304. attrsD['rel'] = u'license'
  1305. if value:
  1306. attrsD['href'] = value
  1307. context.setdefault('links', []).append(attrsD)
  1308. del context['license']
  1309. _end_creativeCommons_license = _end_creativecommons_license
  1310. def _addXFN(self, relationships, href, name):
  1311. context = self._getContext()
  1312. xfn = context.setdefault('xfn', [])
  1313. value = FeedParserDict({'relationships': relationships, 'href': href, 'name': name})
  1314. if value not in xfn:
  1315. xfn.append(value)
  1316. def _addTag(self, term, scheme, label):
  1317. context = self._getContext()
  1318. tags = context.setdefault('tags', [])
  1319. if (not term) and (not scheme) and (not label):
  1320. return
  1321. value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
  1322. if value not in tags:
  1323. tags.append(value)
  1324. def _start_category(self, attrsD):
  1325. term = attrsD.get('term')
  1326. scheme = attrsD.get('scheme', attrsD.get('domain'))
  1327. label = attrsD.get('label')
  1328. self._addTag(term, scheme, label)
  1329. self.push('category', 1)
  1330. _start_dc_subject = _start_category
  1331. _start_keywords = _start_category
  1332. def _start_media_category(self, attrsD):
  1333. attrsD.setdefault('scheme', u'http://search.yahoo.com/mrss/category_schema')
  1334. self._start_category(attrsD)
  1335. def _end_itunes_keywords(self):
  1336. for term in self.pop('itunes_keywords').split(','):
  1337. if term.strip():
  1338. self._addTag(term.strip(), u'http://www.itunes.com/', None)
  1339. def _start_itunes_category(self, attrsD):
  1340. self._addTag(attrsD.get('text'), u'http://www.itunes.com/', None)
  1341. self.push('category', 1)
  1342. def _end_category(self):
  1343. value = self.pop('category')
  1344. if not value:
  1345. return
  1346. context = self._getContext()
  1347. tags = context['tags']
  1348. if value and len(tags) and not tags[-1]['term']:
  1349. tags[-1]['term'] = value
  1350. else:
  1351. self._addTag(value, None, None)
  1352. _end_dc_subject = _end_category
  1353. _end_keywords = _end_category
  1354. _end_itunes_category = _end_category
  1355. _end_media_category = _end_category
  1356. def _start_cloud(self, attrsD):
  1357. self._getContext()['cloud'] = FeedParserDict(attrsD)
  1358. def _start_link(self, attrsD):
  1359. attrsD.setdefault('rel', u'alternate')
  1360. if attrsD['rel'] == u'self':
  1361. attrsD.setdefault('type', u'application/atom+xml')
  1362. else:
  1363. attrsD.setdefault('type', u'text/html')
  1364. context = self._getContext()
  1365. attrsD = self._itsAnHrefDamnIt(attrsD)
  1366. if 'href' in attrsD:
  1367. attrsD['href'] = self.resolveURI(attrsD['href'])
  1368. expectingText = self.infeed or self.inentry or self.insource
  1369. context.setdefault('links', [])
  1370. if not (self.inentry and self.inimage):
  1371. context['links'].append(FeedParserDict(attrsD))
  1372. if 'href' in attrsD:
  1373. expectingText = 0
  1374. if (attrsD.get('rel') == u'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
  1375. context['link'] = attrsD['href']
  1376. else:
  1377. self.push('link', expectingText)
  1378. def _end_link(self):
  1379. value = self.pop('link')
  1380. def _start_guid(self, attrsD):
  1381. self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
  1382. self.push('id', 1)
  1383. _start_id = _start_guid
  1384. def _end_guid(self):
  1385. value = self.pop('id')
  1386. self._save('guidislink', self.guidislink and 'link' not in self._getContext())
  1387. if self.guidislink:
  1388. # guid acts as link, but only if 'ispermalink' is not present or is 'true',
  1389. # and only if the item doesn't already have a link element
  1390. self._save('link', value)
  1391. _end_id = _end_guid
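# Illustrative sketch (not part of the parser): given an RSS item like
#   <guid isPermaLink="true">http://example.com/post/1</guid>
# with no sibling <link> element, the guid value above is also saved as the
# entry's 'link', so d.entries[0].link falls back to the permalink.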
  1392. def _start_title(self, attrsD):
  1393. if self.svgOK:
  1394. return self.unknown_starttag('title', attrsD.items())
  1395. self.pushContent('title', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
  1396. _start_dc_title = _start_title
  1397. _start_media_title = _start_title
  1398. def _end_title(self):
  1399. if self.svgOK:
  1400. return
  1401. value = self.popContent('title')
  1402. if not value:
  1403. return
  1404. self.title_depth = self.depth
  1405. _end_dc_title = _end_title
  1406. def _end_media_title(self):
  1407. title_depth = self.title_depth
  1408. self._end_title()
  1409. self.title_depth = title_depth
  1410. def _start_description(self, attrsD):
  1411. context = self._getContext()
  1412. if 'summary' in context:
  1413. self._summaryKey = 'content'
  1414. self._start_content(attrsD)
  1415. else:
  1416. self.pushContent('description', attrsD, u'text/html', self.infeed or self.inentry or self.insource)
  1417. _start_dc_description = _start_description
  1418. def _start_abstract(self, attrsD):
  1419. self.pushContent('description', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
  1420. def _end_description(self):
  1421. if self._summaryKey == 'content':
  1422. self._end_content()
  1423. else:
  1424. value = self.popContent('description')
  1425. self._summaryKey = None
  1426. _end_abstract = _end_description
  1427. _end_dc_description = _end_description
  1428. def _start_info(self, attrsD):
  1429. self.pushContent('info', attrsD, u'text/plain', 1)
  1430. _start_feedburner_browserfriendly = _start_info
  1431. def _end_info(self):
  1432. self.popContent('info')
  1433. _end_feedburner_browserfriendly = _end_info
  1434. def _start_generator(self, attrsD):
  1435. if attrsD:
  1436. attrsD = self._itsAnHrefDamnIt(attrsD)
  1437. if 'href' in attrsD:
  1438. attrsD['href'] = self.resolveURI(attrsD['href'])
  1439. self._getContext()['generator_detail'] = FeedParserDict(attrsD)
  1440. self.push('generator', 1)
  1441. def _end_generator(self):
  1442. value = self.pop('generator')
  1443. context = self._getContext()
  1444. if 'generator_detail' in context:
  1445. context['generator_detail']['name'] = value
  1446. def _start_admin_generatoragent(self, attrsD):
  1447. self.push('generator', 1)
  1448. value = self._getAttribute(attrsD, 'rdf:resource')
  1449. if value:
  1450. self.elementstack[-1][2].append(value)
  1451. self.pop('generator')
  1452. self._getContext()['generator_detail'] = FeedParserDict({'href': value})
  1453. def _start_admin_errorreportsto(self, attrsD):
  1454. self.push('errorreportsto', 1)
  1455. value = self._getAttribute(attrsD, 'rdf:resource')
  1456. if value:
  1457. self.elementstack[-1][2].append(value)
  1458. self.pop('errorreportsto')
  1459. def _start_summary(self, attrsD):
  1460. context = self._getContext()
  1461. if 'summary' in context:
  1462. self._summaryKey = 'content'
  1463. self._start_content(attrsD)
  1464. else:
  1465. self._summaryKey = 'summary'
  1466. self.pushContent(self._summaryKey, attrsD, u'text/plain', 1)
  1467. _start_itunes_summary = _start_summary
  1468. def _end_summary(self):
  1469. if self._summaryKey == 'content':
  1470. self._end_content()
  1471. else:
  1472. self.popContent(self._summaryKey or 'summary')
  1473. self._summaryKey = None
  1474. _end_itunes_summary = _end_summary
  1475. def _start_enclosure(self, attrsD):
  1476. attrsD = self._itsAnHrefDamnIt(attrsD)
  1477. context = self._getContext()
  1478. attrsD['rel'] = u'enclosure'
  1479. context.setdefault('links', []).append(FeedParserDict(attrsD))
  1480. def _start_source(self, attrsD):
  1481. if 'url' in attrsD:
  1482. # This means that we're processing a source element from an RSS 2.0 feed
  1483. self.sourcedata['href'] = attrsD[u'url']
  1484. self.push('source', 1)
  1485. self.insource = 1
  1486. self.title_depth = -1
  1487. def _end_source(self):
  1488. self.insource = 0
  1489. value = self.pop('source')
  1490. if value:
  1491. self.sourcedata['title'] = value
  1492. self._getContext()['source'] = copy.deepcopy(self.sourcedata)
  1493. self.sourcedata.clear()
  1494. def _start_content(self, attrsD):
  1495. self.pushContent('content', attrsD, u'text/plain', 1)
  1496. src = attrsD.get('src')
  1497. if src:
  1498. self.contentparams['src'] = src
  1499. self.push('content', 1)
  1500. def _start_body(self, attrsD):
  1501. self.pushContent('content', attrsD, u'application/xhtml+xml', 1)
  1502. _start_xhtml_body = _start_body
  1503. def _start_content_encoded(self, attrsD):
  1504. self.pushContent('content', attrsD, u'text/html', 1)
  1505. _start_fullitem = _start_content_encoded
  1506. def _end_content(self):
  1507. copyToSummary = self.mapContentType(self.contentparams.get('type')) in ([u'text/plain'] + self.html_types)
  1508. value = self.popContent('content')
  1509. if copyToSummary:
  1510. self._save('summary', value)
  1511. _end_body = _end_content
  1512. _end_xhtml_body = _end_content
  1513. _end_content_encoded = _end_content
  1514. _end_fullitem = _end_content
  1515. def _start_itunes_image(self, attrsD):
  1516. self.push('itunes_image', 0)
  1517. if attrsD.get('href'):
  1518. self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
  1519. elif attrsD.get('url'):
  1520. self._getContext()['image'] = FeedParserDict({'href': attrsD.get('url')})
  1521. _start_itunes_link = _start_itunes_image
  1522. def _end_itunes_block(self):
  1523. value = self.pop('itunes_block', 0)
  1524. self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
  1525. def _end_itunes_explicit(self):
  1526. value = self.pop('itunes_explicit', 0)
1527. # Convert 'yes' -> True, 'clean' -> False, and any other value to None.
  1528. # False and None both evaluate as False, so the difference can be ignored
  1529. # by applications that only need to know if the content is explicit.
  1530. self._getContext()['itunes_explicit'] = (None, False, True)[(value == 'yes' and 2) or value == 'clean' or 0]
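# Index arithmetic sketch for the line above (Python 2 booleans are ints):
# value == 'yes' selects index 2 -> True; value == 'clean' selects index
# 1 -> False; any other value selects index 0 -> None.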
  1531. def _start_media_content(self, attrsD):
  1532. context = self._getContext()
  1533. context.setdefault('media_content', [])
  1534. context['media_content'].append(attrsD)
  1535. def _start_media_thumbnail(self, attrsD):
  1536. context = self._getContext()
  1537. context.setdefault('media_thumbnail', [])
1538. self.push('url', 1) # capture text content for feeds that put the thumbnail URL in the element body rather than in a url= attribute
  1539. context['media_thumbnail'].append(attrsD)
  1540. def _end_media_thumbnail(self):
  1541. url = self.pop('url')
  1542. context = self._getContext()
1543. if url is not None and url.strip():
  1544. if 'url' not in context['media_thumbnail'][-1]:
  1545. context['media_thumbnail'][-1]['url'] = url
  1546. def _start_media_player(self, attrsD):
  1547. self.push('media_player', 0)
  1548. self._getContext()['media_player'] = FeedParserDict(attrsD)
  1549. def _end_media_player(self):
  1550. value = self.pop('media_player')
  1551. context = self._getContext()
  1552. context['media_player']['content'] = value
  1553. def _start_newlocation(self, attrsD):
  1554. self.push('newlocation', 1)
  1555. def _end_newlocation(self):
  1556. url = self.pop('newlocation')
  1557. context = self._getContext()
  1558. # don't set newlocation if the context isn't right
  1559. if context is not self.feeddata:
  1560. return
  1561. context['newlocation'] = _makeSafeAbsoluteURI(self.baseuri, url.strip())
  1562. if _XML_AVAILABLE:
  1563. class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
  1564. def __init__(self, baseuri, baselang, encoding):
  1565. xml.sax.handler.ContentHandler.__init__(self)
  1566. _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
  1567. self.bozo = 0
  1568. self.exc = None
  1569. self.decls = {}
  1570. def startPrefixMapping(self, prefix, uri):
  1571. if not uri:
  1572. return
  1573. # Jython uses '' instead of None; standardize on None
  1574. prefix = prefix or None
  1575. self.trackNamespace(prefix, uri)
  1576. if prefix and uri == 'http://www.w3.org/1999/xlink':
  1577. self.decls['xmlns:' + prefix] = uri
  1578. def startElementNS(self, name, qname, attrs):
  1579. namespace, localname = name
  1580. lowernamespace = str(namespace or '').lower()
1581. if lowernamespace.find(u'backend.userland.com/rss') != -1:
  1582. # match any backend.userland.com namespace
  1583. namespace = u'http://backend.userland.com/rss'
  1584. lowernamespace = namespace
  1585. if qname and qname.find(':') > 0:
  1586. givenprefix = qname.split(':')[0]
  1587. else:
  1588. givenprefix = None
  1589. prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
1590. if givenprefix and (prefix is None or (prefix == '' and lowernamespace == '')) and givenprefix not in self.namespacesInUse:
1591. raise UndeclaredNamespace("'%s' is not associated with a namespace" % givenprefix)
  1592. localname = str(localname).lower()
  1593. # qname implementation is horribly broken in Python 2.1 (it
  1594. # doesn't report any), and slightly broken in Python 2.2 (it
  1595. # doesn't report the xml: namespace). So we match up namespaces
  1596. # with a known list first, and then possibly override them with
  1597. # the qnames the SAX parser gives us (if indeed it gives us any
  1598. # at all). Thanks to MatejC for helping me test this and
  1599. # tirelessly telling me that it didn't work yet.
  1600. attrsD, self.decls = self.decls, {}
  1601. if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
  1602. attrsD['xmlns']=namespace
  1603. if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
  1604. attrsD['xmlns']=namespace
  1605. if prefix:
  1606. localname = prefix.lower() + ':' + localname
  1607. elif namespace and not qname: #Expat
  1608. for name,value in self.namespacesInUse.items():
  1609. if name and value == namespace:
  1610. localname = name + ':' + localname
  1611. break
  1612. for (namespace, attrlocalname), attrvalue in attrs.items():
  1613. lowernamespace = (namespace or '').lower()
  1614. prefix = self._matchnamespaces.get(lowernamespace, '')
  1615. if prefix:
  1616. attrlocalname = prefix + ':' + attrlocalname
  1617. attrsD[str(attrlocalname).lower()] = attrvalue
  1618. for qname in attrs.getQNames():
  1619. attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
  1620. self.unknown_starttag(localname, attrsD.items())
  1621. def characters(self, text):
  1622. self.handle_data(text)
  1623. def endElementNS(self, name, qname):
  1624. namespace, localname = name
  1625. lowernamespace = str(namespace or '').lower()
  1626. if qname and qname.find(':') > 0:
  1627. givenprefix = qname.split(':')[0]
  1628. else:
  1629. givenprefix = ''
  1630. prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
  1631. if prefix:
  1632. localname = prefix + ':' + localname
  1633. elif namespace and not qname: #Expat
  1634. for name,value in self.namespacesInUse.items():
  1635. if name and value == namespace:
  1636. localname = name + ':' + localname
  1637. break
  1638. localname = str(localname).lower()
  1639. self.unknown_endtag(localname)
  1640. def error(self, exc):
  1641. self.bozo = 1
  1642. self.exc = exc
  1643. # drv_libxml2 calls warning() in some cases
  1644. warning = error
  1645. def fatalError(self, exc):
  1646. self.error(exc)
  1647. raise exc
  1648. class _BaseHTMLProcessor(sgmllib.SGMLParser):
  1649. special = re.compile('''[<>'"]''')
  1650. bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
  1651. elements_no_end_tag = set([
  1652. 'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
  1653. 'hr', 'img', 'input', 'isindex', 'keygen', 'link', 'meta', 'param',
  1654. 'source', 'track', 'wbr'
  1655. ])
  1656. def __init__(self, encoding, _type):
  1657. self.encoding = encoding
  1658. self._type = _type
  1659. sgmllib.SGMLParser.__init__(self)
  1660. def reset(self):
  1661. self.pieces = []
  1662. sgmllib.SGMLParser.reset(self)
  1663. def _shorttag_replace(self, match):
  1664. tag = match.group(1)
  1665. if tag in self.elements_no_end_tag:
  1666. return '<' + tag + ' />'
  1667. else:
  1668. return '<' + tag + '></' + tag + '>'
  1669. # By declaring these methods and overriding their compiled code
  1670. # with the code from sgmllib, the original code will execute in
  1671. # feedparser's scope instead of sgmllib's. This means that the
  1672. # `tagfind` and `charref` regular expressions will be found as
  1673. # they're declared above, not as they're declared in sgmllib.
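# Note: this relies on Python 2 exposing a function's compiled bytecode as
# the writable func_code attribute (spelled __code__ in Python 3). The
# swapped-in bytecode still resolves globals through this module's
# namespace, which is what makes the override work.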
  1674. def goahead(self, i):
  1675. pass
  1676. goahead.func_code = sgmllib.SGMLParser.goahead.func_code
  1677. def __parse_starttag(self, i):
  1678. pass
  1679. __parse_starttag.func_code = sgmllib.SGMLParser.parse_starttag.func_code
  1680. def parse_starttag(self,i):
  1681. j = self.__parse_starttag(i)
  1682. if self._type == 'application/xhtml+xml':
  1683. if j>2 and self.rawdata[j-2:j]=='/>':
  1684. self.unknown_endtag(self.lasttag)
  1685. return j
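# Illustrative effect in 'application/xhtml+xml' mode: a self-closing tag
# that still carries attributes, e.g. '<div class="a"/>', produces an
# unknown_endtag('div') call immediately after the start tag, so the
# element is not left open.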
  1686. def feed(self, data):
  1687. data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
  1688. data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data)
  1689. data = data.replace('&#39;', "'")
  1690. data = data.replace('&#34;', '"')
  1691. try:
  1692. bytes
  1693. if bytes is str:
  1694. raise NameError
  1695. self.encoding = self.encoding + u'_INVALID_PYTHON_3'
  1696. except NameError:
  1697. if self.encoding and isinstance(data, unicode):
  1698. data = data.encode(self.encoding)
  1699. sgmllib.SGMLParser.feed(self, data)
  1700. sgmllib.SGMLParser.close(self)
  1701. def normalize_attrs(self, attrs):
  1702. if not attrs:
  1703. return attrs
  1704. # utility method to be called by descendants
  1705. attrs = dict([(k.lower(), v) for k, v in attrs]).items()
  1706. attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
  1707. attrs.sort()
  1708. return attrs
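# Doctest-style sketch with hypothetical values:
#   normalize_attrs([('HREF', '/a'), ('Rel', 'ME')])
#   -> [('href', '/a'), ('rel', 'me')]
# Keys are lowercased and sorted; only 'rel' and 'type' values are lowercased.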
  1709. def unknown_starttag(self, tag, attrs):
  1710. # called for each start tag
  1711. # attrs is a list of (attr, value) tuples
  1712. # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
  1713. uattrs = []
  1714. strattrs=''
  1715. if attrs:
  1716. for key, value in attrs:
  1717. value=value.replace('>','&gt;').replace('<','&lt;').replace('"','&quot;')
  1718. value = self.bare_ampersand.sub("&amp;", value)
  1719. # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
  1720. if not isinstance(value, unicode):
  1721. value = value.decode(self.encoding, 'ignore')
  1722. try:
  1723. # Currently, in Python 3 the key is already a str, and cannot be decoded again
  1724. uattrs.append((unicode(key, self.encoding), value))
  1725. except TypeError:
  1726. uattrs.append((key, value))
  1727. strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs])
  1728. if self.encoding:
  1729. try:
  1730. strattrs = strattrs.encode(self.encoding)
  1731. except (UnicodeEncodeError, LookupError):
  1732. pass
  1733. if tag in self.elements_no_end_tag:
  1734. self.pieces.append('<%s%s />' % (tag, strattrs))
  1735. else:
  1736. self.pieces.append('<%s%s>' % (tag, strattrs))
  1737. def unknown_endtag(self, tag):
  1738. # called for each end tag, e.g. for </pre>, tag will be 'pre'
  1739. # Reconstruct the original end tag.
  1740. if tag not in self.elements_no_end_tag:
  1741. self.pieces.append("</%s>" % tag)
  1742. def handle_charref(self, ref):
  1743. # called for each character reference, e.g. for '&#160;', ref will be '160'
  1744. # Reconstruct the original character reference.
  1745. if ref.startswith('x'):
  1746. value = int(ref[1:], 16)
  1747. else:
  1748. value = int(ref)
  1749. if value in _cp1252:
  1750. self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
  1751. else:
  1752. self.pieces.append('&#%s;' % ref)
  1753. def handle_entityref(self, ref):
  1754. # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
  1755. # Reconstruct the original entity reference.
  1756. if ref in name2codepoint or ref == 'apos':
  1757. self.pieces.append('&%s;' % ref)
  1758. else:
  1759. self.pieces.append('&amp;%s' % ref)
  1760. def handle_data(self, text):
  1761. # called for each block of plain text, i.e. outside of any tag and
  1762. # not containing any character or entity references
  1763. # Store the original text verbatim.
  1764. self.pieces.append(text)
  1765. def handle_comment(self, text):
  1766. # called for each HTML comment, e.g. <!-- insert Javascript code here -->
  1767. # Reconstruct the original comment.
  1768. self.pieces.append('<!--%s-->' % text)
  1769. def handle_pi(self, text):
  1770. # called for each processing instruction, e.g. <?instruction>
  1771. # Reconstruct original processing instruction.
  1772. self.pieces.append('<?%s>' % text)
  1773. def handle_decl(self, text):
  1774. # called for the DOCTYPE, if present, e.g.
  1775. # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
  1776. # "http://www.w3.org/TR/html4/loose.dtd">
  1777. # Reconstruct original DOCTYPE
  1778. self.pieces.append('<!%s>' % text)
  1779. _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
  1780. def _scan_name(self, i, declstartpos):
  1781. rawdata = self.rawdata
  1782. n = len(rawdata)
  1783. if i == n:
  1784. return None, -1
  1785. m = self._new_declname_match(rawdata, i)
  1786. if m:
  1787. s = m.group()
  1788. name = s.strip()
  1789. if (i + len(s)) == n:
  1790. return None, -1 # end of buffer
  1791. return name.lower(), m.end()
  1792. else:
  1793. self.handle_data(rawdata)
  1794. # self.updatepos(declstartpos, i)
  1795. return None, -1
  1796. def convert_charref(self, name):
  1797. return '&#%s;' % name
  1798. def convert_entityref(self, name):
  1799. return '&%s;' % name
  1800. def output(self):
  1801. '''Return processed HTML as a single string'''
  1802. return ''.join([str(p) for p in self.pieces])
  1803. def parse_declaration(self, i):
  1804. try:
  1805. return sgmllib.SGMLParser.parse_declaration(self, i)
  1806. except sgmllib.SGMLParseError:
  1807. # escape the doctype declaration and continue parsing
  1808. self.handle_data('&lt;')
  1809. return i+1
  1810. class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
  1811. def __init__(self, baseuri, baselang, encoding, entities):
  1812. sgmllib.SGMLParser.__init__(self)
  1813. _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
  1814. _BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml')
  1815. self.entities=entities
  1816. def decodeEntities(self, element, data):
  1817. data = data.replace('&#60;', '&lt;')
  1818. data = data.replace('&#x3c;', '&lt;')
  1819. data = data.replace('&#x3C;', '&lt;')
  1820. data = data.replace('&#62;', '&gt;')
  1821. data = data.replace('&#x3e;', '&gt;')
  1822. data = data.replace('&#x3E;', '&gt;')
  1823. data = data.replace('&#38;', '&amp;')
  1824. data = data.replace('&#x26;', '&amp;')
  1825. data = data.replace('&#34;', '&quot;')
  1826. data = data.replace('&#x22;', '&quot;')
  1827. data = data.replace('&#39;', '&apos;')
  1828. data = data.replace('&#x27;', '&apos;')
  1829. if not self.contentparams.get('type', u'xml').endswith(u'xml'):
  1830. data = data.replace('&lt;', '<')
  1831. data = data.replace('&gt;', '>')
  1832. data = data.replace('&amp;', '&')
  1833. data = data.replace('&quot;', '"')
  1834. data = data.replace('&apos;', "'")
  1835. return data
  1836. def strattrs(self, attrs):
  1837. return ''.join([' %s="%s"' % (n,v.replace('"','&quot;')) for n,v in attrs])
  1838. class _MicroformatsParser:
  1839. STRING = 1
  1840. DATE = 2
  1841. URI = 3
  1842. NODE = 4
  1843. EMAIL = 5
  1844. known_xfn_relationships = set(['contact', 'acquaintance', 'friend', 'met', 'co-worker', 'coworker', 'colleague', 'co-resident', 'coresident', 'neighbor', 'child', 'parent', 'sibling', 'brother', 'sister', 'spouse', 'wife', 'husband', 'kin', 'relative', 'muse', 'crush', 'date', 'sweetheart', 'me'])
1845. known_binary_extensions = set(['zip','rar','exe','gz','tar','tgz','tbz2','bz2','z','7z','dmg','img','sit','sitx','hqx','deb','rpm','jar','iso','bin','msi','mp2','mp3','ogg','ogm','mp4','m4v','m4a','avi','wma','wmv'])
  1846. def __init__(self, data, baseuri, encoding):
  1847. self.document = BeautifulSoup.BeautifulSoup(data)
  1848. self.baseuri = baseuri
  1849. self.encoding = encoding
  1850. if isinstance(data, unicode):
  1851. data = data.encode(encoding)
  1852. self.tags = []
  1853. self.enclosures = []
  1854. self.xfn = []
  1855. self.vcard = None
  1856. def vcardEscape(self, s):
  1857. if isinstance(s, basestring):
  1858. s = s.replace(',', '\\,').replace(';', '\\;').replace('\n', '\\n')
  1859. return s
  1860. def vcardFold(self, s):
  1861. s = re.sub(';+$', '', s)
  1862. sFolded = ''
  1863. iMax = 75
  1864. sPrefix = ''
  1865. while len(s) > iMax:
  1866. sFolded += sPrefix + s[:iMax] + '\n'
  1867. s = s[iMax:]
  1868. sPrefix = ' '
  1869. iMax = 74
  1870. sFolded += sPrefix + s
  1871. return sFolded
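# Folding sketch (RFC 2426 line folding): a property longer than 75
# characters is emitted as a 75-character first line followed by
# 74-character continuations, each continuation prefixed with one space.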
  1872. def normalize(self, s):
  1873. return re.sub(r'\s+', ' ', s).strip()
  1874. def unique(self, aList):
  1875. results = []
  1876. for element in aList:
  1877. if element not in results:
  1878. results.append(element)
  1879. return results
  1880. def toISO8601(self, dt):
  1881. return time.strftime('%Y-%m-%dT%H:%M:%SZ', dt)
  1882. def getPropertyValue(self, elmRoot, sProperty, iPropertyType=4, bAllowMultiple=0, bAutoEscape=0):
  1883. all = lambda x: 1
  1884. sProperty = sProperty.lower()
  1885. bFound = 0
  1886. bNormalize = 1
  1887. propertyMatch = {'class': re.compile(r'\b%s\b' % sProperty)}
  1888. if bAllowMultiple and (iPropertyType != self.NODE):
  1889. snapResults = []
  1890. containers = elmRoot(['ul', 'ol'], propertyMatch)
  1891. for container in containers:
  1892. snapResults.extend(container('li'))
  1893. bFound = (len(snapResults) != 0)
  1894. if not bFound:
  1895. snapResults = elmRoot(all, propertyMatch)
  1896. bFound = (len(snapResults) != 0)
  1897. if (not bFound) and (sProperty == 'value'):
  1898. snapResults = elmRoot('pre')
  1899. bFound = (len(snapResults) != 0)
  1900. bNormalize = not bFound
  1901. if not bFound:
  1902. snapResults = [elmRoot]
  1903. bFound = (len(snapResults) != 0)
  1904. arFilter = []
  1905. if sProperty == 'vcard':
  1906. snapFilter = elmRoot(all, propertyMatch)
  1907. for node in snapFilter:
  1908. if node.findParent(all, propertyMatch):
  1909. arFilter.append(node)
  1910. arResults = []
  1911. for node in snapResults:
  1912. if node not in arFilter:
  1913. arResults.append(node)
  1914. bFound = (len(arResults) != 0)
  1915. if not bFound:
  1916. if bAllowMultiple:
  1917. return []
  1918. elif iPropertyType == self.STRING:
  1919. return ''
  1920. elif iPropertyType == self.DATE:
  1921. return None
  1922. elif iPropertyType == self.URI:
  1923. return ''
  1924. elif iPropertyType == self.NODE:
  1925. return None
  1926. else:
  1927. return None
  1928. arValues = []
  1929. for elmResult in arResults:
  1930. sValue = None
  1931. if iPropertyType == self.NODE:
  1932. if bAllowMultiple:
  1933. arValues.append(elmResult)
  1934. continue
  1935. else:
  1936. return elmResult
  1937. sNodeName = elmResult.name.lower()
  1938. if (iPropertyType == self.EMAIL) and (sNodeName == 'a'):
  1939. sValue = (elmResult.get('href') or '').split('mailto:').pop().split('?')[0]
  1940. if sValue:
  1941. sValue = bNormalize and self.normalize(sValue) or sValue.strip()
  1942. if (not sValue) and (sNodeName == 'abbr'):
  1943. sValue = elmResult.get('title')
  1944. if sValue:
  1945. sValue = bNormalize and self.normalize(sValue) or sValue.strip()
  1946. if (not sValue) and (iPropertyType == self.URI):
  1947. if sNodeName == 'a':
  1948. sValue = elmResult.get('href')
  1949. elif sNodeName == 'img':
  1950. sValue = elmResult.get('src')
  1951. elif sNodeName == 'object':
  1952. sValue = elmResult.get('data')
  1953. if sValue:
  1954. sValue = bNormalize and self.normalize(sValue) or sValue.strip()
  1955. if (not sValue) and (sNodeName == 'img'):
  1956. sValue = elmResult.get('alt')
  1957. if sValue:
  1958. sValue = bNormalize and self.normalize(sValue) or sValue.strip()
  1959. if not sValue:
  1960. sValue = elmResult.renderContents()
  1961. sValue = re.sub(r'<\S[^>]*>', '', sValue)
  1962. sValue = sValue.replace('\r\n', '\n')
  1963. sValue = sValue.replace('\r', '\n')
  1964. if sValue:
  1965. sValue = bNormalize and self.normalize(sValue) or sValue.strip()
  1966. if not sValue:
  1967. continue
  1968. if iPropertyType == self.DATE:
  1969. sValue = _parse_date_iso8601(sValue)
  1970. if bAllowMultiple:
  1971. arValues.append(bAutoEscape and self.vcardEscape(sValue) or sValue)
  1972. else:
  1973. return bAutoEscape and self.vcardEscape(sValue) or sValue
  1974. return arValues
  1975. def findVCards(self, elmRoot, bAgentParsing=0):
  1976. sVCards = ''
  1977. if not bAgentParsing:
  1978. arCards = self.getPropertyValue(elmRoot, 'vcard', bAllowMultiple=1)
  1979. else:
  1980. arCards = [elmRoot]
  1981. for elmCard in arCards:
  1982. arLines = []
  1983. def processSingleString(sProperty):
  1984. sValue = self.getPropertyValue(elmCard, sProperty, self.STRING, bAutoEscape=1).decode(self.encoding)
  1985. if sValue:
  1986. arLines.append(self.vcardFold(sProperty.upper() + ':' + sValue))
  1987. return sValue or u''
  1988. def processSingleURI(sProperty):
  1989. sValue = self.getPropertyValue(elmCard, sProperty, self.URI)
  1990. if sValue:
  1991. sContentType = ''
  1992. sEncoding = ''
  1993. sValueKey = ''
  1994. if sValue.startswith('data:'):
  1995. sEncoding = ';ENCODING=b'
  1996. sContentType = sValue.split(';')[0].split('/').pop()
  1997. sValue = sValue.split(',', 1).pop()
  1998. else:
  1999. elmValue = self.getPropertyValue(elmCard, sProperty)
  2000. if elmValue:
  2001. if sProperty != 'url':
  2002. sValueKey = ';VALUE=uri'
  2003. sContentType = elmValue.get('type', '').strip().split('/').pop().strip()
  2004. sContentType = sContentType.upper()
  2005. if sContentType == 'OCTET-STREAM':
  2006. sContentType = ''
  2007. if sContentType:
  2008. sContentType = ';TYPE=' + sContentType.upper()
  2009. arLines.append(self.vcardFold(sProperty.upper() + sEncoding + sContentType + sValueKey + ':' + sValue))
  2010. def processTypeValue(sProperty, arDefaultType, arForceType=None):
  2011. arResults = self.getPropertyValue(elmCard, sProperty, bAllowMultiple=1)
  2012. for elmResult in arResults:
  2013. arType = self.getPropertyValue(elmResult, 'type', self.STRING, 1, 1)
  2014. if arForceType:
  2015. arType = self.unique(arForceType + arType)
  2016. if not arType:
  2017. arType = arDefaultType
  2018. sValue = self.getPropertyValue(elmResult, 'value', self.EMAIL, 0)
  2019. if sValue:
  2020. arLines.append(self.vcardFold(sProperty.upper() + ';TYPE=' + ','.join(arType) + ':' + sValue))
  2021. # AGENT
  2022. # must do this before all other properties because it is destructive
  2023. # (removes nested class="vcard" nodes so they don't interfere with
  2024. # this vcard's other properties)
  2025. arAgent = self.getPropertyValue(elmCard, 'agent', bAllowMultiple=1)
  2026. for elmAgent in arAgent:
2027. if re.compile(r'\bvcard\b').search(elmAgent.get('class', '')):
  2028. sAgentValue = self.findVCards(elmAgent, 1) + '\n'
  2029. sAgentValue = sAgentValue.replace('\n', '\\n')
  2030. sAgentValue = sAgentValue.replace(';', '\\;')
  2031. if sAgentValue:
  2032. arLines.append(self.vcardFold('AGENT:' + sAgentValue))
  2033. # Completely remove the agent element from the parse tree
  2034. elmAgent.extract()
  2035. else:
2036. sAgentValue = self.getPropertyValue(elmAgent, 'value', self.URI, bAutoEscape=1)
  2037. if sAgentValue:
  2038. arLines.append(self.vcardFold('AGENT;VALUE=uri:' + sAgentValue))
  2039. # FN (full name)
  2040. sFN = processSingleString('fn')
  2041. # N (name)
  2042. elmName = self.getPropertyValue(elmCard, 'n')
  2043. if elmName:
  2044. sFamilyName = self.getPropertyValue(elmName, 'family-name', self.STRING, bAutoEscape=1)
  2045. sGivenName = self.getPropertyValue(elmName, 'given-name', self.STRING, bAutoEscape=1)
  2046. arAdditionalNames = self.getPropertyValue(elmName, 'additional-name', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'additional-names', self.STRING, 1, 1)
  2047. arHonorificPrefixes = self.getPropertyValue(elmName, 'honorific-prefix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-prefixes', self.STRING, 1, 1)
  2048. arHonorificSuffixes = self.getPropertyValue(elmName, 'honorific-suffix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-suffixes', self.STRING, 1, 1)
  2049. arLines.append(self.vcardFold('N:' + sFamilyName + ';' +
  2050. sGivenName + ';' +
  2051. ','.join(arAdditionalNames) + ';' +
  2052. ','.join(arHonorificPrefixes) + ';' +
  2053. ','.join(arHonorificSuffixes)))
  2054. elif sFN:
  2055. # implied "N" optimization
  2056. # http://microformats.org/wiki/hcard#Implied_.22N.22_Optimization
  2057. arNames = self.normalize(sFN).split()
  2058. if len(arNames) == 2:
  2059. bFamilyNameFirst = (arNames[0].endswith(',') or
  2060. len(arNames[1]) == 1 or
  2061. ((len(arNames[1]) == 2) and (arNames[1].endswith('.'))))
  2062. if bFamilyNameFirst:
  2063. arLines.append(self.vcardFold('N:' + arNames[0] + ';' + arNames[1]))
  2064. else:
  2065. arLines.append(self.vcardFold('N:' + arNames[1] + ';' + arNames[0]))
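# Illustrative inputs for the implied-N branch above: 'John Smith' yields
# 'N:Smith;John', while 'Smith, John' (trailing comma on the first token)
# yields 'N:Smith,;John' -- the comma is kept verbatim.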
  2066. # SORT-STRING
  2067. sSortString = self.getPropertyValue(elmCard, 'sort-string', self.STRING, bAutoEscape=1)
  2068. if sSortString:
  2069. arLines.append(self.vcardFold('SORT-STRING:' + sSortString))
  2070. # NICKNAME
  2071. arNickname = self.getPropertyValue(elmCard, 'nickname', self.STRING, 1, 1)
  2072. if arNickname:
  2073. arLines.append(self.vcardFold('NICKNAME:' + ','.join(arNickname)))
  2074. # PHOTO
  2075. processSingleURI('photo')
  2076. # BDAY
  2077. dtBday = self.getPropertyValue(elmCard, 'bday', self.DATE)
  2078. if dtBday:
  2079. arLines.append(self.vcardFold('BDAY:' + self.toISO8601(dtBday)))
  2080. # ADR (address)
  2081. arAdr = self.getPropertyValue(elmCard, 'adr', bAllowMultiple=1)
  2082. for elmAdr in arAdr:
  2083. arType = self.getPropertyValue(elmAdr, 'type', self.STRING, 1, 1)
  2084. if not arType:
  2085. arType = ['intl','postal','parcel','work'] # default adr types, see RFC 2426 section 3.2.1
  2086. sPostOfficeBox = self.getPropertyValue(elmAdr, 'post-office-box', self.STRING, 0, 1)
  2087. sExtendedAddress = self.getPropertyValue(elmAdr, 'extended-address', self.STRING, 0, 1)
  2088. sStreetAddress = self.getPropertyValue(elmAdr, 'street-address', self.STRING, 0, 1)
  2089. sLocality = self.getPropertyValue(elmAdr, 'locality', self.STRING, 0, 1)
  2090. sRegion = self.getPropertyValue(elmAdr, 'region', self.STRING, 0, 1)
  2091. sPostalCode = self.getPropertyValue(elmAdr, 'postal-code', self.STRING, 0, 1)
  2092. sCountryName = self.getPropertyValue(elmAdr, 'country-name', self.STRING, 0, 1)
  2093. arLines.append(self.vcardFold('ADR;TYPE=' + ','.join(arType) + ':' +
  2094. sPostOfficeBox + ';' +
  2095. sExtendedAddress + ';' +
  2096. sStreetAddress + ';' +
  2097. sLocality + ';' +
  2098. sRegion + ';' +
  2099. sPostalCode + ';' +
  2100. sCountryName))
  2101. # LABEL
  2102. processTypeValue('label', ['intl','postal','parcel','work'])
  2103. # TEL (phone number)
  2104. processTypeValue('tel', ['voice'])
  2105. # EMAIL
  2106. processTypeValue('email', ['internet'], ['internet'])
  2107. # MAILER
  2108. processSingleString('mailer')
  2109. # TZ (timezone)
  2110. processSingleString('tz')
  2111. # GEO (geographical information)
  2112. elmGeo = self.getPropertyValue(elmCard, 'geo')
  2113. if elmGeo:
  2114. sLatitude = self.getPropertyValue(elmGeo, 'latitude', self.STRING, 0, 1)
  2115. sLongitude = self.getPropertyValue(elmGeo, 'longitude', self.STRING, 0, 1)
  2116. arLines.append(self.vcardFold('GEO:' + sLatitude + ';' + sLongitude))
  2117. # TITLE
  2118. processSingleString('title')
  2119. # ROLE
  2120. processSingleString('role')
  2121. # LOGO
  2122. processSingleURI('logo')
  2123. # ORG (organization)
  2124. elmOrg = self.getPropertyValue(elmCard, 'org')
  2125. if elmOrg:
  2126. sOrganizationName = self.getPropertyValue(elmOrg, 'organization-name', self.STRING, 0, 1)
  2127. if not sOrganizationName:
  2128. # implied "organization-name" optimization
  2129. # http://microformats.org/wiki/hcard#Implied_.22organization-name.22_Optimization
  2130. sOrganizationName = self.getPropertyValue(elmCard, 'org', self.STRING, 0, 1)
  2131. if sOrganizationName:
  2132. arLines.append(self.vcardFold('ORG:' + sOrganizationName))
  2133. else:
  2134. arOrganizationUnit = self.getPropertyValue(elmOrg, 'organization-unit', self.STRING, 1, 1)
  2135. arLines.append(self.vcardFold('ORG:' + sOrganizationName + ';' + ';'.join(arOrganizationUnit)))
  2136. # CATEGORY
  2137. arCategory = self.getPropertyValue(elmCard, 'category', self.STRING, 1, 1) + self.getPropertyValue(elmCard, 'categories', self.STRING, 1, 1)
  2138. if arCategory:
  2139. arLines.append(self.vcardFold('CATEGORIES:' + ','.join(arCategory)))
  2140. # NOTE
  2141. processSingleString('note')
  2142. # REV
  2143. processSingleString('rev')
  2144. # SOUND
  2145. processSingleURI('sound')
  2146. # UID
  2147. processSingleString('uid')
  2148. # URL
  2149. processSingleURI('url')
  2150. # CLASS
  2151. processSingleString('class')
  2152. # KEY
  2153. processSingleURI('key')
  2154. if arLines:
  2155. arLines = [u'BEGIN:vCard',u'VERSION:3.0'] + arLines + [u'END:vCard']
  2156. # XXX - this is super ugly; properly fix this with issue 148
  2157. for i, s in enumerate(arLines):
  2158. if not isinstance(s, unicode):
  2159. arLines[i] = s.decode('utf-8', 'ignore')
  2160. sVCards += u'\n'.join(arLines) + u'\n'
  2161. return sVCards.strip()
  2162. def isProbablyDownloadable(self, elm):
  2163. attrsD = elm.attrMap
  2164. if 'href' not in attrsD:
  2165. return 0
  2166. linktype = attrsD.get('type', '').strip()
  2167. if linktype.startswith('audio/') or \
  2168. linktype.startswith('video/') or \
  2169. (linktype.startswith('application/') and not linktype.endswith('xml')):
  2170. return 1
  2171. path = urlparse.urlparse(attrsD['href'])[2]
  2172. if path.find('.') == -1:
  2173. return 0
  2174. fileext = path.split('.').pop().lower()
  2175. return fileext in self.known_binary_extensions
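# Illustrative outcomes: type='audio/mpeg' or an href ending in '.mp3'
# counts as downloadable; type='application/atom+xml' does not (it ends
# in 'xml'), and an extensionless path returns 0.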
  2176. def findTags(self):
  2177. all = lambda x: 1
  2178. for elm in self.document(all, {'rel': re.compile(r'\btag\b')}):
  2179. href = elm.get('href')
  2180. if not href:
  2181. continue
  2182. urlscheme, domain, path, params, query, fragment = \
  2183. urlparse.urlparse(_urljoin(self.baseuri, href))
  2184. segments = path.split('/')
  2185. tag = segments.pop()
  2186. if not tag:
  2187. if segments:
  2188. tag = segments.pop()
  2189. else:
  2190. # there are no tags
  2191. continue
  2192. tagscheme = urlparse.urlunparse((urlscheme, domain, '/'.join(segments), '', '', ''))
  2193. if not tagscheme.endswith('/'):
  2194. tagscheme += '/'
  2195. self.tags.append(FeedParserDict({"term": tag, "scheme": tagscheme, "label": elm.string or ''}))
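# Illustrative rel="tag" link: href='http://example.com/tags/python'
# yields term='python' and scheme='http://example.com/tags/' once the
# trailing path segment is popped off.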
  2196. def findEnclosures(self):
  2197. all = lambda x: 1
  2198. enclosure_match = re.compile(r'\benclosure\b')
  2199. for elm in self.document(all, {'href': re.compile(r'.+')}):
  2200. if not enclosure_match.search(elm.get('rel', u'')) and not self.isProbablyDownloadable(elm):
  2201. continue
  2202. if elm.attrMap not in self.enclosures:
  2203. self.enclosures.append(elm.attrMap)
  2204. if elm.string and not elm.get('title'):
  2205. self.enclosures[-1]['title'] = elm.string
  2206. def findXFN(self):
  2207. all = lambda x: 1
  2208. for elm in self.document(all, {'rel': re.compile('.+'), 'href': re.compile('.+')}):
  2209. rels = elm.get('rel', u'').split()
  2210. xfn_rels = [r for r in rels if r in self.known_xfn_relationships]
  2211. if xfn_rels:
  2212. self.xfn.append({"relationships": xfn_rels, "href": elm.get('href', ''), "name": elm.string})
  2213. def _parseMicroformats(htmlSource, baseURI, encoding):
  2214. if not BeautifulSoup:
  2215. return
  2216. try:
  2217. p = _MicroformatsParser(htmlSource, baseURI, encoding)
  2218. except UnicodeEncodeError:
  2219. # sgmllib throws this exception when performing lookups of tags
  2220. # with non-ASCII characters in them.
  2221. return
  2222. p.vcard = p.findVCards(p.document)
  2223. p.findTags()
  2224. p.findEnclosures()
  2225. p.findXFN()
  2226. return {"tags": p.tags, "enclosures": p.enclosures, "xfn": p.xfn, "vcard": p.vcard}
  2227. class _RelativeURIResolver(_BaseHTMLProcessor):
  2228. relative_uris = set([('a', 'href'),
  2229. ('applet', 'codebase'),
  2230. ('area', 'href'),
  2231. ('blockquote', 'cite'),
  2232. ('body', 'background'),
  2233. ('del', 'cite'),
  2234. ('form', 'action'),
  2235. ('frame', 'longdesc'),
  2236. ('frame', 'src'),
  2237. ('iframe', 'longdesc'),
  2238. ('iframe', 'src'),
  2239. ('head', 'profile'),
  2240. ('img', 'longdesc'),
  2241. ('img', 'src'),
  2242. ('img', 'usemap'),
  2243. ('input', 'src'),
  2244. ('input', 'usemap'),
  2245. ('ins', 'cite'),
  2246. ('link', 'href'),
  2247. ('object', 'classid'),
  2248. ('object', 'codebase'),
  2249. ('object', 'data'),
  2250. ('object', 'usemap'),
  2251. ('q', 'cite'),
  2252. ('script', 'src')])
  2253. def __init__(self, baseuri, encoding, _type):
  2254. _BaseHTMLProcessor.__init__(self, encoding, _type)
  2255. self.baseuri = baseuri
  2256. def resolveURI(self, uri):
  2257. return _makeSafeAbsoluteURI(self.baseuri, uri.strip())
  2258. def unknown_starttag(self, tag, attrs):
  2259. attrs = self.normalize_attrs(attrs)
  2260. attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
  2261. _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
  2262. def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type):
  2263. if not _SGML_AVAILABLE:
  2264. return htmlSource
  2265. p = _RelativeURIResolver(baseURI, encoding, _type)
  2266. p.feed(htmlSource)
  2267. return p.output()
  2268. def _makeSafeAbsoluteURI(base, rel=None):
  2269. # bail if ACCEPTABLE_URI_SCHEMES is empty
  2270. if not ACCEPTABLE_URI_SCHEMES:
  2271. try:
  2272. return _urljoin(base, rel or u'')
  2273. except ValueError:
  2274. return u''
  2275. if not base:
  2276. return rel or u''
  2277. if not rel:
  2278. try:
  2279. scheme = urlparse.urlparse(base)[0]
  2280. except ValueError:
  2281. return u''
  2282. if not scheme or scheme in ACCEPTABLE_URI_SCHEMES:
  2283. return base
  2284. return u''
  2285. try:
  2286. uri = _urljoin(base, rel)
  2287. except ValueError:
  2288. return u''
  2289. if uri.strip().split(':', 1)[0] not in ACCEPTABLE_URI_SCHEMES:
  2290. return u''
  2291. return uri
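# Doctest-style sketch, assuming the default ACCEPTABLE_URI_SCHEMES
# (which include 'http' but not 'javascript'):
#   _makeSafeAbsoluteURI(u'http://example.com/', u'feed.xml')
#   -> u'http://example.com/feed.xml'
#   _makeSafeAbsoluteURI(u'http://example.com/', u'javascript:alert(1)')
#   -> u''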
  2292. class _HTMLSanitizer(_BaseHTMLProcessor):
  2293. acceptable_elements = set(['a', 'abbr', 'acronym', 'address', 'area',
  2294. 'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
  2295. 'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
  2296. 'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
  2297. 'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
  2298. 'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
  2299. 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
  2300. 'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
  2301. 'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
  2302. 'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
  2303. 'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
  2304. 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
  2305. 'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript'])
  2306. acceptable_attributes = set(['abbr', 'accept', 'accept-charset', 'accesskey',
  2307. 'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
  2308. 'background', 'balance', 'bgcolor', 'bgproperties', 'border',
  2309. 'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
  2310. 'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
  2311. 'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols',
  2312. 'colspan', 'compact', 'contenteditable', 'controls', 'coords', 'data',
  2313. 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', 'delay',
  2314. 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for',
  2315. 'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus',
  2316. 'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode',
  2317. 'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc',
  2318. 'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max',
  2319. 'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref',
  2320. 'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size',
  2321. 'prompt', 'pqg', 'radiogroup', 'readonly', 'rel', 'repeat-max',
  2322. 'repeat-min', 'replace', 'required', 'rev', 'rightspacing', 'rows',
  2323. 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src',
  2324. 'start', 'step', 'summary', 'suppress', 'tabindex', 'target', 'template',
  2325. 'title', 'toppadding', 'type', 'unselectable', 'usemap', 'urn', 'valign',
  2326. 'value', 'variable', 'volume', 'vspace', 'vrml', 'width', 'wrap',
  2327. 'xml:lang'])
  2328. unacceptable_elements_with_end_tag = set(['script', 'applet', 'style'])
  2329. acceptable_css_properties = set(['azimuth', 'background-color',
  2330. 'border-bottom-color', 'border-collapse', 'border-color',
  2331. 'border-left-color', 'border-right-color', 'border-top-color', 'clear',
  2332. 'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
  2333. 'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
  2334. 'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
  2335. 'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
  2336. 'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
  2337. 'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
  2338. 'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
  2339. 'white-space', 'width'])
  2340. # survey of common keywords found in feeds
  2341. acceptable_css_keywords = set(['auto', 'aqua', 'black', 'block', 'blue',
  2342. 'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
  2343. 'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
  2344. 'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
  2345. 'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
  2346. 'transparent', 'underline', 'white', 'yellow'])
  2347. valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
  2348. '\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
  2349. mathml_elements = set(['annotation', 'annotation-xml', 'maction', 'math',
  2350. 'merror', 'mfenced', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded',
  2351. 'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle',
  2352. 'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
  2353. 'munderover', 'none', 'semantics'])
2354. mathml_attributes = set(['actiontype', 'align', 'close', 'columnalign',
2355. 'columnlines', 'columnspacing', 'columnspan', 'depth',
2356. 'display', 'displaystyle', 'encoding', 'equalcolumns', 'equalrows',
2357. 'fence', 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness',
2358. 'lspace', 'mathbackground', 'mathcolor', 'mathvariant',
2359. 'maxsize', 'minsize', 'open', 'other', 'rowalign',
2360. 'rowlines', 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
2361. 'separator', 'separators', 'stretchy', 'width', 'xlink:href',
2362. 'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink'])
  2363. # svgtiny - foreignObject + linearGradient + radialGradient + stop
  2364. svg_elements = set(['a', 'animate', 'animateColor', 'animateMotion',
  2365. 'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject',
  2366. 'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
  2367. 'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath',
  2368. 'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop',
  2369. 'svg', 'switch', 'text', 'title', 'tspan', 'use'])
  2370. # svgtiny + class + opacity + offset + xmlns + xmlns:xlink
  2371. svg_attributes = set(['accent-height', 'accumulate', 'additive', 'alphabetic',
  2372. 'arabic-form', 'ascent', 'attributeName', 'attributeType',
  2373. 'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
  2374. 'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx',
  2375. 'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity',
  2376. 'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style',
  2377. 'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2',
  2378. 'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x',
  2379. 'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines',
  2380. 'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid',
  2381. 'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max',
  2382. 'min', 'name', 'offset', 'opacity', 'orient', 'origin',
  2383. 'overline-position', 'overline-thickness', 'panose-1', 'path',
  2384. 'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY',
  2385. 'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures',
  2386. 'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv',
  2387. 'stop-color', 'stop-opacity', 'strikethrough-position',
  2388. 'strikethrough-thickness', 'stroke', 'stroke-dasharray',
  2389. 'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
  2390. 'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage',
  2391. 'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2',
  2392. 'underline-position', 'underline-thickness', 'unicode', 'unicode-range',
  2393. 'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width',
  2394. 'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
  2395. 'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
  2396. 'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1',
  2397. 'y2', 'zoomAndPan'])
  2398. svg_attr_map = None
  2399. svg_elem_map = None
  2400. acceptable_svg_properties = set([ 'fill', 'fill-opacity', 'fill-rule',
  2401. 'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
  2402. 'stroke-opacity'])
  2403. def reset(self):
  2404. _BaseHTMLProcessor.reset(self)
  2405. self.unacceptablestack = 0
  2406. self.mathmlOK = 0
  2407. self.svgOK = 0
  2408. def unknown_starttag(self, tag, attrs):
  2409. acceptable_attributes = self.acceptable_attributes
  2410. keymap = {}
2411. if tag not in self.acceptable_elements or self.svgOK:
  2412. if tag in self.unacceptable_elements_with_end_tag:
  2413. self.unacceptablestack += 1
  2414. # add implicit namespaces to html5 inline svg/mathml
  2415. if self._type.endswith('html'):
  2416. if not dict(attrs).get('xmlns'):
  2417. if tag=='svg':
  2418. attrs.append( ('xmlns','http://www.w3.org/2000/svg') )
  2419. if tag=='math':
  2420. attrs.append( ('xmlns','http://www.w3.org/1998/Math/MathML') )
  2421. # not otherwise acceptable, perhaps it is MathML or SVG?
  2422. if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs:
  2423. self.mathmlOK += 1
  2424. if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs:
  2425. self.svgOK += 1
2426. # choose acceptable attributes based on tag class, else bail
  2427. if self.mathmlOK and tag in self.mathml_elements:
  2428. acceptable_attributes = self.mathml_attributes
  2429. elif self.svgOK and tag in self.svg_elements:
  2430. # for most vocabularies, lowercasing is a good idea. Many
  2431. # svg elements, however, are camel case
  2432. if not self.svg_attr_map:
  2433. lower=[attr.lower() for attr in self.svg_attributes]
  2434. mix=[a for a in self.svg_attributes if a not in lower]
  2435. self.svg_attributes = lower
  2436. self.svg_attr_map = dict([(a.lower(),a) for a in mix])
  2437. lower=[attr.lower() for attr in self.svg_elements]
  2438. mix=[a for a in self.svg_elements if a not in lower]
  2439. self.svg_elements = lower
  2440. self.svg_elem_map = dict([(a.lower(),a) for a in mix])
  2441. acceptable_attributes = self.svg_attributes
  2442. tag = self.svg_elem_map.get(tag,tag)
  2443. keymap = self.svg_attr_map
2444. elif tag not in self.acceptable_elements:
  2445. return
  2446. # declare xlink namespace, if needed
  2447. if self.mathmlOK or self.svgOK:
2448. if [n for n, v in attrs if n.startswith('xlink:')]:
  2449. if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs:
  2450. attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink'))
  2451. clean_attrs = []
  2452. for key, value in self.normalize_attrs(attrs):
  2453. if key in acceptable_attributes:
  2454. key=keymap.get(key,key)
  2455. # make sure the uri uses an acceptable uri scheme
  2456. if key == u'href':
  2457. value = _makeSafeAbsoluteURI(value)
  2458. clean_attrs.append((key,value))
  2459. elif key=='style':
  2460. clean_value = self.sanitize_style(value)
  2461. if clean_value:
  2462. clean_attrs.append((key,clean_value))
  2463. _BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)
  2464. def unknown_endtag(self, tag):
2465. if tag not in self.acceptable_elements:
  2466. if tag in self.unacceptable_elements_with_end_tag:
  2467. self.unacceptablestack -= 1
  2468. if self.mathmlOK and tag in self.mathml_elements:
  2469. if tag == 'math' and self.mathmlOK:
  2470. self.mathmlOK -= 1
  2471. elif self.svgOK and tag in self.svg_elements:
  2472. tag = self.svg_elem_map.get(tag,tag)
  2473. if tag == 'svg' and self.svgOK:
  2474. self.svgOK -= 1
  2475. else:
  2476. return
  2477. _BaseHTMLProcessor.unknown_endtag(self, tag)
  2478. def handle_pi(self, text):
  2479. pass
  2480. def handle_decl(self, text):
  2481. pass
  2482. def handle_data(self, text):
  2483. if not self.unacceptablestack:
  2484. _BaseHTMLProcessor.handle_data(self, text)
  2485. def sanitize_style(self, style):
  2486. # disallow urls
  2487. style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
  2488. # gauntlet
  2489. if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
  2490. return ''
  2491. # This replaced a regexp that used re.match and was prone to pathological back-tracking.
  2492. if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip():
  2493. return ''
  2494. clean = []
  2495. for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
  2496. if not value:
  2497. continue
  2498. if prop.lower() in self.acceptable_css_properties:
  2499. clean.append(prop + ': ' + value + ';')
  2500. elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
  2501. for keyword in value.split():
2502. if keyword not in self.acceptable_css_keywords and \
  2503. not self.valid_css_values.match(keyword):
  2504. break
  2505. else:
  2506. clean.append(prop + ': ' + value + ';')
  2507. elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
  2508. clean.append(prop + ': ' + value + ';')
  2509. return ' '.join(clean)
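# Illustrative pass through sanitize_style: 'color: red; position: fixed'
# keeps only 'color: red;' because 'position' is not an acceptable CSS
# property, while 'margin: 0 auto;' survives via the keyword whitelist.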
  2510. def parse_comment(self, i, report=1):
  2511. ret = _BaseHTMLProcessor.parse_comment(self, i, report)
  2512. if ret >= 0:
  2513. return ret
  2514. # if ret == -1, this may be a malicious attempt to circumvent
  2515. # sanitization, or a page-destroying unclosed comment
  2516. match = re.compile(r'--[^>]*>').search(self.rawdata, i+4)
  2517. if match:
  2518. return match.end()
  2519. # unclosed comment; deliberately fail to handle_data()
  2520. return len(self.rawdata)
def _sanitizeHTML(htmlSource, encoding, _type):
    if not _SGML_AVAILABLE:
        return htmlSource
    p = _HTMLSanitizer(encoding, _type)
    htmlSource = htmlSource.replace('<![CDATA[', '&lt;![CDATA[')
    p.feed(htmlSource)
    data = p.output()
    if TIDY_MARKUP:
        # loop through list of preferred Tidy interfaces looking for one that's installed,
        # then set up a common _tidy function to wrap the interface-specific API.
        _tidy = None
        for tidy_interface in PREFERRED_TIDY_INTERFACES:
            try:
                if tidy_interface == "uTidy":
                    from tidy import parseString as _utidy
                    def _tidy(data, **kwargs):
                        return str(_utidy(data, **kwargs))
                    break
                elif tidy_interface == "mxTidy":
                    from mx.Tidy import Tidy as _mxtidy
                    def _tidy(data, **kwargs):
                        nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
                        return data
                    break
            except:
                pass
        if _tidy:
            utf8 = isinstance(data, unicode)
            if utf8:
                data = data.encode('utf-8')
            data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
            if utf8:
                data = unicode(data, 'utf-8')
            if data.count('<body'):
                data = data.split('<body', 1)[1]
                if data.count('>'):
                    data = data.split('>', 1)[1]
            if data.count('</body'):
                data = data.split('</body', 1)[0]
    data = data.strip().replace('\r\n', '\n')
    return data

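# Illustrative sketch (not part of the original source): event-handler
# attributes and unacceptable elements such as <script> are stripped, so
# (with TIDY_MARKUP off) one would expect roughly:
#
#     >>> _sanitizeHTML(u'<p onclick="evil()">hi</p><script>evil()</script>', 'utf-8', u'text/html')
#     u'<p>hi</p>'
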
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
    def http_error_default(self, req, fp, code, msg, headers):
        # The default implementation just raises HTTPError.
        # Forget that.
        fp.status = code
        return fp

    def http_error_301(self, req, fp, code, msg, hdrs):
        result = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp,
                                                            code, msg, hdrs)
        result.status = code
        result.newurl = result.geturl()
        return result

    # The default implementations in urllib2.HTTPRedirectHandler
    # are identical, so hardcoding a http_error_301 call above
    # won't affect anything
    http_error_300 = http_error_301
    http_error_302 = http_error_301
    http_error_303 = http_error_301
    http_error_307 = http_error_301

    def http_error_401(self, req, fp, code, msg, headers):
        # Check if
        # - server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth.
        # If both conditions hold, parse authentication information
        # out of the Authorization header we sent the first time
        # (for the username and password) and the WWW-Authenticate
        # header the server sent back (for the realm) and retry
        # the request with the appropriate digest auth headers instead.
        # This evil genius hack has been brought to you by Aaron Swartz.
        host = urlparse.urlparse(req.get_full_url())[1]
        if base64 is None or 'Authorization' not in req.headers \
                or 'WWW-Authenticate' not in headers:
            return self.http_error_default(req, fp, code, msg, headers)
        auth = _base64decode(req.headers['Authorization'].split(' ')[1])
        user, passw = auth.split(':')
        realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
        self.add_password(realm, host, user, passw)
        retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
        self.reset_retry_count()
        return retry

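# _FeedURLHandler is wired into urllib2 by _open_resource() below, roughly:
#
#     opener = urllib2.build_opener(_FeedURLHandler())
#     f = opener.open(request)  # redirects arrive with .status and .newurl set
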
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers):
    """URL, filename, or string --> stream

    This function lets you define parsers that take any input source
    (URL, pathname to local or network file, or actual data as a string)
    and deal with it in a uniform manner.  The returned object is guaranteed
    to have all the basic stdio read methods (read, readline, readlines).
    Just .close() the object when you're done with it.

    If the etag argument is supplied, it will be used as the value of an
    If-None-Match request header.

    If the modified argument is supplied, it can be a tuple of 9 integers
    (as returned by gmtime() in the standard Python time module) or a date
    string in any format supported by feedparser.  Regardless, it MUST
    be in GMT (Greenwich Mean Time).  It will be reformatted into an
    RFC 1123-compliant date and used as the value of an If-Modified-Since
    request header.

    If the agent argument is supplied, it will be used as the value of a
    User-Agent request header.

    If the referrer argument is supplied, it will be used as the value of a
    Referer[sic] request header.

    If handlers is supplied, it is a list of handlers used to build a
    urllib2 opener.

    If request_headers is supplied, it is a dictionary of HTTP request headers
    that will override the values generated by FeedParser.
    """

    if hasattr(url_file_stream_or_string, 'read'):
        return url_file_stream_or_string

    if isinstance(url_file_stream_or_string, basestring) \
       and urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'):
        # Deal with the feed URI scheme
        if url_file_stream_or_string.startswith('feed:http'):
            url_file_stream_or_string = url_file_stream_or_string[5:]
        elif url_file_stream_or_string.startswith('feed:'):
            url_file_stream_or_string = 'http:' + url_file_stream_or_string[5:]
        if not agent:
            agent = USER_AGENT
        # test for inline user:password for basic auth
        auth = None
        if base64:
            urltype, rest = urllib.splittype(url_file_stream_or_string)
            realhost, rest = urllib.splithost(rest)
            if realhost:
                user_passwd, realhost = urllib.splituser(realhost)
                if user_passwd:
                    url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
                    auth = base64.standard_b64encode(user_passwd).strip()

        # iri support
        if isinstance(url_file_stream_or_string, unicode):
            url_file_stream_or_string = _convert_to_idn(url_file_stream_or_string)

        # try to open with urllib2 (to use optional headers)
        request = _build_urllib2_request(url_file_stream_or_string, agent, etag, modified, referrer, auth, request_headers)
        opener = urllib2.build_opener(*tuple(handlers + [_FeedURLHandler()]))
        opener.addheaders = []  # RMK - must clear so we only send our custom User-Agent
        try:
            return opener.open(request)
        finally:
            opener.close()  # JohnD

    # try to open with native open function (if url_file_stream_or_string is a filename)
    try:
        return open(url_file_stream_or_string, 'rb')
    except (IOError, UnicodeEncodeError, TypeError):
        # if url_file_stream_or_string is a unicode object that
        # cannot be converted to the encoding returned by
        # sys.getfilesystemencoding(), a UnicodeEncodeError
        # will be thrown.
        # If url_file_stream_or_string is a string that contains NULL
        # (such as an XML document encoded in UTF-32), TypeError will
        # be thrown.
        pass

    # treat url_file_stream_or_string as string
    if isinstance(url_file_stream_or_string, unicode):
        return _StringIO(url_file_stream_or_string.encode('utf-8'))
    return _StringIO(url_file_stream_or_string)

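# Illustrative sketch (not part of the original source) of the three kinds of
# input _open_resource() accepts; the URL and path are hypothetical:
#
#     f = _open_resource(u'http://example.org/feed.xml', None, None, USER_AGENT, None, [], {})
#     f = _open_resource('/path/to/feed.xml', None, None, USER_AGENT, None, [], {})
#     f = _open_resource('<rss version="2.0">...</rss>', None, None, USER_AGENT, None, [], {})
#     data = f.read()
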
def _convert_to_idn(url):
    """Convert a URL to IDN notation"""
    # this function should only be called with a unicode string
    # strategy: if the host cannot be encoded in ascii, then
    # it'll be necessary to encode it in idn form
    parts = list(urlparse.urlsplit(url))
    try:
        parts[1].encode('ascii')
    except UnicodeEncodeError:
        # the url needs to be converted to idn notation
        host = parts[1].rsplit(':', 1)
        newhost = []
        port = u''
        if len(host) == 2:
            port = host.pop()
        for h in host[0].split('.'):
            newhost.append(h.encode('idna').decode('utf-8'))
        parts[1] = '.'.join(newhost)
        if port:
            parts[1] += ':' + port
        return urlparse.urlunsplit(parts)
    else:
        return url

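# Illustrative (the hostname is hypothetical):
#
#     >>> _convert_to_idn(u'http://ex\xe4mple.org/feed')
#     u'http://xn--exmple-cua.org/feed'
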
def _build_urllib2_request(url, agent, etag, modified, referrer, auth, request_headers):
    request = urllib2.Request(url)
    request.add_header('User-Agent', agent)
    if etag:
        request.add_header('If-None-Match', etag)
    if isinstance(modified, basestring):
        modified = _parse_date(modified)
    elif isinstance(modified, datetime.datetime):
        modified = modified.utctimetuple()
    if modified:
        # format into an RFC 1123-compliant timestamp. We can't use
        # time.strftime() since the %a and %b directives can be affected
        # by the current locale, but RFC 2616 states that dates must be
        # in English.
        short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
        months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
        request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
    if referrer:
        request.add_header('Referer', referrer)
    if gzip and zlib:
        request.add_header('Accept-encoding', 'gzip, deflate')
    elif gzip:
        request.add_header('Accept-encoding', 'gzip')
    elif zlib:
        request.add_header('Accept-encoding', 'deflate')
    else:
        request.add_header('Accept-encoding', '')
    if auth:
        request.add_header('Authorization', 'Basic %s' % auth)
    if ACCEPT_HEADER:
        request.add_header('Accept', ACCEPT_HEADER)
    # use this for whatever -- cookies, special headers, etc
    # [('Cookie','Something'),('x-special-header','Another Value')]
    for header_name, header_value in request_headers.items():
        request.add_header(header_name, header_value)
    request.add_header('A-IM', 'feed')  # RFC 3229 support
    return request

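# Illustrative sketch (not part of the original source); the URL and etag are
# hypothetical, and the tuple is a 9-tuple date in GMT as produced by
# _parse_date():
#
#     req = _build_urllib2_request('http://example.org/feed.xml', USER_AGENT,
#                                  '"abc123"', (2004, 1, 1, 19, 48, 21, 3, 1, 0),
#                                  None, None, {})
#     # req now carries If-None-Match: "abc123" and
#     # If-Modified-Since: Thu, 01 Jan 2004 19:48:21 GMT
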
_date_handlers = []
def registerDateHandler(func):
    '''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
    _date_handlers.insert(0, func)

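# Illustrative sketch (not part of the original source): a hypothetical
# handler for 'YYYY.MM.DD' dates, registered so that it is tried before the
# built-in handlers defined below:
#
#     def _parse_date_dotted(dateString):
#         m = re.match(r'(\d{4})\.(\d{2})\.(\d{2})$', dateString)
#         return m and _parse_date_w3dtf('%s-%s-%s' % m.groups()) or None
#     registerDateHandler(_parse_date_dotted)
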
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance, is 030104 the date
# 2003-01-04 or 0301-04-01?), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO',
                 'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
                 '-YY-?MM', '-OOO', '-YY',
                 '--MM-?DD', '--MM',
                 '---DD',
                 'CC', '']
_iso8601_re = [
    tmpl.replace(
        'YYYY', r'(?P<year>\d{4})').replace(
        'YY', r'(?P<year>\d\d)').replace(
        'MM', r'(?P<month>[01]\d)').replace(
        'DD', r'(?P<day>[0123]\d)').replace(
        'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
        'CC', r'(?P<century>\d\d$)')
    + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
    + r'(:(?P<second>\d{2}))?'
    + r'(\.(?P<fracsecond>\d+))?'
    + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
    for tmpl in _iso8601_tmpl]
try:
    del tmpl
except NameError:
    pass
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
try:
    del regex
except NameError:
    pass

def _parse_date_iso8601(dateString):
    '''Parse a variety of ISO-8601-compatible formats like 20040105'''
    m = None
    for _iso8601_match in _iso8601_matches:
        m = _iso8601_match(dateString)
        if m:
            break
    if not m:
        return
    if m.span() == (0, 0):
        return
    params = m.groupdict()
    ordinal = params.get('ordinal', 0)
    if ordinal:
        ordinal = int(ordinal)
    else:
        ordinal = 0
    year = params.get('year', '--')
    if not year or year == '--':
        year = time.gmtime()[0]
    elif len(year) == 2:
        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
        year = 100 * int(time.gmtime()[0] / 100) + int(year)
    else:
        year = int(year)
    month = params.get('month', '-')
    if not month or month == '-':
        # ordinals are NOT normalized by mktime, we simulate them
        # by setting month=1, day=ordinal
        if ordinal:
            month = 1
        else:
            month = time.gmtime()[1]
    month = int(month)
    day = params.get('day', 0)
    if not day:
        # see above
        if ordinal:
            day = ordinal
        elif params.get('century', 0) or \
                params.get('year', 0) or params.get('month', 0):
            day = 1
        else:
            day = time.gmtime()[2]
    else:
        day = int(day)
    # special case of the century - is the first year of the 21st century
    # 2000 or 2001 ? The debate goes on...
    if 'century' in params:
        year = (int(params['century']) - 1) * 100 + 1
    # in ISO 8601 most fields are optional
    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
        if not params.get(field, None):
            params[field] = 0
    hour = int(params.get('hour', 0))
    minute = int(params.get('minute', 0))
    second = int(float(params.get('second', 0)))
    # weekday is normalized by mktime(), we can ignore it
    weekday = 0
    daylight_savings_flag = -1
    tm = [year, month, day, hour, minute, second, weekday,
          ordinal, daylight_savings_flag]
    # ISO 8601 time zone adjustments
    tz = params.get('tz')
    if tz and tz != 'Z':
        if tz[0] == '-':
            tm[3] += int(params.get('tzhour', 0))
            tm[4] += int(params.get('tzmin', 0))
        elif tz[0] == '+':
            tm[3] -= int(params.get('tzhour', 0))
            tm[4] -= int(params.get('tzmin', 0))
        else:
            return None
    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
    # which is guaranteed to normalize d/m/y/h/m/s.
    # Many implementations have bugs, but we'll pretend they don't.
    return time.localtime(time.mktime(tuple(tm)))
registerDateHandler(_parse_date_iso8601)

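# Illustrative: the date fields are stable regardless of the local timezone,
# e.g.
#
#     >>> _parse_date_iso8601('20040105')[:3]
#     (2004, 1, 5)
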
# 8-bit date handling routines written by ytrewq1.
_korean_year = u'\ub144'  # b3e2 in euc-kr
_korean_month = u'\uc6d4'  # bff9 in euc-kr
_korean_day = u'\uc77c'  # c0cf in euc-kr
_korean_am = u'\uc624\uc804'  # bfc0 c0fc in euc-kr
_korean_pm = u'\uc624\ud6c4'  # bfc0 c8c4 in euc-kr

_korean_onblog_date_re = \
    re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
               (_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
    re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
               (_korean_am, _korean_pm))

def _parse_date_onblog(dateString):
    '''Parse a string according to the OnBlog 8-bit date format'''
    m = _korean_onblog_date_re.match(dateString)
    if not m:
        return
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),
                 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),
                 'zonediff': '+09:00'}
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)

def _parse_date_nate(dateString):
    '''Parse a string according to the Nate 8-bit date format'''
    m = _korean_nate_date_re.match(dateString)
    if not m:
        return
    hour = int(m.group(5))
    ampm = m.group(4)
    if ampm == _korean_pm:
        hour += 12
    hour = str(hour)
    if len(hour) == 1:
        hour = '0' + hour
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),
                 'hour': hour, 'minute': m.group(6), 'second': m.group(7),
                 'zonediff': '+09:00'}
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)

# Unicode strings for Greek date strings
_greek_months = {
    u'\u0399\u03b1\u03bd': u'Jan',  # c9e1ed in iso-8859-7
    u'\u03a6\u03b5\u03b2': u'Feb',  # d6e5e2 in iso-8859-7
    u'\u039c\u03ac\u03ce': u'Mar',  # ccdcfe in iso-8859-7
    u'\u039c\u03b1\u03ce': u'Mar',  # cce1fe in iso-8859-7
    u'\u0391\u03c0\u03c1': u'Apr',  # c1f0f1 in iso-8859-7
    u'\u039c\u03ac\u03b9': u'May',  # ccdce9 in iso-8859-7
    u'\u039c\u03b1\u03ca': u'May',  # cce1fa in iso-8859-7
    u'\u039c\u03b1\u03b9': u'May',  # cce1e9 in iso-8859-7
    u'\u0399\u03bf\u03cd\u03bd': u'Jun',  # c9effded in iso-8859-7
    u'\u0399\u03bf\u03bd': u'Jun',  # c9efed in iso-8859-7
    u'\u0399\u03bf\u03cd\u03bb': u'Jul',  # c9effdeb in iso-8859-7
    u'\u0399\u03bf\u03bb': u'Jul',  # c9f9eb in iso-8859-7
    u'\u0391\u03cd\u03b3': u'Aug',  # c1fde3 in iso-8859-7
    u'\u0391\u03c5\u03b3': u'Aug',  # c1f5e3 in iso-8859-7
    u'\u03a3\u03b5\u03c0': u'Sep',  # d3e5f0 in iso-8859-7
    u'\u039f\u03ba\u03c4': u'Oct',  # cfeaf4 in iso-8859-7
    u'\u039d\u03bf\u03ad': u'Nov',  # cdefdd in iso-8859-7
    u'\u039d\u03bf\u03b5': u'Nov',  # cdefe5 in iso-8859-7
    u'\u0394\u03b5\u03ba': u'Dec',  # c4e5ea in iso-8859-7
}

_greek_wdays = {
    u'\u039a\u03c5\u03c1': u'Sun',  # caf5f1 in iso-8859-7
    u'\u0394\u03b5\u03c5': u'Mon',  # c4e5f5 in iso-8859-7
    u'\u03a4\u03c1\u03b9': u'Tue',  # d4f1e9 in iso-8859-7
    u'\u03a4\u03b5\u03c4': u'Wed',  # d4e5f4 in iso-8859-7
    u'\u03a0\u03b5\u03bc': u'Thu',  # d0e5ec in iso-8859-7
    u'\u03a0\u03b1\u03c1': u'Fri',  # d0e1f1 in iso-8859-7
    u'\u03a3\u03b1\u03b2': u'Sat',  # d3e1e2 in iso-8859-7
}

_greek_date_format_re = \
    re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')

def _parse_date_greek(dateString):
    '''Parse a string according to a Greek 8-bit date format.'''
    m = _greek_date_format_re.match(dateString)
    if not m:
        return
    wday = _greek_wdays[m.group(1)]
    month = _greek_months[m.group(3)]
    rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
                 {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),
                  'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),
                  'zonediff': m.group(8)}
    return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)

# Unicode strings for Hungarian date strings
_hungarian_months = {
    u'janu\u00e1r': u'01',  # e1 in iso-8859-2
    u'febru\u00e1ri': u'02',  # e1 in iso-8859-2
    u'm\u00e1rcius': u'03',  # e1 in iso-8859-2
    u'\u00e1prilis': u'04',  # e1 in iso-8859-2
    u'm\u00e1ujus': u'05',  # e1 in iso-8859-2
    u'j\u00fanius': u'06',  # fa in iso-8859-2
    u'j\u00falius': u'07',  # fa in iso-8859-2
    u'augusztus': u'08',
    u'szeptember': u'09',
    u'okt\u00f3ber': u'10',  # f3 in iso-8859-2
    u'november': u'11',
    u'december': u'12',
}

_hungarian_date_format_re = \
    re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')

def _parse_date_hungarian(dateString):
    '''Parse a string according to a Hungarian 8-bit date format.'''
    m = _hungarian_date_format_re.match(dateString)
    if not m or m.group(2) not in _hungarian_months:
        return None
    month = _hungarian_months[m.group(2)]
    day = m.group(3)
    if len(day) == 1:
        day = '0' + day
    hour = m.group(4)
    if len(hour) == 1:
        hour = '0' + hour
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
                {'year': m.group(1), 'month': month, 'day': day,
                 'hour': hour, 'minute': m.group(5),
                 'zonediff': m.group(6)}
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)

# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license.  Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later.
# Modified to also support MSSQL-style datetimes as defined at:
# http://msdn.microsoft.com/en-us/library/ms186724.aspx
# (which basically means allowing a space as a date/time/timezone separator)
def _parse_date_w3dtf(dateString):
    def __extract_date(m):
        year = int(m.group('year'))
        if year < 100:
            year = 100 * int(time.gmtime()[0] / 100) + int(year)
        if year < 1000:
            return 0, 0, 0
        julian = m.group('julian')
        if julian:
            julian = int(julian)
            month = julian / 30 + 1
            day = julian % 30 + 1
            jday = None
            while jday != julian:
                t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
                jday = time.gmtime(t)[-2]
                diff = abs(jday - julian)
                if jday > julian:
                    if diff < day:
                        day = day - diff
                    else:
                        month = month - 1
                        day = 31
                elif jday < julian:
                    if day + diff < 28:
                        day = day + diff
                    else:
                        month = month + 1
            return year, month, day
        month = m.group('month')
        day = 1
        if month is None:
            month = 1
        else:
            month = int(month)
            day = m.group('day')
            if day:
                day = int(day)
            else:
                day = 1
        return year, month, day

    def __extract_time(m):
        if not m:
            return 0, 0, 0
        hours = m.group('hours')
        if not hours:
            return 0, 0, 0
        hours = int(hours)
        minutes = int(m.group('minutes'))
        seconds = m.group('seconds')
        if seconds:
            seconds = int(seconds)
        else:
            seconds = 0
        return hours, minutes, seconds

    def __extract_tzd(m):
        '''Return the Time Zone Designator as an offset in seconds from UTC.'''
        if not m:
            return 0
        tzd = m.group('tzd')
        if not tzd:
            return 0
        if tzd == 'Z':
            return 0
        hours = int(m.group('tzdhours'))
        minutes = m.group('tzdminutes')
        if minutes:
            minutes = int(minutes)
        else:
            minutes = 0
        offset = (hours*60 + minutes) * 60
        if tzd[0] == '+':
            return -offset
        return offset

    __date_re = ('(?P<year>\d\d\d\d)'
                 '(?:(?P<dsep>-|)'
                 '(?:(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?'
                 '|(?P<julian>\d\d\d)))?')
    __tzd_re = ' ?(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)?'
    __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
                 '(?:(?P=tsep)(?P<seconds>\d\d)(?:[.,]\d+)?)?'
                 + __tzd_re)
    __datetime_re = '%s(?:[T ]%s)?' % (__date_re, __time_re)
    __datetime_rx = re.compile(__datetime_re)
    m = __datetime_rx.match(dateString)
    if (m is None) or (m.group() != dateString):
        return
    gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
    if gmt[0] == 0:
        return
    return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)

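# Illustrative (assuming a sane local timezone database, since mktime() is
# involved):
#
#     >>> _parse_date_w3dtf(u'2003-12-31T10:14:55Z')[:6]
#     (2003, 12, 31, 10, 14, 55)
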
# Define the strings used by the RFC822 datetime parser
_rfc822_months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',
                  'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
_rfc822_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']

# Only the first three letters of the month name matter
_rfc822_month = "(?P<month>%s)(?:[a-z]*,?)" % ('|'.join(_rfc822_months))
# The year may be 2 or 4 digits; capture the century if it exists
_rfc822_year = "(?P<year>(?:\d{2})?\d{2})"
_rfc822_day = "(?P<day> *\d{1,2})"
_rfc822_date = "%s %s %s" % (_rfc822_day, _rfc822_month, _rfc822_year)

_rfc822_hour = "(?P<hour>\d{2}):(?P<minute>\d{2})(?::(?P<second>\d{2}))?"
_rfc822_tz = "(?P<tz>ut|gmt(?:[+-]\d{2}:\d{2})?|[aecmp][sd]?t|[zamny]|[+-]\d{4})"
_rfc822_tznames = {
    'ut': 0, 'gmt': 0, 'z': 0,
    'adt': -3, 'ast': -4, 'at': -4,
    'edt': -4, 'est': -5, 'et': -5,
    'cdt': -5, 'cst': -6, 'ct': -6,
    'mdt': -6, 'mst': -7, 'mt': -7,
    'pdt': -7, 'pst': -8, 'pt': -8,
    'a': -1, 'n': 1,
    'm': -12, 'y': 12,
}
# The timezone may be prefixed by 'Etc/'
_rfc822_time = "%s (?:etc/)?%s" % (_rfc822_hour, _rfc822_tz)

_rfc822_dayname = "(?P<dayname>%s)" % ('|'.join(_rfc822_daynames))
_rfc822_match = re.compile(
    "(?:%s, )?%s(?: %s)?" % (_rfc822_dayname, _rfc822_date, _rfc822_time)
).match

def _parse_date_rfc822(dt):
    """Parse RFC 822 dates and times, with one minor
    difference: years may be 4DIGIT or 2DIGIT.
    http://tools.ietf.org/html/rfc822#section-5"""
    try:
        m = _rfc822_match(dt.lower()).groupdict(0)
    except AttributeError:
        return None

    # Calculate a date and timestamp
    for k in ('year', 'day', 'hour', 'minute', 'second'):
        m[k] = int(m[k])
    m['month'] = _rfc822_months.index(m['month']) + 1
    # If the year is 2 digits, assume everything in the 90's is the 1990's
    if m['year'] < 100:
        m['year'] += (1900, 2000)[m['year'] < 90]
    stamp = datetime.datetime(*[m[i] for i in
                ('year', 'month', 'day', 'hour', 'minute', 'second')])

    # Use the timezone information to calculate the difference between
    # the given date and timestamp and Universal Coordinated Time
    tzhour = 0
    tzmin = 0
    if m['tz'] and m['tz'].startswith('gmt'):
        # Handle GMT and GMT+hh:mm timezone syntax (the trailing
        # timezone info will be handled by the next `if` block)
        m['tz'] = ''.join(m['tz'][3:].split(':')) or 'gmt'
    if not m['tz']:
        pass
    elif m['tz'].startswith('+'):
        tzhour = int(m['tz'][1:3])
        tzmin = int(m['tz'][3:])
    elif m['tz'].startswith('-'):
        tzhour = int(m['tz'][1:3]) * -1
        tzmin = int(m['tz'][3:]) * -1
    else:
        tzhour = _rfc822_tznames[m['tz']]
    delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)

    # Return the date and timestamp in UTC
    return (stamp - delta).utctimetuple()
registerDateHandler(_parse_date_rfc822)

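# Illustrative:
#
#     >>> _parse_date_rfc822(u'Thu, 01 Jan 2004 19:48:21 GMT')[:6]
#     (2004, 1, 1, 19, 48, 21)
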
def _parse_date_asctime(dt):
    """Parse asctime-style dates"""
    dayname, month, day, remainder = dt.split(None, 3)
    # Convert month and day into zero-padded integers
    month = '%02i ' % (_rfc822_months.index(month.lower()) + 1)
    day = '%02i ' % (int(day),)
    dt = month + day + remainder
    return time.strptime(dt, '%m %d %H:%M:%S %Y')[:-1] + (0, )
registerDateHandler(_parse_date_asctime)

def _parse_date_perforce(aDateString):
    """Parse a date in yyyy/mm/dd hh:mm:ss TTT format"""
    # Fri, 2006/09/15 08:19:53 EDT
    _my_date_pattern = re.compile(
        r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})')
    m = _my_date_pattern.search(aDateString)
    if m is None:
        return None
    dow, year, month, day, hour, minute, second, tz = m.groups()
    months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    dateString = "%s, %s %s %s %s:%s:%s %s" % (dow, day, months[int(month) - 1], year, hour, minute, second, tz)
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        return time.gmtime(rfc822.mktime_tz(tm))
registerDateHandler(_parse_date_perforce)

def _parse_date(dateString):
    '''Parses a variety of date formats into a 9-tuple in GMT'''
    if not dateString:
        return None
    for handler in _date_handlers:
        try:
            date9tuple = handler(dateString)
        except (KeyError, OverflowError, ValueError):
            continue
        if not date9tuple:
            continue
        if len(date9tuple) != 9:
            continue
        return date9tuple
    return None

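# Illustrative: every registered format funnels through the same 9-tuple
# shape, and unparseable input yields None:
#
#     >>> _parse_date(u'Thu, 01 Jan 2004 19:48:21 GMT')[:6]
#     (2004, 1, 1, 19, 48, 21)
#     >>> _parse_date(u'not a date') is None
#     True
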
# Each marker represents some of the characters of the opening XML
# processing instruction ('<?xm') in the specified encoding.
EBCDIC_MARKER = _l2bytes([0x4C, 0x6F, 0xA7, 0x94])
UTF16BE_MARKER = _l2bytes([0x00, 0x3C, 0x00, 0x3F])
UTF16LE_MARKER = _l2bytes([0x3C, 0x00, 0x3F, 0x00])
UTF32BE_MARKER = _l2bytes([0x00, 0x00, 0x00, 0x3C])
UTF32LE_MARKER = _l2bytes([0x3C, 0x00, 0x00, 0x00])

ZERO_BYTES = _l2bytes([0x00, 0x00])

# Match the opening XML declaration.
# Example: <?xml version="1.0" encoding="utf-8"?>
RE_XML_DECLARATION = re.compile('^<\?xml[^>]*?>')

# Capture the value of the XML processing instruction's encoding attribute.
# Example: <?xml version="1.0" encoding="utf-8"?>
RE_XML_PI_ENCODING = re.compile(_s2bytes('^<\?.*encoding=[\'"](.*?)[\'"].*\?>'))

def convert_to_utf8(http_headers, data):
    '''Detect and convert the character encoding to UTF-8.

    http_headers is a dictionary
    data is a raw string (not Unicode)'''

    # This is so much trickier than it sounds, it's not even funny.
    # According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
    # is application/xml, application/*+xml,
    # application/xml-external-parsed-entity, or application/xml-dtd,
    # the encoding given in the charset parameter of the HTTP Content-Type
    # takes precedence over the encoding given in the XML prefix within the
    # document, and defaults to 'utf-8' if neither are specified.  But, if
    # the HTTP Content-Type is text/xml, text/*+xml, or
    # text/xml-external-parsed-entity, the encoding given in the XML prefix
    # within the document is ALWAYS IGNORED and only the encoding given in
    # the charset parameter of the HTTP Content-Type header should be
    # respected, and it defaults to 'us-ascii' if not specified.

    # Furthermore, discussion on the atom-syntax mailing list with the
    # author of RFC 3023 leads me to the conclusion that any document
    # served with a Content-Type of text/* and no charset parameter
    # must be treated as us-ascii.  (We now do this.)  And also that it
    # must always be flagged as non-well-formed.  (We now do this too.)

    # If Content-Type is unspecified (input was local file or non-HTTP source)
    # or unrecognized (server just got it totally wrong), then go by the
    # encoding given in the XML prefix of the document and default to
    # 'iso-8859-1' as per the HTTP specification (RFC 2616).

    # Then, assuming we didn't find a character encoding in the HTTP headers
    # (and the HTTP Content-type allowed us to look in the body), we need
    # to sniff the first few bytes of the XML data and try to determine
    # whether the encoding is ASCII-compatible.  Section F of the XML
    # specification shows the way here:
    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info

    # If the sniffed encoding is not ASCII-compatible, we need to make it
    # ASCII compatible so that we can sniff further into the XML declaration
    # to find the encoding attribute, which will tell us the true encoding.

    # Of course, none of this guarantees that we will be able to parse the
    # feed in the declared character encoding (assuming it was declared
    # correctly, which many are not).  iconv_codec can help a lot;
    # you should definitely install it if you can.
    # http://cjkpython.i18n.org/

    bom_encoding = u''
    xml_encoding = u''
    rfc3023_encoding = u''

    # Look at the first few bytes of the document to guess what
    # its encoding may be.  We only need to decode enough of the
    # document that we can use an ASCII-compatible regular
    # expression to search for an XML encoding declaration.
    # The heuristic follows the XML specification, section F:
    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    # Check for BOMs first.
    if data[:4] == codecs.BOM_UTF32_BE:
        bom_encoding = u'utf-32be'
        data = data[4:]
    elif data[:4] == codecs.BOM_UTF32_LE:
        bom_encoding = u'utf-32le'
        data = data[4:]
    elif data[:2] == codecs.BOM_UTF16_BE and data[2:4] != ZERO_BYTES:
        bom_encoding = u'utf-16be'
        data = data[2:]
    elif data[:2] == codecs.BOM_UTF16_LE and data[2:4] != ZERO_BYTES:
        bom_encoding = u'utf-16le'
        data = data[2:]
    elif data[:3] == codecs.BOM_UTF8:
        bom_encoding = u'utf-8'
        data = data[3:]
    # Check for the characters '<?xm' in several encodings.
    elif data[:4] == EBCDIC_MARKER:
        bom_encoding = u'cp037'
    elif data[:4] == UTF16BE_MARKER:
        bom_encoding = u'utf-16be'
    elif data[:4] == UTF16LE_MARKER:
        bom_encoding = u'utf-16le'
    elif data[:4] == UTF32BE_MARKER:
        bom_encoding = u'utf-32be'
    elif data[:4] == UTF32LE_MARKER:
        bom_encoding = u'utf-32le'

    tempdata = data
    try:
        if bom_encoding:
            tempdata = data.decode(bom_encoding).encode('utf-8')
    except (UnicodeDecodeError, LookupError):
        # feedparser recognizes UTF-32 encodings that aren't
        # available in Python 2.4 and 2.5, so it's possible to
        # encounter a LookupError during decoding.
        xml_encoding_match = None
    else:
        xml_encoding_match = RE_XML_PI_ENCODING.match(tempdata)

    if xml_encoding_match:
        xml_encoding = xml_encoding_match.groups()[0].decode('utf-8').lower()
        # Normalize the xml_encoding if necessary.
        if bom_encoding and (xml_encoding in (
            u'u16', u'utf-16', u'utf16', u'utf_16',
            u'u32', u'utf-32', u'utf32', u'utf_32',
            u'iso-10646-ucs-2', u'iso-10646-ucs-4',
            u'csucs4', u'csunicode', u'ucs-2', u'ucs-4'
        )):
            xml_encoding = bom_encoding

    # Find the HTTP Content-Type and, hopefully, a character
    # encoding provided by the server.  The Content-Type is used
    # to choose the "correct" encoding among the BOM encoding,
    # XML declaration encoding, and HTTP encoding, following the
    # heuristic defined in RFC 3023.
    http_content_type = http_headers.get('content-type') or ''
    http_content_type, params = cgi.parse_header(http_content_type)
    http_encoding = params.get('charset', '').replace("'", "")
    if not isinstance(http_encoding, unicode):
        http_encoding = http_encoding.decode('utf-8', 'ignore')

    acceptable_content_type = 0
    application_content_types = (u'application/xml', u'application/xml-dtd',
                                 u'application/xml-external-parsed-entity')
    text_content_types = (u'text/xml', u'text/xml-external-parsed-entity')
    if (http_content_type in application_content_types) or \
       (http_content_type.startswith(u'application/') and
        http_content_type.endswith(u'+xml')):
        acceptable_content_type = 1
        rfc3023_encoding = http_encoding or xml_encoding or u'utf-8'
    elif (http_content_type in text_content_types) or \
         (http_content_type.startswith(u'text/') and
          http_content_type.endswith(u'+xml')):
        acceptable_content_type = 1
        rfc3023_encoding = http_encoding or u'us-ascii'
    elif http_content_type.startswith(u'text/'):
        rfc3023_encoding = http_encoding or u'us-ascii'
    elif http_headers and 'content-type' not in http_headers:
        rfc3023_encoding = xml_encoding or u'iso-8859-1'
    else:
        rfc3023_encoding = xml_encoding or u'utf-8'

    # gb18030 is a superset of gb2312, so always replace gb2312
    # with gb18030 for greater compatibility.
    if rfc3023_encoding.lower() == u'gb2312':
        rfc3023_encoding = u'gb18030'
    if xml_encoding.lower() == u'gb2312':
        xml_encoding = u'gb18030'

    # there are four encodings to keep track of:
    # - http_encoding is the encoding declared in the Content-Type HTTP header
    # - xml_encoding is the encoding declared in the <?xml declaration
    # - bom_encoding is the encoding sniffed from the first 4 bytes of the XML data
    # - rfc3023_encoding is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
    error = None

    if http_headers and (not acceptable_content_type):
        if 'content-type' in http_headers:
            msg = '%s is not an XML media type' % http_headers['content-type']
        else:
            msg = 'no Content-type specified'
        error = NonXMLContentType(msg)

    # determine character encoding
    known_encoding = 0
    chardet_encoding = None
    tried_encodings = []
    if chardet:
        chardet_encoding = unicode(chardet.detect(data)['encoding'] or '', 'ascii', 'ignore')
    # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
    for proposed_encoding in (rfc3023_encoding, xml_encoding, bom_encoding,
                              chardet_encoding, u'utf-8', u'windows-1252', u'iso-8859-2'):
        if not proposed_encoding:
            continue
        if proposed_encoding in tried_encodings:
            continue
        tried_encodings.append(proposed_encoding)
        try:
            data = data.decode(proposed_encoding)
        except (UnicodeDecodeError, LookupError):
            pass
        else:
            known_encoding = 1
            # Update the encoding in the opening XML processing instruction.
            new_declaration = '''<?xml version='1.0' encoding='utf-8'?>'''
            if RE_XML_DECLARATION.search(data):
                data = RE_XML_DECLARATION.sub(new_declaration, data)
            else:
                data = new_declaration + u'\n' + data
            data = data.encode('utf-8')
            break
    # if still no luck, give up
    if not known_encoding:
        error = CharacterEncodingUnknown(
            'document encoding unknown, I tried ' +
            '%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' %
            (rfc3023_encoding, xml_encoding))
        rfc3023_encoding = u''
    elif proposed_encoding != rfc3023_encoding:
        error = CharacterEncodingOverride(
            'document declared as %s, but parsed as %s' %
            (rfc3023_encoding, proposed_encoding))
        rfc3023_encoding = proposed_encoding

    return data, rfc3023_encoding, error

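# Illustrative sketch (hypothetical headers and data): for application/xml,
# the HTTP charset wins and the declaration is rewritten to utf-8:
#
#     data, enc, err = convert_to_utf8(
#         {'content-type': 'application/xml; charset=iso-8859-1'},
#         '<?xml version="1.0" encoding="iso-8859-1"?>\n<feed/>')
#     # enc == u'iso-8859-1', err is None, and data now starts with
#     # <?xml version='1.0' encoding='utf-8'?>
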
# Match XML entity declarations.
# Example: <!ENTITY copyright "(C)">
RE_ENTITY_PATTERN = re.compile(_s2bytes(r'^\s*<!ENTITY([^>]*?)>'), re.MULTILINE)

# Match XML DOCTYPE declarations.
# Example: <!DOCTYPE feed [ ]>
RE_DOCTYPE_PATTERN = re.compile(_s2bytes(r'^\s*<!DOCTYPE([^>]*?)>'), re.MULTILINE)

# Match safe entity declarations.
# This will allow hexadecimal character references through,
# as well as text, but not arbitrary nested entities.
# Example: cubed "&#179;"
# Example: copyright "(C)"
# Forbidden: explode1 "&explode2;&explode2;"
RE_SAFE_ENTITY_PATTERN = re.compile(_s2bytes('\s+(\w+)\s+"(&#\w+;|[^&"]*)"'))

def replace_doctype(data):
    '''Strips and replaces the DOCTYPE, returns (rss_version, stripped_data,
    safe_entities)

    rss_version may be 'rss091n' or None
    stripped_data is the same XML document with a replaced DOCTYPE
    safe_entities is a dict of entity definitions that are safe to re-expand
    '''
    # Divide the document into two groups by finding the location
    # of the first element that doesn't begin with '<?' or '<!'.
    start = re.search(_s2bytes('<\w'), data)
    start = start and start.start() or -1
    head, data = data[:start+1], data[start+1:]

    # Save and then remove all of the ENTITY declarations.
    entity_results = RE_ENTITY_PATTERN.findall(head)
    head = RE_ENTITY_PATTERN.sub(_s2bytes(''), head)

    # Find the DOCTYPE declaration and check the feed type.
    doctype_results = RE_DOCTYPE_PATTERN.findall(head)
    doctype = doctype_results and doctype_results[0] or _s2bytes('')
    if _s2bytes('netscape') in doctype.lower():
        version = u'rss091n'
    else:
        version = None

    # Re-insert the safe ENTITY declarations if a DOCTYPE was found.
    replacement = _s2bytes('')
    if len(doctype_results) == 1 and entity_results:
        match_safe_entities = lambda e: RE_SAFE_ENTITY_PATTERN.match(e)
        safe_entities = filter(match_safe_entities, entity_results)
        if safe_entities:
            replacement = _s2bytes('<!DOCTYPE feed [\n<!ENTITY') \
                        + _s2bytes('>\n<!ENTITY ').join(safe_entities) \
                        + _s2bytes('>\n]>')
    data = RE_DOCTYPE_PATTERN.sub(replacement, head) + data

    # Precompute the safe entities for the loose parser.
    safe_entities = dict((k.decode('utf-8'), v.decode('utf-8'))
                         for k, v in RE_SAFE_ENTITY_PATTERN.findall(replacement))
    return version, data, safe_entities

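# Illustrative (hypothetical document): a Netscape RSS 0.91 DOCTYPE is
# detected and stripped:
#
#     version, data, entities = replace_doctype(
#         '<!DOCTYPE rss PUBLIC "-//Netscape Communications//DTD RSS 0.91//EN" "">\n<rss/>')
#     # version == u'rss091n', the DOCTYPE is gone from data, entities == {}
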
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=None, request_headers=None, response_headers=None):
    '''Parse a feed from a URL, file, stream, or string.

    request_headers, if given, is a dict from http header name to value to add
    to the request; this overrides internally generated values.
    '''

    if handlers is None:
        handlers = []
    if request_headers is None:
        request_headers = {}
    if response_headers is None:
        response_headers = {}

    result = FeedParserDict()
    result['feed'] = FeedParserDict()
    result['entries'] = []
    result['bozo'] = 0
    if not isinstance(handlers, list):
        handlers = [handlers]
    try:
        f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers)
        data = f.read()
    except Exception, e:
        result['bozo'] = 1
        result['bozo_exception'] = e
        data = None
        f = None

    if hasattr(f, 'headers'):
        result['headers'] = dict(f.headers)
    # overwrite existing headers using response_headers
    if 'headers' in result:
        result['headers'].update(response_headers)
    elif response_headers:
        result['headers'] = copy.deepcopy(response_headers)

    # lowercase all of the HTTP headers for comparisons per RFC 2616
    if 'headers' in result:
        http_headers = dict((k.lower(), v) for k, v in result['headers'].items())
    else:
        http_headers = {}

    # if feed is gzip-compressed, decompress it
    if f and data and http_headers:
        if gzip and 'gzip' in http_headers.get('content-encoding', ''):
            try:
                data = gzip.GzipFile(fileobj=_StringIO(data)).read()
            except (IOError, struct.error), e:
                # IOError can occur if the gzip header is bad.
                # struct.error can occur if the data is damaged.
                result['bozo'] = 1
                result['bozo_exception'] = e
                if isinstance(e, struct.error):
                    # A gzip header was found but the data is corrupt.
                    # Ideally, we should re-request the feed without the
                    # 'Accept-encoding: gzip' header, but we don't.
                    data = None
        elif zlib and 'deflate' in http_headers.get('content-encoding', ''):
            try:
                data = zlib.decompress(data)
            except zlib.error, e:
                try:
                    # The data may have no headers and no checksum.
                    data = zlib.decompress(data, -15)
                except zlib.error, e:
                    result['bozo'] = 1
                    result['bozo_exception'] = e

    # save HTTP headers
    if http_headers:
        if 'etag' in http_headers:
            etag = http_headers.get('etag', u'')
            if not isinstance(etag, unicode):
                etag = etag.decode('utf-8', 'ignore')
            if etag:
                result['etag'] = etag
        if 'last-modified' in http_headers:
            modified = http_headers.get('last-modified', u'')
            if modified:
                result['modified'] = modified
                result['modified_parsed'] = _parse_date(modified)
    if hasattr(f, 'url'):
        if not isinstance(f.url, unicode):
            result['href'] = f.url.decode('utf-8', 'ignore')
        else:
            result['href'] = f.url
        result['status'] = 200
    if hasattr(f, 'status'):
        result['status'] = f.status
    if hasattr(f, 'close'):
        f.close()

    if data is None:
        return result

    # Stop processing if the server sent HTTP 304 Not Modified.
    if getattr(f, 'code', 0) == 304:
        result['version'] = u''
        result['debug_message'] = 'The feed has not changed since you last checked, ' + \
            'so the server sent no data.  This is a feature, not a bug!'
        return result

    data, result['encoding'], error = convert_to_utf8(http_headers, data)
    use_strict_parser = result['encoding'] and True or False
    if error is not None:
        result['bozo'] = 1
        result['bozo_exception'] = error

    result['version'], data, entities = replace_doctype(data)

    # Ensure that baseuri is an absolute URI using an acceptable URI scheme.
    contentloc = http_headers.get('content-location', u'')
    href = result.get('href', u'')
    baseuri = _makeSafeAbsoluteURI(href, contentloc) or _makeSafeAbsoluteURI(contentloc) or href

    baselang = http_headers.get('content-language', None)
    if not isinstance(baselang, unicode) and baselang is not None:
        baselang = baselang.decode('utf-8', 'ignore')

    if not _XML_AVAILABLE:
        use_strict_parser = 0
    if use_strict_parser:
        # initialize the SAX parser
        feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
        saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
        saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
        try:
            # disable downloading external doctype references, if possible
            saxparser.setFeature(xml.sax.handler.feature_external_ges, 0)
        except xml.sax.SAXNotSupportedException:
            pass
        saxparser.setContentHandler(feedparser)
        saxparser.setErrorHandler(feedparser)
        source = xml.sax.xmlreader.InputSource()
        source.setByteStream(_StringIO(data))
        try:
            saxparser.parse(source)
        except xml.sax.SAXException, e:
            result['bozo'] = 1
            result['bozo_exception'] = feedparser.exc or e
            use_strict_parser = 0
    if not use_strict_parser and _SGML_AVAILABLE:
        feedparser = _LooseFeedParser(baseuri, baselang, 'utf-8', entities)
        feedparser.feed(data.decode('utf-8', 'replace'))
    result['feed'] = feedparser.feeddata
    result['entries'] = feedparser.entries
    result['version'] = result['version'] or feedparser.version
    result['namespaces'] = feedparser.namespacesInUse
    return result

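# Typical usage (illustrative; the URL is hypothetical, and this assumes the
# module is importable as feedparser):
#
#     import feedparser
#     d = feedparser.parse('http://example.org/feed.xml')
#     d['feed'].get('title')        # feed-level metadata
#     d['entries'][0].get('title')  # per-entry metadata
#     d['bozo']                     # 1 if the feed was not well-formed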