PageRenderTime 41ms CodeModel.GetById 14ms RepoModel.GetById 0ms app.codeStats 1ms

/feedparser.py

https://github.com/dpw/pnntprss
Python | 4013 lines | 3681 code | 150 blank | 182 comment | 237 complexity | 1cd2e1c4a1372515454a48a1105d69e9 MD5 | raw file
  1. """Universal feed parser
  2. Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
  3. Visit https://code.google.com/p/feedparser/ for the latest version
  4. Visit http://packages.python.org/feedparser/ for the latest documentation
  5. Required: Python 2.4 or later
  6. Recommended: iconv_codec <http://cjkpython.i18n.org/>
  7. """
  8. __version__ = "5.1.3"
  9. __license__ = """
  10. Copyright (c) 2010-2012 Kurt McKee <contactme@kurtmckee.org>
  11. Copyright (c) 2002-2008 Mark Pilgrim
  12. All rights reserved.
  13. Redistribution and use in source and binary forms, with or without modification,
  14. are permitted provided that the following conditions are met:
  15. * Redistributions of source code must retain the above copyright notice,
  16. this list of conditions and the following disclaimer.
  17. * Redistributions in binary form must reproduce the above copyright notice,
  18. this list of conditions and the following disclaimer in the documentation
  19. and/or other materials provided with the distribution.
  20. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
  21. AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  22. IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  23. ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  24. LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  25. CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  26. SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  27. INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  28. CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  29. ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  30. POSSIBILITY OF SUCH DAMAGE."""
  31. __author__ = "Mark Pilgrim <http://diveintomark.org/>"
  32. __contributors__ = ["Jason Diamond <http://injektilo.org/>",
  33. "John Beimler <http://john.beimler.org/>",
  34. "Fazal Majid <http://www.majid.info/mylos/weblog/>",
  35. "Aaron Swartz <http://aaronsw.com/>",
  36. "Kevin Marks <http://epeus.blogspot.com/>",
  37. "Sam Ruby <http://intertwingly.net/>",
  38. "Ade Oshineye <http://blog.oshineye.com/>",
  39. "Martin Pool <http://sourcefrog.net/>",
  40. "Kurt McKee <http://kurtmckee.org/>",
  41. "Bernd Schlapsi <https://github.com/brot>",]
  42. # HTTP "User-Agent" header to send to servers when downloading feeds.
  43. # If you are embedding feedparser in a larger application, you should
  44. # change this to your application name and URL.
  45. USER_AGENT = "UniversalFeedParser/%s +https://code.google.com/p/feedparser/" % __version__
  46. # HTTP "Accept" header to send to servers when downloading feeds. If you don't
  47. # want to send an Accept header, set this to None.
  48. ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
  49. # List of preferred XML parsers, by SAX driver name. These will be tried first,
  50. # but if they're not installed, Python will keep searching through its own list
  51. # of pre-installed parsers until it finds one that supports everything we need.
  52. PREFERRED_XML_PARSERS = ["drv_libxml2"]
  53. # If you want feedparser to automatically run HTML markup through HTML Tidy, set
  54. # this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
  55. # or utidylib <http://utidylib.berlios.de/>.
  56. TIDY_MARKUP = 0
  57. # List of Python interfaces for HTML Tidy, in order of preference. Only useful
  58. # if TIDY_MARKUP = 1
  59. PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
  60. # If you want feedparser to automatically resolve all relative URIs, set this
  61. # to 1.
  62. RESOLVE_RELATIVE_URIS = 1
  63. # If you want feedparser to automatically sanitize all potentially unsafe
  64. # HTML content, set this to 1.
  65. SANITIZE_HTML = 1
  66. # If you want feedparser to automatically parse microformat content embedded
  67. # in entry contents, set this to 1
  68. PARSE_MICROFORMATS = 1
  69. # ---------- Python 3 modules (make it work if possible) ----------
  70. try:
  71. import rfc822
  72. except ImportError:
  73. from email import _parseaddr as rfc822
  74. try:
  75. # Python 3.1 introduces bytes.maketrans and simultaneously
  76. # deprecates string.maketrans; use bytes.maketrans if possible
  77. _maketrans = bytes.maketrans
  78. except (NameError, AttributeError):
  79. import string
  80. _maketrans = string.maketrans
  81. # base64 support for Atom feeds that contain embedded binary data
  82. try:
  83. import base64, binascii
  84. except ImportError:
  85. base64 = binascii = None
  86. else:
  87. # Python 3.1 deprecates decodestring in favor of decodebytes
  88. _base64decode = getattr(base64, 'decodebytes', base64.decodestring)
  89. # _s2bytes: convert a UTF-8 str to bytes if the interpreter is Python 3
  90. # _l2bytes: convert a list of ints to bytes if the interpreter is Python 3
  91. try:
  92. if bytes is str:
  93. # In Python 2.5 and below, bytes doesn't exist (NameError)
  94. # In Python 2.6 and above, bytes and str are the same type
  95. raise NameError
  96. except NameError:
  97. # Python 2
  98. def _s2bytes(s):
  99. return s
  100. def _l2bytes(l):
  101. return ''.join(map(chr, l))
  102. else:
  103. # Python 3
  104. def _s2bytes(s):
  105. return bytes(s, 'utf8')
  106. def _l2bytes(l):
  107. return bytes(l)
  108. # If you want feedparser to allow all URL schemes, set this to ()
  109. # List culled from Python's urlparse documentation at:
  110. # http://docs.python.org/library/urlparse.html
  111. # as well as from "URI scheme" at Wikipedia:
  112. # https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme
  113. # Many more will likely need to be added!
  114. ACCEPTABLE_URI_SCHEMES = (
  115. 'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'magnet',
  116. 'mailto', 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu',
  117. 'sftp', 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet',
  118. 'wais',
  119. # Additional common-but-unofficial schemes
  120. 'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs',
  121. 'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg',
  122. )
  123. #ACCEPTABLE_URI_SCHEMES = ()
  124. # ---------- required modules (should come with any Python distribution) ----------
  125. import cgi
  126. import codecs
  127. import copy
  128. import datetime
  129. import re
  130. import struct
  131. import time
  132. import types
  133. import urllib
  134. import urllib2
  135. import urlparse
  136. import warnings
  137. from htmlentitydefs import name2codepoint, codepoint2name, entitydefs
  138. try:
  139. from io import BytesIO as _StringIO
  140. except ImportError:
  141. try:
  142. from cStringIO import StringIO as _StringIO
  143. except ImportError:
  144. from StringIO import StringIO as _StringIO
  145. # ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
  146. # gzip is included with most Python distributions, but may not be available if you compiled your own
  147. try:
  148. import gzip
  149. except ImportError:
  150. gzip = None
  151. try:
  152. import zlib
  153. except ImportError:
  154. zlib = None
  155. # If a real XML parser is available, feedparser will attempt to use it. feedparser has
  156. # been tested with the built-in SAX parser and libxml2. On platforms where the
  157. # Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
  158. # versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
  159. try:
  160. import xml.sax
  161. from xml.sax.saxutils import escape as _xmlescape
  162. except ImportError:
  163. _XML_AVAILABLE = 0
  164. def _xmlescape(data,entities={}):
  165. data = data.replace('&', '&amp;')
  166. data = data.replace('>', '&gt;')
  167. data = data.replace('<', '&lt;')
  168. for char, entity in entities:
  169. data = data.replace(char, entity)
  170. return data
  171. else:
  172. try:
  173. xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
  174. except xml.sax.SAXReaderNotAvailable:
  175. _XML_AVAILABLE = 0
  176. else:
  177. _XML_AVAILABLE = 1
  178. # sgmllib is not available by default in Python 3; if the end user doesn't have
  179. # it available then we'll lose illformed XML parsing, content santizing, and
  180. # microformat support (at least while feedparser depends on BeautifulSoup).
  181. try:
  182. import sgmllib
  183. except ImportError:
  184. # This is probably Python 3, which doesn't include sgmllib anymore
  185. _SGML_AVAILABLE = 0
  186. # Mock sgmllib enough to allow subclassing later on
  187. class sgmllib(object):
  188. class SGMLParser(object):
  189. def goahead(self, i):
  190. pass
  191. def parse_starttag(self, i):
  192. pass
  193. else:
  194. _SGML_AVAILABLE = 1
  195. # sgmllib defines a number of module-level regular expressions that are
  196. # insufficient for the XML parsing feedparser needs. Rather than modify
  197. # the variables directly in sgmllib, they're defined here using the same
  198. # names, and the compiled code objects of several sgmllib.SGMLParser
  199. # methods are copied into _BaseHTMLProcessor so that they execute in
  200. # feedparser's scope instead of sgmllib's scope.
  201. charref = re.compile('&#(\d+|[xX][0-9a-fA-F]+);')
  202. tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
  203. attrfind = re.compile(
  204. r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)[$]?(\s*=\s*'
  205. r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?'
  206. )
  207. # Unfortunately, these must be copied over to prevent NameError exceptions
  208. entityref = sgmllib.entityref
  209. incomplete = sgmllib.incomplete
  210. interesting = sgmllib.interesting
  211. shorttag = sgmllib.shorttag
  212. shorttagopen = sgmllib.shorttagopen
  213. starttagopen = sgmllib.starttagopen
  214. class _EndBracketRegEx:
  215. def __init__(self):
  216. # Overriding the built-in sgmllib.endbracket regex allows the
  217. # parser to find angle brackets embedded in element attributes.
  218. self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
  219. def search(self, target, index=0):
  220. match = self.endbracket.match(target, index)
  221. if match is not None:
  222. # Returning a new object in the calling thread's context
  223. # resolves a thread-safety.
  224. return EndBracketMatch(match)
  225. return None
  226. class EndBracketMatch:
  227. def __init__(self, match):
  228. self.match = match
  229. def start(self, n):
  230. return self.match.end(n)
  231. endbracket = _EndBracketRegEx()
  232. # iconv_codec provides support for more character encodings.
  233. # It's available from http://cjkpython.i18n.org/
  234. try:
  235. import iconv_codec
  236. except ImportError:
  237. pass
  238. # chardet library auto-detects character encodings
  239. # Download from http://chardet.feedparser.org/
  240. try:
  241. import chardet
  242. except ImportError:
  243. chardet = None
  244. # BeautifulSoup is used to extract microformat content from HTML
  245. # feedparser is tested using BeautifulSoup 3.2.0
  246. # http://www.crummy.com/software/BeautifulSoup/
  247. try:
  248. import BeautifulSoup
  249. except ImportError:
  250. BeautifulSoup = None
  251. PARSE_MICROFORMATS = False
  252. # ---------- don't touch these ----------
  253. class ThingsNobodyCaresAboutButMe(Exception): pass
  254. class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
  255. class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
  256. class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
  257. class UndeclaredNamespace(Exception): pass
  258. SUPPORTED_VERSIONS = {'': u'unknown',
  259. 'rss090': u'RSS 0.90',
  260. 'rss091n': u'RSS 0.91 (Netscape)',
  261. 'rss091u': u'RSS 0.91 (Userland)',
  262. 'rss092': u'RSS 0.92',
  263. 'rss093': u'RSS 0.93',
  264. 'rss094': u'RSS 0.94',
  265. 'rss20': u'RSS 2.0',
  266. 'rss10': u'RSS 1.0',
  267. 'rss': u'RSS (unknown version)',
  268. 'atom01': u'Atom 0.1',
  269. 'atom02': u'Atom 0.2',
  270. 'atom03': u'Atom 0.3',
  271. 'atom10': u'Atom 1.0',
  272. 'atom': u'Atom (unknown version)',
  273. 'cdf': u'CDF',
  274. }
  275. class FeedParserDict(dict):
  276. keymap = {'channel': 'feed',
  277. 'items': 'entries',
  278. 'guid': 'id',
  279. 'date': 'updated',
  280. 'date_parsed': 'updated_parsed',
  281. 'description': ['summary', 'subtitle'],
  282. 'description_detail': ['summary_detail', 'subtitle_detail'],
  283. 'url': ['href'],
  284. 'modified': 'updated',
  285. 'modified_parsed': 'updated_parsed',
  286. 'issued': 'published',
  287. 'issued_parsed': 'published_parsed',
  288. 'copyright': 'rights',
  289. 'copyright_detail': 'rights_detail',
  290. 'tagline': 'subtitle',
  291. 'tagline_detail': 'subtitle_detail'}
  292. def __getitem__(self, key):
  293. if key == 'category':
  294. try:
  295. return dict.__getitem__(self, 'tags')[0]['term']
  296. except IndexError:
  297. raise KeyError, "object doesn't have key 'category'"
  298. elif key == 'enclosures':
  299. norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel'])
  300. return [norel(link) for link in dict.__getitem__(self, 'links') if link['rel']==u'enclosure']
  301. elif key == 'license':
  302. for link in dict.__getitem__(self, 'links'):
  303. if link['rel']==u'license' and 'href' in link:
  304. return link['href']
  305. elif key == 'updated':
  306. # Temporarily help developers out by keeping the old
  307. # broken behavior that was reported in issue 310.
  308. # This fix was proposed in issue 328.
  309. if not dict.__contains__(self, 'updated') and \
  310. dict.__contains__(self, 'published'):
  311. warnings.warn("To avoid breaking existing software while "
  312. "fixing issue 310, a temporary mapping has been created "
  313. "from `updated` to `published` if `updated` doesn't "
  314. "exist. This fallback will be removed in a future version "
  315. "of feedparser.", DeprecationWarning)
  316. return dict.__getitem__(self, 'published')
  317. return dict.__getitem__(self, 'updated')
  318. elif key == 'updated_parsed':
  319. if not dict.__contains__(self, 'updated_parsed') and \
  320. dict.__contains__(self, 'published_parsed'):
  321. warnings.warn("To avoid breaking existing software while "
  322. "fixing issue 310, a temporary mapping has been created "
  323. "from `updated_parsed` to `published_parsed` if "
  324. "`updated_parsed` doesn't exist. This fallback will be "
  325. "removed in a future version of feedparser.",
  326. DeprecationWarning)
  327. return dict.__getitem__(self, 'published_parsed')
  328. return dict.__getitem__(self, 'updated_parsed')
  329. else:
  330. realkey = self.keymap.get(key, key)
  331. if isinstance(realkey, list):
  332. for k in realkey:
  333. if dict.__contains__(self, k):
  334. return dict.__getitem__(self, k)
  335. elif dict.__contains__(self, realkey):
  336. return dict.__getitem__(self, realkey)
  337. return dict.__getitem__(self, key)
  338. def __contains__(self, key):
  339. if key in ('updated', 'updated_parsed'):
  340. # Temporarily help developers out by keeping the old
  341. # broken behavior that was reported in issue 310.
  342. # This fix was proposed in issue 328.
  343. return dict.__contains__(self, key)
  344. try:
  345. self.__getitem__(key)
  346. except KeyError:
  347. return False
  348. else:
  349. return True
  350. has_key = __contains__
  351. def get(self, key, default=None):
  352. try:
  353. return self.__getitem__(key)
  354. except KeyError:
  355. return default
  356. def __setitem__(self, key, value):
  357. key = self.keymap.get(key, key)
  358. if isinstance(key, list):
  359. key = key[0]
  360. return dict.__setitem__(self, key, value)
  361. def setdefault(self, key, value):
  362. if key not in self:
  363. self[key] = value
  364. return value
  365. return self[key]
  366. def __getattr__(self, key):
  367. # __getattribute__() is called first; this will be called
  368. # only if an attribute was not already found
  369. try:
  370. return self.__getitem__(key)
  371. except KeyError:
  372. raise AttributeError, "object has no attribute '%s'" % key
  373. def __hash__(self):
  374. return id(self)
  375. _cp1252 = {
  376. 128: unichr(8364), # euro sign
  377. 130: unichr(8218), # single low-9 quotation mark
  378. 131: unichr( 402), # latin small letter f with hook
  379. 132: unichr(8222), # double low-9 quotation mark
  380. 133: unichr(8230), # horizontal ellipsis
  381. 134: unichr(8224), # dagger
  382. 135: unichr(8225), # double dagger
  383. 136: unichr( 710), # modifier letter circumflex accent
  384. 137: unichr(8240), # per mille sign
  385. 138: unichr( 352), # latin capital letter s with caron
  386. 139: unichr(8249), # single left-pointing angle quotation mark
  387. 140: unichr( 338), # latin capital ligature oe
  388. 142: unichr( 381), # latin capital letter z with caron
  389. 145: unichr(8216), # left single quotation mark
  390. 146: unichr(8217), # right single quotation mark
  391. 147: unichr(8220), # left double quotation mark
  392. 148: unichr(8221), # right double quotation mark
  393. 149: unichr(8226), # bullet
  394. 150: unichr(8211), # en dash
  395. 151: unichr(8212), # em dash
  396. 152: unichr( 732), # small tilde
  397. 153: unichr(8482), # trade mark sign
  398. 154: unichr( 353), # latin small letter s with caron
  399. 155: unichr(8250), # single right-pointing angle quotation mark
  400. 156: unichr( 339), # latin small ligature oe
  401. 158: unichr( 382), # latin small letter z with caron
  402. 159: unichr( 376), # latin capital letter y with diaeresis
  403. }
  404. _urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
  405. def _urljoin(base, uri):
  406. uri = _urifixer.sub(r'\1\3', uri)
  407. #try:
  408. if not isinstance(uri, unicode):
  409. uri = uri.decode('utf-8', 'ignore')
  410. uri = urlparse.urljoin(base, uri)
  411. if not isinstance(uri, unicode):
  412. return uri.decode('utf-8', 'ignore')
  413. return uri
  414. #except:
  415. # uri = urlparse.urlunparse([urllib.quote(part) for part in urlparse.urlparse(uri)])
  416. # return urlparse.urljoin(base, uri)
  417. class _FeedParserMixin:
  418. namespaces = {
  419. '': '',
  420. 'http://backend.userland.com/rss': '',
  421. 'http://blogs.law.harvard.edu/tech/rss': '',
  422. 'http://purl.org/rss/1.0/': '',
  423. 'http://my.netscape.com/rdf/simple/0.9/': '',
  424. 'http://example.com/newformat#': '',
  425. 'http://example.com/necho': '',
  426. 'http://purl.org/echo/': '',
  427. 'uri/of/echo/namespace#': '',
  428. 'http://purl.org/pie/': '',
  429. 'http://purl.org/atom/ns#': '',
  430. 'http://www.w3.org/2005/Atom': '',
  431. 'http://purl.org/rss/1.0/modules/rss091#': '',
  432. 'http://webns.net/mvcb/': 'admin',
  433. 'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
  434. 'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
  435. 'http://media.tangent.org/rss/1.0/': 'audio',
  436. 'http://backend.userland.com/blogChannelModule': 'blogChannel',
  437. 'http://web.resource.org/cc/': 'cc',
  438. 'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
  439. 'http://purl.org/rss/1.0/modules/company': 'co',
  440. 'http://purl.org/rss/1.0/modules/content/': 'content',
  441. 'http://my.theinfo.org/changed/1.0/rss/': 'cp',
  442. 'http://purl.org/dc/elements/1.1/': 'dc',
  443. 'http://purl.org/dc/terms/': 'dcterms',
  444. 'http://purl.org/rss/1.0/modules/email/': 'email',
  445. 'http://purl.org/rss/1.0/modules/event/': 'ev',
  446. 'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
  447. 'http://freshmeat.net/rss/fm/': 'fm',
  448. 'http://xmlns.com/foaf/0.1/': 'foaf',
  449. 'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
  450. 'http://postneo.com/icbm/': 'icbm',
  451. 'http://purl.org/rss/1.0/modules/image/': 'image',
  452. 'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
  453. 'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
  454. 'http://purl.org/rss/1.0/modules/link/': 'l',
  455. 'http://search.yahoo.com/mrss': 'media',
  456. # Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace
  457. 'http://search.yahoo.com/mrss/': 'media',
  458. 'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
  459. 'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
  460. 'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
  461. 'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
  462. 'http://purl.org/rss/1.0/modules/reference/': 'ref',
  463. 'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
  464. 'http://purl.org/rss/1.0/modules/search/': 'search',
  465. 'http://purl.org/rss/1.0/modules/slash/': 'slash',
  466. 'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
  467. 'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
  468. 'http://hacks.benhammersley.com/rss/streaming/': 'str',
  469. 'http://purl.org/rss/1.0/modules/subscription/': 'sub',
  470. 'http://purl.org/rss/1.0/modules/syndication/': 'sy',
  471. 'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf',
  472. 'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
  473. 'http://purl.org/rss/1.0/modules/threading/': 'thr',
  474. 'http://purl.org/rss/1.0/modules/textinput/': 'ti',
  475. 'http://madskills.com/public/xml/rss/module/trackback/': 'trackback',
  476. 'http://wellformedweb.org/commentAPI/': 'wfw',
  477. 'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
  478. 'http://www.w3.org/1999/xhtml': 'xhtml',
  479. 'http://www.w3.org/1999/xlink': 'xlink',
  480. 'http://www.w3.org/XML/1998/namespace': 'xml',
  481. }
  482. _matchnamespaces = {}
  483. can_be_relative_uri = set(['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo'])
  484. can_contain_relative_uris = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
  485. can_contain_dangerous_markup = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
  486. html_types = [u'text/html', u'application/xhtml+xml']
  487. def __init__(self, baseuri=None, baselang=None, encoding=u'utf-8'):
  488. if not self._matchnamespaces:
  489. for k, v in self.namespaces.items():
  490. self._matchnamespaces[k.lower()] = v
  491. self.feeddata = FeedParserDict() # feed-level data
  492. self.encoding = encoding # character encoding
  493. self.entries = [] # list of entry-level data
  494. self.version = u'' # feed type/version, see SUPPORTED_VERSIONS
  495. self.namespacesInUse = {} # dictionary of namespaces defined by the feed
  496. # the following are used internally to track state;
  497. # this is really out of control and should be refactored
  498. self.infeed = 0
  499. self.inentry = 0
  500. self.incontent = 0
  501. self.intextinput = 0
  502. self.inimage = 0
  503. self.inauthor = 0
  504. self.incontributor = 0
  505. self.inpublisher = 0
  506. self.insource = 0
  507. self.sourcedata = FeedParserDict()
  508. self.contentparams = FeedParserDict()
  509. self._summaryKey = None
  510. self.namespacemap = {}
  511. self.elementstack = []
  512. self.basestack = []
  513. self.langstack = []
  514. self.baseuri = baseuri or u''
  515. self.lang = baselang or None
  516. self.svgOK = 0
  517. self.title_depth = -1
  518. self.depth = 0
  519. if baselang:
  520. self.feeddata['language'] = baselang.replace('_','-')
  521. # A map of the following form:
  522. # {
  523. # object_that_value_is_set_on: {
  524. # property_name: depth_of_node_property_was_extracted_from,
  525. # other_property: depth_of_node_property_was_extracted_from,
  526. # },
  527. # }
  528. self.property_depth_map = {}
  529. def _normalize_attributes(self, kv):
  530. k = kv[0].lower()
  531. v = k in ('rel', 'type') and kv[1].lower() or kv[1]
  532. # the sgml parser doesn't handle entities in attributes, nor
  533. # does it pass the attribute values through as unicode, while
  534. # strict xml parsers do -- account for this difference
  535. if isinstance(self, _LooseFeedParser):
  536. v = v.replace('&amp;', '&')
  537. if not isinstance(v, unicode):
  538. v = v.decode('utf-8')
  539. return (k, v)
  540. def unknown_starttag(self, tag, attrs):
  541. # increment depth counter
  542. self.depth += 1
  543. # normalize attrs
  544. attrs = map(self._normalize_attributes, attrs)
  545. # track xml:base and xml:lang
  546. attrsD = dict(attrs)
  547. baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
  548. if not isinstance(baseuri, unicode):
  549. baseuri = baseuri.decode(self.encoding, 'ignore')
  550. # ensure that self.baseuri is always an absolute URI that
  551. # uses a whitelisted URI scheme (e.g. not `javscript:`)
  552. if self.baseuri:
  553. self.baseuri = _makeSafeAbsoluteURI(self.baseuri, baseuri) or self.baseuri
  554. else:
  555. self.baseuri = _urljoin(self.baseuri, baseuri)
  556. lang = attrsD.get('xml:lang', attrsD.get('lang'))
  557. if lang == '':
  558. # xml:lang could be explicitly set to '', we need to capture that
  559. lang = None
  560. elif lang is None:
  561. # if no xml:lang is specified, use parent lang
  562. lang = self.lang
  563. if lang:
  564. if tag in ('feed', 'rss', 'rdf:RDF'):
  565. self.feeddata['language'] = lang.replace('_','-')
  566. self.lang = lang
  567. self.basestack.append(self.baseuri)
  568. self.langstack.append(lang)
  569. # track namespaces
  570. for prefix, uri in attrs:
  571. if prefix.startswith('xmlns:'):
  572. self.trackNamespace(prefix[6:], uri)
  573. elif prefix == 'xmlns':
  574. self.trackNamespace(None, uri)
  575. # track inline content
  576. if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
  577. if tag in ('xhtml:div', 'div'):
  578. return # typepad does this 10/2007
  579. # element declared itself as escaped markup, but it isn't really
  580. self.contentparams['type'] = u'application/xhtml+xml'
  581. if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
  582. if tag.find(':') <> -1:
  583. prefix, tag = tag.split(':', 1)
  584. namespace = self.namespacesInUse.get(prefix, '')
  585. if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
  586. attrs.append(('xmlns',namespace))
  587. if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
  588. attrs.append(('xmlns',namespace))
  589. if tag == 'svg':
  590. self.svgOK += 1
  591. return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)
  592. # match namespaces
  593. if tag.find(':') <> -1:
  594. prefix, suffix = tag.split(':', 1)
  595. else:
  596. prefix, suffix = '', tag
  597. prefix = self.namespacemap.get(prefix, prefix)
  598. if prefix:
  599. prefix = prefix + '_'
  600. # special hack for better tracking of empty textinput/image elements in illformed feeds
  601. if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
  602. self.intextinput = 0
  603. if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
  604. self.inimage = 0
  605. # call special handler (if defined) or default handler
  606. methodname = '_start_' + prefix + suffix
  607. try:
  608. method = getattr(self, methodname)
  609. return method(attrsD)
  610. except AttributeError:
  611. # Since there's no handler or something has gone wrong we explicitly add the element and its attributes
  612. unknown_tag = prefix + suffix
  613. if len(attrsD) == 0:
  614. # No attributes so merge it into the encosing dictionary
  615. return self.push(unknown_tag, 1)
  616. else:
  617. # Has attributes so create it in its own dictionary
  618. context = self._getContext()
  619. context[unknown_tag] = attrsD
  620. def unknown_endtag(self, tag):
  621. # match namespaces
  622. if tag.find(':') <> -1:
  623. prefix, suffix = tag.split(':', 1)
  624. else:
  625. prefix, suffix = '', tag
  626. prefix = self.namespacemap.get(prefix, prefix)
  627. if prefix:
  628. prefix = prefix + '_'
  629. if suffix == 'svg' and self.svgOK:
  630. self.svgOK -= 1
  631. # call special handler (if defined) or default handler
  632. methodname = '_end_' + prefix + suffix
  633. try:
  634. if self.svgOK:
  635. raise AttributeError()
  636. method = getattr(self, methodname)
  637. method()
  638. except AttributeError:
  639. self.pop(prefix + suffix)
  640. # track inline content
  641. if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
  642. # element declared itself as escaped markup, but it isn't really
  643. if tag in ('xhtml:div', 'div'):
  644. return # typepad does this 10/2007
  645. self.contentparams['type'] = u'application/xhtml+xml'
  646. if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
  647. tag = tag.split(':')[-1]
  648. self.handle_data('</%s>' % tag, escape=0)
  649. # track xml:base and xml:lang going out of scope
  650. if self.basestack:
  651. self.basestack.pop()
  652. if self.basestack and self.basestack[-1]:
  653. self.baseuri = self.basestack[-1]
  654. if self.langstack:
  655. self.langstack.pop()
  656. if self.langstack: # and (self.langstack[-1] is not None):
  657. self.lang = self.langstack[-1]
  658. self.depth -= 1
  659. def handle_charref(self, ref):
  660. # called for each character reference, e.g. for '&#160;', ref will be '160'
  661. if not self.elementstack:
  662. return
  663. ref = ref.lower()
  664. if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
  665. text = '&#%s;' % ref
  666. else:
  667. if ref[0] == 'x':
  668. c = int(ref[1:], 16)
  669. else:
  670. c = int(ref)
  671. text = unichr(c).encode('utf-8')
  672. self.elementstack[-1][2].append(text)
  673. def handle_entityref(self, ref):
  674. # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
  675. if not self.elementstack:
  676. return
  677. if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
  678. text = '&%s;' % ref
  679. elif ref in self.entities:
  680. text = self.entities[ref]
  681. if text.startswith('&#') and text.endswith(';'):
  682. return self.handle_entityref(text)
  683. else:
  684. try:
  685. name2codepoint[ref]
  686. except KeyError:
  687. text = '&%s;' % ref
  688. else:
  689. text = unichr(name2codepoint[ref]).encode('utf-8')
  690. self.elementstack[-1][2].append(text)
  691. def handle_data(self, text, escape=1):
  692. # called for each block of plain text, i.e. outside of any tag and
  693. # not containing any character or entity references
  694. if not self.elementstack:
  695. return
  696. if escape and self.contentparams.get('type') == u'application/xhtml+xml':
  697. text = _xmlescape(text)
  698. self.elementstack[-1][2].append(text)
  699. def handle_comment(self, text):
  700. # called for each comment, e.g. <!-- insert message here -->
  701. pass
  702. def handle_pi(self, text):
  703. # called for each processing instruction, e.g. <?instruction>
  704. pass
  705. def handle_decl(self, text):
  706. pass
  707. def parse_declaration(self, i):
  708. # override internal declaration handler to handle CDATA blocks
  709. if self.rawdata[i:i+9] == '<![CDATA[':
  710. k = self.rawdata.find(']]>', i)
  711. if k == -1:
  712. # CDATA block began but didn't finish
  713. k = len(self.rawdata)
  714. return k
  715. self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
  716. return k+3
  717. else:
  718. k = self.rawdata.find('>', i)
  719. if k >= 0:
  720. return k+1
  721. else:
  722. # We have an incomplete CDATA block.
  723. return k
  724. def mapContentType(self, contentType):
  725. contentType = contentType.lower()
  726. if contentType == 'text' or contentType == 'plain':
  727. contentType = u'text/plain'
  728. elif contentType == 'html':
  729. contentType = u'text/html'
  730. elif contentType == 'xhtml':
  731. contentType = u'application/xhtml+xml'
  732. return contentType
  733. def trackNamespace(self, prefix, uri):
  734. loweruri = uri.lower()
  735. if not self.version:
  736. if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/'):
  737. self.version = u'rss090'
  738. elif loweruri == 'http://purl.org/rss/1.0/':
  739. self.version = u'rss10'
  740. elif loweruri == 'http://www.w3.org/2005/atom':
  741. self.version = u'atom10'
  742. if loweruri.find(u'backend.userland.com/rss') <> -1:
  743. # match any backend.userland.com namespace
  744. uri = u'http://backend.userland.com/rss'
  745. loweruri = uri
  746. if loweruri in self._matchnamespaces:
  747. self.namespacemap[prefix] = self._matchnamespaces[loweruri]
  748. self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
  749. else:
  750. self.namespacesInUse[prefix or ''] = uri
  751. def resolveURI(self, uri):
  752. return _urljoin(self.baseuri or u'', uri)
  753. def decodeEntities(self, element, data):
  754. return data
  755. def strattrs(self, attrs):
  756. return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'&quot;'})) for t in attrs])
  757. def push(self, element, expectingText):
  758. self.elementstack.append([element, expectingText, []])
  759. def pop(self, element, stripWhitespace=1):
  760. if not self.elementstack:
  761. return
  762. if self.elementstack[-1][0] != element:
  763. return
  764. element, expectingText, pieces = self.elementstack.pop()
  765. if self.version == u'atom10' and self.contentparams.get('type', u'text') == u'application/xhtml+xml':
  766. # remove enclosing child element, but only if it is a <div> and
  767. # only if all the remaining content is nested underneath it.
  768. # This means that the divs would be retained in the following:
  769. # <div>foo</div><div>bar</div>
  770. while pieces and len(pieces)>1 and not pieces[-1].strip():
  771. del pieces[-1]
  772. while pieces and len(pieces)>1 and not pieces[0].strip():
  773. del pieces[0]
  774. if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
  775. depth = 0
  776. for piece in pieces[:-1]:
  777. if piece.startswith('</'):
  778. depth -= 1
  779. if depth == 0:
  780. break
  781. elif piece.startswith('<') and not piece.endswith('/>'):
  782. depth += 1
  783. else:
  784. pieces = pieces[1:-1]
  785. # Ensure each piece is a str for Python 3
  786. for (i, v) in enumerate(pieces):
  787. if not isinstance(v, unicode):
  788. pieces[i] = v.decode('utf-8')
  789. output = u''.join(pieces)
  790. if stripWhitespace:
  791. output = output.strip()
  792. if not expectingText:
  793. return output
  794. # decode base64 content
  795. if base64 and self.contentparams.get('base64', 0):
  796. try:
  797. output = _base64decode(output)
  798. except binascii.Error:
  799. pass
  800. except binascii.Incomplete:
  801. pass
  802. except TypeError:
  803. # In Python 3, base64 takes and outputs bytes, not str
  804. # This may not be the most correct way to accomplish this
  805. output = _base64decode(output.encode('utf-8')).decode('utf-8')
  806. # resolve relative URIs
  807. if (element in self.can_be_relative_uri) and output:
  808. output = self.resolveURI(output)
  809. # decode entities within embedded markup
  810. if not self.contentparams.get('base64', 0):
  811. output = self.decodeEntities(element, output)
  812. # some feed formats require consumers to guess
  813. # whether the content is html or plain text
  814. if not self.version.startswith(u'atom') and self.contentparams.get('type') == u'text/plain':
  815. if self.lookslikehtml(output):
  816. self.contentparams['type'] = u'text/html'
  817. # remove temporary cruft from contentparams
  818. try:
  819. del self.contentparams['mode']
  820. except KeyError:
  821. pass
  822. try:
  823. del self.contentparams['base64']
  824. except KeyError:
  825. pass
  826. is_htmlish = self.mapContentType(self.contentparams.get('type', u'text/html')) in self.html_types
  827. # resolve relative URIs within embedded markup
  828. if is_htmlish and RESOLVE_RELATIVE_URIS:
  829. if element in self.can_contain_relative_uris:
  830. output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', u'text/html'))
  831. # parse microformats
  832. # (must do this before sanitizing because some microformats
  833. # rely on elements that we sanitize)
  834. if PARSE_MICROFORMATS and is_htmlish and element in ['content', 'description', 'summary']:
  835. mfresults = _parseMicroformats(output, self.baseuri, self.encoding)
  836. if mfresults:
  837. for tag in mfresults.get('tags', []):
  838. self._addTag(tag['term'], tag['scheme'], tag['label'])
  839. for enclosure in mfresults.get('enclosures', []):
  840. self._start_enclosure(enclosure)
  841. for xfn in mfresults.get('xfn', []):
  842. self._addXFN(xfn['relationships'], xfn['href'], xfn['name'])
  843. vcard = mfresults.get('vcard')
  844. if vcard:
  845. self._getContext()['vcard'] = vcard
  846. # sanitize embedded markup
  847. if is_htmlish and SANITIZE_HTML:
  848. if element in self.can_contain_dangerous_markup:
  849. output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', u'text/html'))
  850. if self.encoding and not isinstance(output, unicode):
  851. output = output.decode(self.encoding, 'ignore')
  852. # address common error where people take data that is already
  853. # utf-8, presume that it is iso-8859-1, and re-encode it.
  854. if self.encoding in (u'utf-8', u'utf-8_INVALID_PYTHON_3') and isinstance(output, unicode):
  855. try:
  856. output = output.encode('iso-8859-1').decode('utf-8')
  857. except (UnicodeEncodeError, UnicodeDecodeError):
  858. pass
  859. # map win-1252 extensions to the proper code points
  860. if isinstance(output, unicode):
  861. output = output.translate(_cp1252)
  862. # categories/tags/keywords/whatever are handled in _end_category
  863. if element == 'category':
  864. return output
  865. if element == 'title' and -1 < self.title_depth <= self.depth:
  866. return output
  867. # store output in appropriate place(s)
  868. if self.inentry and not self.insource:
  869. if element == 'content':
  870. self.entries[-1].setdefault(element, [])
  871. contentparams = copy.deepcopy(self.contentparams)
  872. contentparams['value'] = output
  873. self.entries[-1][element].append(contentparams)
  874. elif element == 'link':
  875. if not self.inimage:
  876. # query variables in urls in link elements are improperly
  877. # converted from `?a=1&b=2` to `?a=1&b;=2` as if they're
  878. # unhandled character references. fix this special case.
  879. output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
  880. self.entries[-1][element] = output
  881. if output:
  882. self.entries[-1]['links'][-1]['href'] = output
  883. else:
  884. if element == 'description':
  885. element = 'summary'
  886. old_value_depth = self.property_depth_map.setdefault(self.entries[-1], {}).get(element)
  887. if old_value_depth is None or self.depth <= old_value_depth:
  888. self.property_depth_map[self.entries[-1]][element] = self.depth
  889. self.entries[-1][element] = output
  890. if self.incontent:
  891. contentparams = copy.deepcopy(self.contentparams)
  892. contentparams['value'] = output
  893. self.entries[-1][element + '_detail'] = contentparams
  894. elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
  895. context = self._getContext()
  896. if element == 'description':
  897. element = 'subtitle'
  898. context[element] = output
  899. if element == 'link':
  900. # fix query variables; see above for the explanation
  901. output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
  902. context[element] = output
  903. context['links'][-1]['href'] = output
  904. elif self.incontent:
  905. contentparams = copy.deepcopy(self.contentparams)
  906. contentparams['value'] = output
  907. context[element + '_detail'] = contentparams
  908. return output
  909. def pushContent(self, tag, attrsD, defaultContentType, expectingText):
  910. self.incontent += 1
  911. if self.lang:
  912. self.lang=self.lang.replace('_','-')
  913. self.contentparams = FeedParserDict({
  914. 'type': self.mapContentType(attrsD.get('type', defaultContentType)),
  915. 'language': self.lang,
  916. 'base': self.baseuri})
  917. self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
  918. self.push(tag, expectingText)
  919. def popContent(self, tag):
  920. value = self.pop(tag)
  921. self.incontent -= 1
  922. self.contentparams.clear()
  923. return value
  924. # a number of elements in a number of RSS variants are nominally plain
  925. # text, but this is routinely ignored. This is an attempt to detect
  926. # the most common cases. As false positives often result in silent
  927. # data loss, this function errs on the conservative side.
  928. @staticmethod
  929. def lookslikehtml(s):
  930. # must have a close tag or an entity reference to qualify
  931. if not (re.search(r'</(\w+)>',s) or re.search("&#?\w+;",s)):
  932. return
  933. # all tags must be in a restricted subset of valid HTML tags
  934. if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements,
  935. re.findall(r'</?(\w+)',s)):
  936. return
  937. # all entities must have been defined as valid HTML entities
  938. if filter(lambda e: e not in entitydefs.keys(), re.findall(r'&(\w+);', s)):
  939. return
  940. return 1
  941. def _mapToStandardPrefix(self, name):
  942. colonpos = name.find(':')
  943. if colonpos <> -1:
  944. prefix = name[:colonpos]
  945. suffix = name[colonpos+1:]
  946. prefix = self.namespacemap.get(prefix, prefix)
  947. name = prefix + ':' + suffix
  948. return name
  949. def _getAttribute(self, attrsD, name):
  950. return attrsD.get(self._mapToStandardPrefix(name))
  951. def _isBase64(self, attrsD, contentparams):
  952. if attrsD.get('mode', '') == 'base64':
  953. return 1
  954. if self.contentparams['type'].startswith(u'text/'):
  955. return 0
  956. if self.contentparams['type'].endswith(u'+xml'):
  957. return 0
  958. if self.contentparams['type'].endswith(u'/xml'):
  959. return 0
  960. return 1
  961. def _itsAnHrefDamnIt(self, attrsD):
  962. href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
  963. if href:
  964. try:
  965. del attrsD['url']
  966. except KeyError:
  967. pass
  968. try:
  969. del attrsD['uri']
  970. except KeyError:
  971. pass
  972. attrsD['href'] = href
  973. return attrsD
  974. def _save(self, key, value, overwrite=False):
  975. context = self._getContext()
  976. if overwrite:
  977. context[key] = value
  978. else:
  979. context.setdefault(key, value)
  980. def _start_rss(self, attrsD):
  981. versionmap = {'0.91': u'rss091u',
  982. '0.92': u'rss092',
  983. '0.93': u'rss093',
  984. '0.94': u'rss094'}
  985. #If we're here then this is an RSS feed.
  986. #If we don't have a version or have a version that starts with something
  987. #other than RSS then there's been a mistake. Correct it.
  988. if not self.version or not self.version.startswith(u'rss'):
  989. attr_version = attrsD.get('version', '')
  990. version = versionmap.get(attr_version)
  991. if version:
  992. self.version = version
  993. elif attr_version.startswith('2.'):
  994. self.version = u'rss20'
  995. else:
  996. self.version = u'rss'
  997. def _start_channel(self, attrsD):
  998. self.infeed = 1
  999. self._cdf_common(attrsD)
  1000. def _cdf_common(self, attrsD):
  1001. if 'lastmod' in attrsD:
  1002. self._start_modified({})
  1003. self.elementstack[-1][-1] = attrsD['lastmod']
  1004. self._end_modified()
  1005. if 'href' in attrsD:
  1006. self._start_link({})
  1007. self.elementstack[-1][-1] = attrsD['href']
  1008. self._end_link()
  1009. def _start_feed(self, attrsD):
  1010. self.infeed = 1
  1011. versionmap = {'0.1': u'atom01',
  1012. '0.2': u'atom02',
  1013. '0.3': u'atom03'}
  1014. if not self.version:
  1015. attr_version = attrsD.get('version')
  1016. version = versionmap.get(attr_version)
  1017. if version:
  1018. self.version = version
  1019. else:
  1020. self.version = u'atom'
  1021. def _end_channel(self):
  1022. self.infeed = 0
  1023. _end_feed = _end_channel
  1024. def _start_image(self, attrsD):
  1025. context = self._getContext()
  1026. if not self.inentry:
  1027. context.setdefault('image', FeedParserDict())
  1028. self.inimage = 1
  1029. self.title_depth = -1
  1030. self.push('image', 0)
  1031. def _end_image(self):
  1032. self.pop('image')
  1033. self.inimage = 0
  1034. def _start_textinput(self, attrsD):
  1035. context = self._getContext()
  1036. context.setdefault('textinput', FeedParserDict())
  1037. self.intextinput = 1
  1038. self.title_depth = -1
  1039. self.push('textinput', 0)
  1040. _start_textInput = _start_textinput
  1041. def _end_textinput(self):
  1042. self.pop('textinput')
  1043. self.intextinput = 0
  1044. _end_textInput = _end_textinput
  1045. def _start_author(self, attrsD):
  1046. self.inauthor = 1
  1047. self.push('author', 1)
  1048. # Append a new FeedParserDict when expecting an author
  1049. context = self._getContext()
  1050. context.setdefault('authors', [])
  1051. context['authors'].append(FeedParserDict())
  1052. _start_managingeditor = _start_author
  1053. _start_dc_author = _start_author
  1054. _start_dc_creator = _start_author
  1055. _start_itunes_author = _start_author
  1056. def _end_author(self):
  1057. self.pop('author')
  1058. self.inauthor = 0
  1059. self._sync_author_detail()
  1060. _end_managingeditor = _end_author
  1061. _end_dc_author = _end_author
  1062. _end_dc_creator = _end_author
  1063. _end_itunes_author = _end_author
  1064. def _start_itunes_owner(self, attrsD):
  1065. self.inpublisher = 1
  1066. self.push('publisher', 0)
  1067. def _end_itunes_owner(self):
  1068. self.pop('publisher')
  1069. self.inpublisher = 0
  1070. self._sync_author_detail('publisher')
  1071. def _start_contributor(self, attrsD):
  1072. self.incontributor = 1
  1073. context = self._getContext()
  1074. context.setdefault('contributors', [])
  1075. context['contributors'].append(FeedParserDict())
  1076. self.push('contributor', 0)
  1077. def _end_contributor(self):
  1078. self.pop('contributor')
  1079. self.incontributor = 0
  1080. def _start_dc_contributor(self, attrsD):
  1081. self.incontributor = 1
  1082. context = self._getContext()
  1083. context.setdefault('contributors', [])
  1084. context['contributors'].append(FeedParserDict())
  1085. self.push('name', 0)
  1086. def _end_dc_contributor(self):
  1087. self._end_name()
  1088. self.incontributor = 0
  1089. def _start_name(self, attrsD):
  1090. self.push('name', 0)
  1091. _start_itunes_name = _start_name
  1092. def _end_name(self):
  1093. value = self.pop('name')
  1094. if self.inpublisher:
  1095. self._save_author('name', value, 'publisher')
  1096. elif self.inauthor:
  1097. self._save_author('name', value)
  1098. elif self.incontributor:
  1099. self._save_contributor('name', value)
  1100. elif self.intextinput:
  1101. context = self._getContext()
  1102. context['name'] = value
  1103. _end_itunes_name = _end_name
  1104. def _start_width(self, attrsD):
  1105. self.push('width', 0)
  1106. def _end_width(self):
  1107. value = self.pop('width')
  1108. try:
  1109. value = int(value)
  1110. except ValueError:
  1111. value = 0
  1112. if self.inimage:
  1113. context = self._getContext()
  1114. context['width'] = value
  1115. def _start_height(self, attrsD):
  1116. self.push('height', 0)
  1117. def _end_height(self):
  1118. value = self.pop('height')
  1119. try:
  1120. value = int(value)
  1121. except ValueError:
  1122. value = 0
  1123. if self.inimage:
  1124. context = self._getContext()
  1125. context['height'] = value
  1126. def _start_url(self, attrsD):
  1127. self.push('href', 1)
  1128. _start_homepage = _start_url
  1129. _start_uri = _start_url
  1130. def _end_url(self):
  1131. value = self.pop('href')
  1132. if self.inauthor:
  1133. self._save_author('href', value)
  1134. elif self.incontributor:
  1135. self._save_contributor('href', value)
  1136. _end_homepage = _end_url
  1137. _end_uri = _end_url
  1138. def _start_email(self, attrsD):
  1139. self.push('email', 0)
  1140. _start_itunes_email = _start_email
  1141. def _end_email(self):
  1142. value = self.pop('email')
  1143. if self.inpublisher:
  1144. self._save_author('email', value, 'publisher')
  1145. elif self.inauthor:
  1146. self._save_author('email', value)
  1147. elif self.incontributor:
  1148. self._save_contributor('email', value)
  1149. _end_itunes_email = _end_email
  1150. def _getContext(self):
  1151. if self.insource:
  1152. context = self.sourcedata
  1153. elif self.inimage and 'image' in self.feeddata:
  1154. context = self.feeddata['image']
  1155. elif self.intextinput:
  1156. context = self.feeddata['textinput']
  1157. elif self.inentry:
  1158. context = self.entries[-1]
  1159. else:
  1160. context = self.feeddata
  1161. return context
  1162. def _save_author(self, key, value, prefix='author'):
  1163. context = self._getContext()
  1164. context.setdefault(prefix + '_detail', FeedParserDict())
  1165. context[prefix + '_detail'][key] = value
  1166. self._sync_author_detail()
  1167. context.setdefault('authors', [FeedParserDict()])
  1168. context['authors'][-1][key] = value
  1169. def _save_contributor(self, key, value):
  1170. context = self._getContext()
  1171. context.setdefault('contributors', [FeedParserDict()])
  1172. context['contributors'][-1][key] = value
  1173. def _sync_author_detail(self, key='author'):
  1174. context = self._getContext()
  1175. detail = context.get('%s_detail' % key)
  1176. if detail:
  1177. name = detail.get('name')
  1178. email = detail.get('email')
  1179. if name and email:
  1180. context[key] = u'%s (%s)' % (name, email)
  1181. elif name:
  1182. context[key] = name
  1183. elif email:
  1184. context[key] = email
  1185. else:
  1186. author, email = context.get(key), None
  1187. if not author:
  1188. return
  1189. emailmatch = re.search(ur'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
  1190. if emailmatch:
  1191. email = emailmatch.group(0)
  1192. # probably a better way to do the following, but it passes all the tests
  1193. author = author.replace(email, u'')
  1194. author = author.replace(u'()', u'')
  1195. author = author.replace(u'<>', u'')
  1196. author = author.replace(u'&lt;&gt;', u'')
  1197. author = author.strip()
  1198. if author and (author[0] == u'('):
  1199. author = author[1:]
  1200. if author and (author[-1] == u')'):
  1201. author = author[:-1]
  1202. author = author.strip()
  1203. if author or email:
  1204. context.setdefault('%s_detail' % key, FeedParserDict())
  1205. if author:
  1206. context['%s_detail' % key]['name'] = author
  1207. if email:
  1208. context['%s_detail' % key]['email'] = email
  1209. def _start_subtitle(self, attrsD):
  1210. self.pushContent('subtitle', attrsD, u'text/plain', 1)
  1211. _start_tagline = _start_subtitle
  1212. _start_itunes_subtitle = _start_subtitle
  1213. def _end_subtitle(self):
  1214. self.popContent('subtitle')
  1215. _end_tagline = _end_subtitle
  1216. _end_itunes_subtitle = _end_subtitle
  1217. def _start_rights(self, attrsD):
  1218. self.pushContent('rights', attrsD, u'text/plain', 1)
  1219. _start_dc_rights = _start_rights
  1220. _start_copyright = _start_rights
  1221. def _end_rights(self):
  1222. self.popContent('rights')
  1223. _end_dc_rights = _end_rights
  1224. _end_copyright = _end_rights
  1225. def _start_item(self, attrsD):
  1226. self.entries.append(FeedParserDict())
  1227. self.push('item', 0)
  1228. self.inentry = 1
  1229. self.guidislink = 0
  1230. self.title_depth = -1
  1231. id = self._getAttribute(attrsD, 'rdf:about')
  1232. if id:
  1233. context = self._getContext()
  1234. context['id'] = id
  1235. self._cdf_common(attrsD)
  1236. _start_entry = _start_item
  1237. def _end_item(self):
  1238. self.pop('item')
  1239. self.inentry = 0
  1240. _end_entry = _end_item
  1241. def _start_dc_language(self, attrsD):
  1242. self.push('language', 1)
  1243. _start_language = _start_dc_language
  1244. def _end_dc_language(self):
  1245. self.lang = self.pop('language')
  1246. _end_language = _end_dc_language
  1247. def _start_dc_publisher(self, attrsD):
  1248. self.push('publisher', 1)
  1249. _start_webmaster = _start_dc_publisher
  1250. def _end_dc_publisher(self):
  1251. self.pop('publisher')
  1252. self._sync_author_detail('publisher')
  1253. _end_webmaster = _end_dc_publisher
  1254. def _start_published(self, attrsD):
  1255. self.push('published', 1)
  1256. _start_dcterms_issued = _start_published
  1257. _start_issued = _start_published
  1258. _start_pubdate = _start_published
  1259. def _end_published(self):
  1260. value = self.pop('published')
  1261. self._save('published_parsed', _parse_date(value), overwrite=True)
  1262. _end_dcterms_issued = _end_published
  1263. _end_issued = _end_published
  1264. _end_pubdate = _end_published
  1265. def _start_updated(self, attrsD):
  1266. self.push('updated', 1)
  1267. _start_modified = _start_updated
  1268. _start_dcterms_modified = _start_updated
  1269. _start_dc_date = _start_updated
  1270. _start_lastbuilddate = _start_updated
  1271. def _end_updated(self):
  1272. value = self.pop('updated')
  1273. parsed_value = _parse_date(value)
  1274. self._save('updated_parsed', parsed_value, overwrite=True)
  1275. _end_modified = _end_updated
  1276. _end_dcterms_modified = _end_updated
  1277. _end_dc_date = _end_updated
  1278. _end_lastbuilddate = _end_updated
  1279. def _start_created(self, attrsD):
  1280. self.push('created', 1)
  1281. _start_dcterms_created = _start_created
  1282. def _end_created(self):
  1283. value = self.pop('created')
  1284. self._save('created_parsed', _parse_date(value), overwrite=True)
  1285. _end_dcterms_created = _end_created
  1286. def _start_expirationdate(self, attrsD):
  1287. self.push('expired', 1)
  1288. def _end_expirationdate(self):
  1289. self._save('expired_parsed', _parse_date(self.pop('expired')), overwrite=True)
  1290. def _start_cc_license(self, attrsD):
  1291. context = self._getContext()
  1292. value = self._getAttribute(attrsD, 'rdf:resource')
  1293. attrsD = FeedParserDict()
  1294. attrsD['rel'] = u'license'
  1295. if value:
  1296. attrsD['href']=value
  1297. context.setdefault('links', []).append(attrsD)
  1298. def _start_creativecommons_license(self, attrsD):
  1299. self.push('license', 1)
  1300. _start_creativeCommons_license = _start_creativecommons_license
  1301. def _end_creativecommons_license(self):
  1302. value = self.pop('license')
  1303. context = self._getContext()
  1304. attrsD = FeedParserDict()
  1305. attrsD['rel'] = u'license'
  1306. if value:
  1307. attrsD['href'] = value
  1308. context.setdefault('links', []).append(attrsD)
  1309. del context['license']
  1310. _end_creativeCommons_license = _end_creativecommons_license
  1311. def _addXFN(self, relationships, href, name):
  1312. context = self._getContext()
  1313. xfn = context.setdefault('xfn', [])
  1314. value = FeedParserDict({'relationships': relationships, 'href': href, 'name': name})
  1315. if value not in xfn:
  1316. xfn.append(value)
  1317. def _addTag(self, term, scheme, label):
  1318. context = self._getContext()
  1319. tags = context.setdefault('tags', [])
  1320. if (not term) and (not scheme) and (not label):
  1321. return
  1322. value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
  1323. if value not in tags:
  1324. tags.append(value)
  1325. def _start_category(self, attrsD):
  1326. term = attrsD.get('term')
  1327. scheme = attrsD.get('scheme', attrsD.get('domain'))
  1328. label = attrsD.get('label')
  1329. self._addTag(term, scheme, label)
  1330. self.push('category', 1)
  1331. _start_dc_subject = _start_category
  1332. _start_keywords = _start_category
  1333. def _start_media_category(self, attrsD):
  1334. attrsD.setdefault('scheme', u'http://search.yahoo.com/mrss/category_schema')
  1335. self._start_category(attrsD)
  1336. def _end_itunes_keywords(self):
  1337. for term in self.pop('itunes_keywords').split(','):
  1338. if term.strip():
  1339. self._addTag(term.strip(), u'http://www.itunes.com/', None)
  1340. def _start_itunes_category(self, attrsD):
  1341. self._addTag(attrsD.get('text'), u'http://www.itunes.com/', None)
  1342. self.push('category', 1)
  1343. def _end_category(self):
  1344. value = self.pop('category')
  1345. if not value:
  1346. return
  1347. context = self._getContext()
  1348. tags = context['tags']
  1349. if value and len(tags) and not tags[-1]['term']:
  1350. tags[-1]['term'] = value
  1351. else:
  1352. self._addTag(value, None, None)
  1353. _end_dc_subject = _end_category
  1354. _end_keywords = _end_category
  1355. _end_itunes_category = _end_category
  1356. _end_media_category = _end_category
  1357. def _start_cloud(self, attrsD):
  1358. self._getContext()['cloud'] = FeedParserDict(attrsD)
  1359. def _start_link(self, attrsD):
  1360. attrsD.setdefault('rel', u'alternate')
  1361. if attrsD['rel'] == u'self':
  1362. attrsD.setdefault('type', u'application/atom+xml')
  1363. else:
  1364. attrsD.setdefault('type', u'text/html')
  1365. context = self._getContext()
  1366. attrsD = self._itsAnHrefDamnIt(attrsD)
  1367. if 'href' in attrsD:
  1368. attrsD['href'] = self.resolveURI(attrsD['href'])
  1369. expectingText = self.infeed or self.inentry or self.insource
  1370. context.setdefault('links', [])
  1371. if not (self.inentry and self.inimage):
  1372. context['links'].append(FeedParserDict(attrsD))
  1373. if 'href' in attrsD:
  1374. expectingText = 0
  1375. if (attrsD.get('rel') == u'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
  1376. context['link'] = attrsD['href']
  1377. else:
  1378. self.push('link', expectingText)
  1379. def _end_link(self):
  1380. value = self.pop('link')
  1381. def _start_guid(self, attrsD):
  1382. self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
  1383. self.push('id', 1)
  1384. _start_id = _start_guid
  1385. def _end_guid(self):
  1386. value = self.pop('id')
  1387. self._save('guidislink', self.guidislink and 'link' not in self._getContext())
  1388. if self.guidislink:
  1389. # guid acts as link, but only if 'ispermalink' is not present or is 'true',
  1390. # and only if the item doesn't already have a link element
  1391. self._save('link', value)
  1392. _end_id = _end_guid
  1393. def _start_title(self, attrsD):
  1394. if self.svgOK:
  1395. return self.unknown_starttag('title', attrsD.items())
  1396. self.pushContent('title', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
  1397. _start_dc_title = _start_title
  1398. _start_media_title = _start_title
  1399. def _end_title(self):
  1400. if self.svgOK:
  1401. return
  1402. value = self.popContent('title')
  1403. if not value:
  1404. return
  1405. self.title_depth = self.depth
  1406. _end_dc_title = _end_title
  1407. def _end_media_title(self):
  1408. title_depth = self.title_depth
  1409. self._end_title()
  1410. self.title_depth = title_depth
  1411. def _start_description(self, attrsD):
  1412. context = self._getContext()
  1413. if 'summary' in context:
  1414. self._summaryKey = 'content'
  1415. self._start_content(attrsD)
  1416. else:
  1417. self.pushContent('description', attrsD, u'text/html', self.infeed or self.inentry or self.insource)
  1418. _start_dc_description = _start_description
  1419. def _start_abstract(self, attrsD):
  1420. self.pushContent('description', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
  1421. def _end_description(self):
  1422. if self._summaryKey == 'content':
  1423. self._end_content()
  1424. else:
  1425. value = self.popContent('description')
  1426. self._summaryKey = None
  1427. _end_abstract = _end_description
  1428. _end_dc_description = _end_description
  1429. def _start_info(self, attrsD):
  1430. self.pushContent('info', attrsD, u'text/plain', 1)
  1431. _start_feedburner_browserfriendly = _start_info
  1432. def _end_info(self):
  1433. self.popContent('info')
  1434. _end_feedburner_browserfriendly = _end_info
  1435. def _start_generator(self, attrsD):
  1436. if attrsD:
  1437. attrsD = self._itsAnHrefDamnIt(attrsD)
  1438. if 'href' in attrsD:
  1439. attrsD['href'] = self.resolveURI(attrsD['href'])
  1440. self._getContext()['generator_detail'] = FeedParserDict(attrsD)
  1441. self.push('generator', 1)
  1442. def _end_generator(self):
  1443. value = self.pop('generator')
  1444. context = self._getContext()
  1445. if 'generator_detail' in context:
  1446. context['generator_detail']['name'] = value
  1447. def _start_admin_generatoragent(self, attrsD):
  1448. self.push('generator', 1)
  1449. value = self._getAttribute(attrsD, 'rdf:resource')
  1450. if value:
  1451. self.elementstack[-1][2].append(value)
  1452. self.pop('generator')
  1453. self._getContext()['generator_detail'] = FeedParserDict({'href': value})
  1454. def _start_admin_errorreportsto(self, attrsD):
  1455. self.push('errorreportsto', 1)
  1456. value = self._getAttribute(attrsD, 'rdf:resource')
  1457. if value:
  1458. self.elementstack[-1][2].append(value)
  1459. self.pop('errorreportsto')
  1460. def _start_summary(self, attrsD):
  1461. context = self._getContext()
  1462. if 'summary' in context:
  1463. self._summaryKey = 'content'
  1464. self._start_content(attrsD)
  1465. else:
  1466. self._summaryKey = 'summary'
  1467. self.pushContent(self._summaryKey, attrsD, u'text/plain', 1)
  1468. _start_itunes_summary = _start_summary
  1469. def _end_summary(self):
  1470. if self._summaryKey == 'content':
  1471. self._end_content()
  1472. else:
  1473. self.popContent(self._summaryKey or 'summary')
  1474. self._summaryKey = None
  1475. _end_itunes_summary = _end_summary
  1476. def _start_enclosure(self, attrsD):
  1477. attrsD = self._itsAnHrefDamnIt(attrsD)
  1478. context = self._getContext()
  1479. attrsD['rel'] = u'enclosure'
  1480. context.setdefault('links', []).append(FeedParserDict(attrsD))
  1481. def _start_source(self, attrsD):
  1482. if 'url' in attrsD:
  1483. # This means that we're processing a source element from an RSS 2.0 feed
  1484. self.sourcedata['href'] = attrsD[u'url']
  1485. self.push('source', 1)
  1486. self.insource = 1
  1487. self.title_depth = -1
  1488. def _end_source(self):
  1489. self.insource = 0
  1490. value = self.pop('source')
  1491. if value:
  1492. self.sourcedata['title'] = value
  1493. self._getContext()['source'] = copy.deepcopy(self.sourcedata)
  1494. self.sourcedata.clear()
  1495. def _start_content(self, attrsD):
  1496. self.pushContent('content', attrsD, u'text/plain', 1)
  1497. src = attrsD.get('src')
  1498. if src:
  1499. self.contentparams['src'] = src
  1500. self.push('content', 1)
  1501. def _start_body(self, attrsD):
  1502. self.pushContent('content', attrsD, u'application/xhtml+xml', 1)
  1503. _start_xhtml_body = _start_body
  1504. def _start_content_encoded(self, attrsD):
  1505. self.pushContent('content', attrsD, u'text/html', 1)
  1506. _start_fullitem = _start_content_encoded
  1507. def _end_content(self):
  1508. copyToSummary = self.mapContentType(self.contentparams.get('type')) in ([u'text/plain'] + self.html_types)
  1509. value = self.popContent('content')
  1510. if copyToSummary:
  1511. self._save('summary', value)
  1512. _end_body = _end_content
  1513. _end_xhtml_body = _end_content
  1514. _end_content_encoded = _end_content
  1515. _end_fullitem = _end_content
  1516. def _start_itunes_image(self, attrsD):
  1517. self.push('itunes_image', 0)
  1518. if attrsD.get('href'):
  1519. self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
  1520. elif attrsD.get('url'):
  1521. self._getContext()['image'] = FeedParserDict({'href': attrsD.get('url')})
  1522. _start_itunes_link = _start_itunes_image
  1523. def _end_itunes_block(self):
  1524. value = self.pop('itunes_block', 0)
  1525. self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
  1526. def _end_itunes_explicit(self):
  1527. value = self.pop('itunes_explicit', 0)
1528. # Convert 'yes' -> True, 'clean' -> False, and any other value to None
  1529. # False and None both evaluate as False, so the difference can be ignored
  1530. # by applications that only need to know if the content is explicit.
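# The index expression below evaluates to 2 for 'yes', True (i.e. 1) for
# 'clean', and 0 for anything else, selecting True, False, or None from the tuple.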
  1531. self._getContext()['itunes_explicit'] = (None, False, True)[(value == 'yes' and 2) or value == 'clean' or 0]
  1532. def _start_media_content(self, attrsD):
  1533. context = self._getContext()
  1534. context.setdefault('media_content', [])
  1535. context['media_content'].append(attrsD)
  1536. def _start_media_thumbnail(self, attrsD):
  1537. context = self._getContext()
  1538. context.setdefault('media_thumbnail', [])
  1539. self.push('url', 1) # new
  1540. context['media_thumbnail'].append(attrsD)
  1541. def _end_media_thumbnail(self):
  1542. url = self.pop('url')
  1543. context = self._getContext()
1544. if url is not None and url.strip():
  1545. if 'url' not in context['media_thumbnail'][-1]:
  1546. context['media_thumbnail'][-1]['url'] = url
  1547. def _start_media_player(self, attrsD):
  1548. self.push('media_player', 0)
  1549. self._getContext()['media_player'] = FeedParserDict(attrsD)
  1550. def _end_media_player(self):
  1551. value = self.pop('media_player')
  1552. context = self._getContext()
  1553. context['media_player']['content'] = value
  1554. def _start_newlocation(self, attrsD):
  1555. self.push('newlocation', 1)
  1556. def _end_newlocation(self):
  1557. url = self.pop('newlocation')
  1558. context = self._getContext()
  1559. # don't set newlocation if the context isn't right
  1560. if context is not self.feeddata:
  1561. return
  1562. context['newlocation'] = _makeSafeAbsoluteURI(self.baseuri, url.strip())
  1563. if _XML_AVAILABLE:
  1564. class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
  1565. def __init__(self, baseuri, baselang, encoding):
  1566. xml.sax.handler.ContentHandler.__init__(self)
  1567. _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
  1568. self.bozo = 0
  1569. self.exc = None
  1570. self.decls = {}
  1571. def startPrefixMapping(self, prefix, uri):
  1572. if not uri:
  1573. return
  1574. # Jython uses '' instead of None; standardize on None
  1575. prefix = prefix or None
  1576. self.trackNamespace(prefix, uri)
  1577. if prefix and uri == 'http://www.w3.org/1999/xlink':
  1578. self.decls['xmlns:' + prefix] = uri
  1579. def startElementNS(self, name, qname, attrs):
  1580. namespace, localname = name
  1581. lowernamespace = str(namespace or '').lower()
1582. if lowernamespace.find(u'backend.userland.com/rss') != -1:
  1583. # match any backend.userland.com namespace
  1584. namespace = u'http://backend.userland.com/rss'
  1585. lowernamespace = namespace
  1586. if qname and qname.find(':') > 0:
  1587. givenprefix = qname.split(':')[0]
  1588. else:
  1589. givenprefix = None
  1590. prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
1591. if givenprefix and (prefix is None or (prefix == '' and lowernamespace == '')) and givenprefix not in self.namespacesInUse:
  1592. raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
  1593. localname = str(localname).lower()
  1594. # qname implementation is horribly broken in Python 2.1 (it
  1595. # doesn't report any), and slightly broken in Python 2.2 (it
  1596. # doesn't report the xml: namespace). So we match up namespaces
  1597. # with a known list first, and then possibly override them with
  1598. # the qnames the SAX parser gives us (if indeed it gives us any
  1599. # at all). Thanks to MatejC for helping me test this and
  1600. # tirelessly telling me that it didn't work yet.
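# Seed the attribute dict with any xmlns:* declarations recorded by
# startPrefixMapping, and reset that store for the next element.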
  1601. attrsD, self.decls = self.decls, {}
  1602. if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
  1603. attrsD['xmlns']=namespace
  1604. if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
  1605. attrsD['xmlns']=namespace
  1606. if prefix:
  1607. localname = prefix.lower() + ':' + localname
  1608. elif namespace and not qname: #Expat
  1609. for name,value in self.namespacesInUse.items():
  1610. if name and value == namespace:
  1611. localname = name + ':' + localname
  1612. break
  1613. for (namespace, attrlocalname), attrvalue in attrs.items():
  1614. lowernamespace = (namespace or '').lower()
  1615. prefix = self._matchnamespaces.get(lowernamespace, '')
  1616. if prefix:
  1617. attrlocalname = prefix + ':' + attrlocalname
  1618. attrsD[str(attrlocalname).lower()] = attrvalue
  1619. for qname in attrs.getQNames():
  1620. attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
  1621. self.unknown_starttag(localname, attrsD.items())
  1622. def characters(self, text):
  1623. self.handle_data(text)
  1624. def endElementNS(self, name, qname):
  1625. namespace, localname = name
  1626. lowernamespace = str(namespace or '').lower()
  1627. if qname and qname.find(':') > 0:
  1628. givenprefix = qname.split(':')[0]
  1629. else:
  1630. givenprefix = ''
  1631. prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
  1632. if prefix:
  1633. localname = prefix + ':' + localname
  1634. elif namespace and not qname: #Expat
  1635. for name,value in self.namespacesInUse.items():
  1636. if name and value == namespace:
  1637. localname = name + ':' + localname
  1638. break
  1639. localname = str(localname).lower()
  1640. self.unknown_endtag(localname)
  1641. def error(self, exc):
  1642. self.bozo = 1
  1643. self.exc = exc
  1644. # drv_libxml2 calls warning() in some cases
  1645. warning = error
  1646. def fatalError(self, exc):
  1647. self.error(exc)
  1648. raise exc
  1649. class _BaseHTMLProcessor(sgmllib.SGMLParser):
  1650. special = re.compile('''[<>'"]''')
  1651. bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
  1652. elements_no_end_tag = set([
  1653. 'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
  1654. 'hr', 'img', 'input', 'isindex', 'keygen', 'link', 'meta', 'param',
  1655. 'source', 'track', 'wbr'
  1656. ])
  1657. def __init__(self, encoding, _type):
  1658. self.encoding = encoding
  1659. self._type = _type
  1660. sgmllib.SGMLParser.__init__(self)
  1661. def reset(self):
  1662. self.pieces = []
  1663. sgmllib.SGMLParser.reset(self)
  1664. def _shorttag_replace(self, match):
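# Rewrite self-closed tags: void elements keep the '<tag />' form, while all
# other tags are expanded into an explicit '<tag></tag>' pair.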
  1665. tag = match.group(1)
  1666. if tag in self.elements_no_end_tag:
  1667. return '<' + tag + ' />'
  1668. else:
  1669. return '<' + tag + '></' + tag + '>'
  1670. # By declaring these methods and overriding their compiled code
  1671. # with the code from sgmllib, the original code will execute in
  1672. # feedparser's scope instead of sgmllib's. This means that the
  1673. # `tagfind` and `charref` regular expressions will be found as
  1674. # they're declared above, not as they're declared in sgmllib.
  1675. def goahead(self, i):
  1676. pass
  1677. goahead.func_code = sgmllib.SGMLParser.goahead.func_code
  1678. def __parse_starttag(self, i):
  1679. pass
  1680. __parse_starttag.func_code = sgmllib.SGMLParser.parse_starttag.func_code
  1681. def parse_starttag(self,i):
  1682. j = self.__parse_starttag(i)
  1683. if self._type == 'application/xhtml+xml':
  1684. if j>2 and self.rawdata[j-2:j]=='/>':
  1685. self.unknown_endtag(self.lasttag)
  1686. return j
  1687. def feed(self, data):
  1688. data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
  1689. data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data)
  1690. data = data.replace('&#39;', "'")
  1691. data = data.replace('&#34;', '"')
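# Under Python 2 (where 'bytes' is undefined or is just 'str') the NameError
# path runs and unicode input is encoded before being fed to sgmllib; under
# Python 3 the encoding name is invalidated instead.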
  1692. try:
  1693. bytes
  1694. if bytes is str:
  1695. raise NameError
  1696. self.encoding = self.encoding + u'_INVALID_PYTHON_3'
  1697. except NameError:
  1698. if self.encoding and isinstance(data, unicode):
  1699. data = data.encode(self.encoding)
  1700. sgmllib.SGMLParser.feed(self, data)
  1701. sgmllib.SGMLParser.close(self)
  1702. def normalize_attrs(self, attrs):
  1703. if not attrs:
  1704. return attrs
  1705. # utility method to be called by descendants
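# e.g. [('Href', 'X'), ('REL', 'Self')] -> [('href', 'X'), ('rel', 'self')]
# (keys lowercased, 'rel'/'type' values lowercased, result sorted)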
  1706. attrs = dict([(k.lower(), v) for k, v in attrs]).items()
  1707. attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
  1708. attrs.sort()
  1709. return attrs
  1710. def unknown_starttag(self, tag, attrs):
  1711. # called for each start tag
  1712. # attrs is a list of (attr, value) tuples
  1713. # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
  1714. uattrs = []
  1715. strattrs=''
  1716. if attrs:
  1717. for key, value in attrs:
  1718. value=value.replace('>','&gt;').replace('<','&lt;').replace('"','&quot;')
  1719. value = self.bare_ampersand.sub("&amp;", value)
  1720. # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
  1721. if not isinstance(value, unicode):
  1722. value = value.decode(self.encoding, 'ignore')
  1723. try:
  1724. # Currently, in Python 3 the key is already a str, and cannot be decoded again
  1725. uattrs.append((unicode(key, self.encoding), value))
  1726. except TypeError:
  1727. uattrs.append((key, value))
  1728. strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs])
  1729. if self.encoding:
  1730. try:
  1731. strattrs = strattrs.encode(self.encoding)
  1732. except (UnicodeEncodeError, LookupError):
  1733. pass
  1734. if tag in self.elements_no_end_tag:
  1735. self.pieces.append('<%s%s />' % (tag, strattrs))
  1736. else:
  1737. self.pieces.append('<%s%s>' % (tag, strattrs))
  1738. def unknown_endtag(self, tag):
  1739. # called for each end tag, e.g. for </pre>, tag will be 'pre'
  1740. # Reconstruct the original end tag.
  1741. if tag not in self.elements_no_end_tag:
  1742. self.pieces.append("</%s>" % tag)
  1743. def handle_charref(self, ref):
  1744. # called for each character reference, e.g. for '&#160;', ref will be '160'
  1745. # Reconstruct the original character reference.
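# Numeric references in the Windows-1252 range (128-159) are remapped via
# _cp1252 to the hex reference of the equivalent Unicode character.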
  1746. ref = ref.lower()
  1747. if ref.startswith('x'):
  1748. value = int(ref[1:], 16)
  1749. else:
  1750. value = int(ref)
  1751. if value in _cp1252:
  1752. self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
  1753. else:
  1754. self.pieces.append('&#%s;' % ref)
  1755. def handle_entityref(self, ref):
  1756. # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
  1757. # Reconstruct the original entity reference.
  1758. if ref in name2codepoint or ref == 'apos':
  1759. self.pieces.append('&%s;' % ref)
  1760. else:
  1761. self.pieces.append('&amp;%s' % ref)
  1762. def handle_data(self, text):
  1763. # called for each block of plain text, i.e. outside of any tag and
  1764. # not containing any character or entity references
  1765. # Store the original text verbatim.
  1766. self.pieces.append(text)
  1767. def handle_comment(self, text):
  1768. # called for each HTML comment, e.g. <!-- insert Javascript code here -->
  1769. # Reconstruct the original comment.
  1770. self.pieces.append('<!--%s-->' % text)
  1771. def handle_pi(self, text):
  1772. # called for each processing instruction, e.g. <?instruction>
  1773. # Reconstruct original processing instruction.
  1774. self.pieces.append('<?%s>' % text)
  1775. def handle_decl(self, text):
  1776. # called for the DOCTYPE, if present, e.g.
  1777. # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
  1778. # "http://www.w3.org/TR/html4/loose.dtd">
  1779. # Reconstruct original DOCTYPE
  1780. self.pieces.append('<!%s>' % text)
  1781. _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
  1782. def _scan_name(self, i, declstartpos):
  1783. rawdata = self.rawdata
  1784. n = len(rawdata)
  1785. if i == n:
  1786. return None, -1
  1787. m = self._new_declname_match(rawdata, i)
  1788. if m:
  1789. s = m.group()
  1790. name = s.strip()
  1791. if (i + len(s)) == n:
  1792. return None, -1 # end of buffer
  1793. return name.lower(), m.end()
  1794. else:
  1795. self.handle_data(rawdata)
  1796. # self.updatepos(declstartpos, i)
  1797. return None, -1
  1798. def convert_charref(self, name):
  1799. return '&#%s;' % name
  1800. def convert_entityref(self, name):
  1801. return '&%s;' % name
  1802. def output(self):
  1803. '''Return processed HTML as a single string'''
  1804. return ''.join([str(p) for p in self.pieces])
  1805. def parse_declaration(self, i):
  1806. try:
  1807. return sgmllib.SGMLParser.parse_declaration(self, i)
  1808. except sgmllib.SGMLParseError:
  1809. # escape the doctype declaration and continue parsing
  1810. self.handle_data('&lt;')
  1811. return i+1
  1812. class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
  1813. def __init__(self, baseuri, baselang, encoding, entities):
  1814. sgmllib.SGMLParser.__init__(self)
  1815. _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
  1816. _BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml')
  1817. self.entities=entities
  1818. def decodeEntities(self, element, data):
  1819. data = data.replace('&#60;', '&lt;')
  1820. data = data.replace('&#x3c;', '&lt;')
  1821. data = data.replace('&#x3C;', '&lt;')
  1822. data = data.replace('&#62;', '&gt;')
  1823. data = data.replace('&#x3e;', '&gt;')
  1824. data = data.replace('&#x3E;', '&gt;')
  1825. data = data.replace('&#38;', '&amp;')
  1826. data = data.replace('&#x26;', '&amp;')
  1827. data = data.replace('&#34;', '&quot;')
  1828. data = data.replace('&#x22;', '&quot;')
  1829. data = data.replace('&#39;', '&apos;')
  1830. data = data.replace('&#x27;', '&apos;')
  1831. if not self.contentparams.get('type', u'xml').endswith(u'xml'):
  1832. data = data.replace('&lt;', '<')
  1833. data = data.replace('&gt;', '>')
  1834. data = data.replace('&amp;', '&')
  1835. data = data.replace('&quot;', '"')
  1836. data = data.replace('&apos;', "'")
  1837. return data
  1838. def strattrs(self, attrs):
  1839. return ''.join([' %s="%s"' % (n,v.replace('"','&quot;')) for n,v in attrs])
  1840. class _MicroformatsParser:
  1841. STRING = 1
  1842. DATE = 2
  1843. URI = 3
  1844. NODE = 4
  1845. EMAIL = 5
  1846. known_xfn_relationships = set(['contact', 'acquaintance', 'friend', 'met', 'co-worker', 'coworker', 'colleague', 'co-resident', 'coresident', 'neighbor', 'child', 'parent', 'sibling', 'brother', 'sister', 'spouse', 'wife', 'husband', 'kin', 'relative', 'muse', 'crush', 'date', 'sweetheart', 'me'])
  1847. known_binary_extensions = set(['zip','rar','exe','gz','tar','tgz','tbz2','bz2','z','7z','dmg','img','sit','sitx','hqx','deb','rpm','bz2','jar','rar','iso','bin','msi','mp2','mp3','ogg','ogm','mp4','m4v','m4a','avi','wma','wmv'])
  1848. def __init__(self, data, baseuri, encoding):
  1849. self.document = BeautifulSoup.BeautifulSoup(data)
  1850. self.baseuri = baseuri
  1851. self.encoding = encoding
  1852. if isinstance(data, unicode):
  1853. data = data.encode(encoding)
  1854. self.tags = []
  1855. self.enclosures = []
  1856. self.xfn = []
  1857. self.vcard = None
  1858. def vcardEscape(self, s):
  1859. if isinstance(s, basestring):
  1860. s = s.replace(',', '\\,').replace(';', '\\;').replace('\n', '\\n')
  1861. return s
  1862. def vcardFold(self, s):
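# Fold long vCard content lines: the first physical line carries at most 75
# characters, and each continuation line starts with a space and carries at most 74.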
  1863. s = re.sub(';+$', '', s)
  1864. sFolded = ''
  1865. iMax = 75
  1866. sPrefix = ''
  1867. while len(s) > iMax:
  1868. sFolded += sPrefix + s[:iMax] + '\n'
  1869. s = s[iMax:]
  1870. sPrefix = ' '
  1871. iMax = 74
  1872. sFolded += sPrefix + s
  1873. return sFolded
  1874. def normalize(self, s):
  1875. return re.sub(r'\s+', ' ', s).strip()
  1876. def unique(self, aList):
  1877. results = []
  1878. for element in aList:
  1879. if element not in results:
  1880. results.append(element)
  1881. return results
  1882. def toISO8601(self, dt):
  1883. return time.strftime('%Y-%m-%dT%H:%M:%SZ', dt)
  1884. def getPropertyValue(self, elmRoot, sProperty, iPropertyType=4, bAllowMultiple=0, bAutoEscape=0):
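# Look up a microformat property beneath elmRoot. iPropertyType selects how the
# value is extracted (STRING, DATE, URI, NODE, or EMAIL), bAllowMultiple returns
# a list of every match instead of the first value, and bAutoEscape applies
# vCard escaping to string results.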
  1885. all = lambda x: 1
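# BeautifulSoup matcher that accepts every element (locally shadows the
# builtin of the same name).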
  1886. sProperty = sProperty.lower()
  1887. bFound = 0
  1888. bNormalize = 1
  1889. propertyMatch = {'class': re.compile(r'\b%s\b' % sProperty)}
  1890. if bAllowMultiple and (iPropertyType != self.NODE):
  1891. snapResults = []
  1892. containers = elmRoot(['ul', 'ol'], propertyMatch)
  1893. for container in containers:
  1894. snapResults.extend(container('li'))
  1895. bFound = (len(snapResults) != 0)
  1896. if not bFound:
  1897. snapResults = elmRoot(all, propertyMatch)
  1898. bFound = (len(snapResults) != 0)
  1899. if (not bFound) and (sProperty == 'value'):
  1900. snapResults = elmRoot('pre')
  1901. bFound = (len(snapResults) != 0)
  1902. bNormalize = not bFound
  1903. if not bFound:
  1904. snapResults = [elmRoot]
  1905. bFound = (len(snapResults) != 0)
  1906. arFilter = []
  1907. if sProperty == 'vcard':
  1908. snapFilter = elmRoot(all, propertyMatch)
  1909. for node in snapFilter:
  1910. if node.findParent(all, propertyMatch):
  1911. arFilter.append(node)
  1912. arResults = []
  1913. for node in snapResults:
  1914. if node not in arFilter:
  1915. arResults.append(node)
  1916. bFound = (len(arResults) != 0)
  1917. if not bFound:
  1918. if bAllowMultiple:
  1919. return []
  1920. elif iPropertyType == self.STRING:
  1921. return ''
  1922. elif iPropertyType == self.DATE:
  1923. return None
  1924. elif iPropertyType == self.URI:
  1925. return ''
  1926. elif iPropertyType == self.NODE:
  1927. return None
  1928. else:
  1929. return None
  1930. arValues = []
  1931. for elmResult in arResults:
  1932. sValue = None
  1933. if iPropertyType == self.NODE:
  1934. if bAllowMultiple:
  1935. arValues.append(elmResult)
  1936. continue
  1937. else:
  1938. return elmResult
  1939. sNodeName = elmResult.name.lower()
  1940. if (iPropertyType == self.EMAIL) and (sNodeName == 'a'):
  1941. sValue = (elmResult.get('href') or '').split('mailto:').pop().split('?')[0]
  1942. if sValue:
  1943. sValue = bNormalize and self.normalize(sValue) or sValue.strip()
  1944. if (not sValue) and (sNodeName == 'abbr'):
  1945. sValue = elmResult.get('title')
  1946. if sValue:
  1947. sValue = bNormalize and self.normalize(sValue) or sValue.strip()
  1948. if (not sValue) and (iPropertyType == self.URI):
  1949. if sNodeName == 'a':
  1950. sValue = elmResult.get('href')
  1951. elif sNodeName == 'img':
  1952. sValue = elmResult.get('src')
  1953. elif sNodeName == 'object':
  1954. sValue = elmResult.get('data')
  1955. if sValue:
  1956. sValue = bNormalize and self.normalize(sValue) or sValue.strip()
  1957. if (not sValue) and (sNodeName == 'img'):
  1958. sValue = elmResult.get('alt')
  1959. if sValue:
  1960. sValue = bNormalize and self.normalize(sValue) or sValue.strip()
  1961. if not sValue:
  1962. sValue = elmResult.renderContents()
  1963. sValue = re.sub(r'<\S[^>]*>', '', sValue)
  1964. sValue = sValue.replace('\r\n', '\n')
  1965. sValue = sValue.replace('\r', '\n')
  1966. if sValue:
  1967. sValue = bNormalize and self.normalize(sValue) or sValue.strip()
  1968. if not sValue:
  1969. continue
  1970. if iPropertyType == self.DATE:
  1971. sValue = _parse_date_iso8601(sValue)
  1972. if bAllowMultiple:
  1973. arValues.append(bAutoEscape and self.vcardEscape(sValue) or sValue)
  1974. else:
  1975. return bAutoEscape and self.vcardEscape(sValue) or sValue
  1976. return arValues
  1977. def findVCards(self, elmRoot, bAgentParsing=0):
  1978. sVCards = ''
  1979. if not bAgentParsing:
  1980. arCards = self.getPropertyValue(elmRoot, 'vcard', bAllowMultiple=1)
  1981. else:
  1982. arCards = [elmRoot]
  1983. for elmCard in arCards:
  1984. arLines = []
  1985. def processSingleString(sProperty):
  1986. sValue = self.getPropertyValue(elmCard, sProperty, self.STRING, bAutoEscape=1).decode(self.encoding)
  1987. if sValue:
  1988. arLines.append(self.vcardFold(sProperty.upper() + ':' + sValue))
  1989. return sValue or u''
  1990. def processSingleURI(sProperty):
  1991. sValue = self.getPropertyValue(elmCard, sProperty, self.URI)
  1992. if sValue:
  1993. sContentType = ''
  1994. sEncoding = ''
  1995. sValueKey = ''
  1996. if sValue.startswith('data:'):
  1997. sEncoding = ';ENCODING=b'
  1998. sContentType = sValue.split(';')[0].split('/').pop()
  1999. sValue = sValue.split(',', 1).pop()
  2000. else:
  2001. elmValue = self.getPropertyValue(elmCard, sProperty)
  2002. if elmValue:
  2003. if sProperty != 'url':
  2004. sValueKey = ';VALUE=uri'
  2005. sContentType = elmValue.get('type', '').strip().split('/').pop().strip()
  2006. sContentType = sContentType.upper()
  2007. if sContentType == 'OCTET-STREAM':
  2008. sContentType = ''
  2009. if sContentType:
  2010. sContentType = ';TYPE=' + sContentType.upper()
  2011. arLines.append(self.vcardFold(sProperty.upper() + sEncoding + sContentType + sValueKey + ':' + sValue))
  2012. def processTypeValue(sProperty, arDefaultType, arForceType=None):
  2013. arResults = self.getPropertyValue(elmCard, sProperty, bAllowMultiple=1)
  2014. for elmResult in arResults:
  2015. arType = self.getPropertyValue(elmResult, 'type', self.STRING, 1, 1)
  2016. if arForceType:
  2017. arType = self.unique(arForceType + arType)
  2018. if not arType:
  2019. arType = arDefaultType
  2020. sValue = self.getPropertyValue(elmResult, 'value', self.EMAIL, 0)
  2021. if sValue:
  2022. arLines.append(self.vcardFold(sProperty.upper() + ';TYPE=' + ','.join(arType) + ':' + sValue))
  2023. # AGENT
  2024. # must do this before all other properties because it is destructive
  2025. # (removes nested class="vcard" nodes so they don't interfere with
  2026. # this vcard's other properties)
  2027. arAgent = self.getPropertyValue(elmCard, 'agent', bAllowMultiple=1)
  2028. for elmAgent in arAgent:
  2029. if re.compile(r'\bvcard\b').search(elmAgent.get('class')):
  2030. sAgentValue = self.findVCards(elmAgent, 1) + '\n'
  2031. sAgentValue = sAgentValue.replace('\n', '\\n')
  2032. sAgentValue = sAgentValue.replace(';', '\\;')
  2033. if sAgentValue:
  2034. arLines.append(self.vcardFold('AGENT:' + sAgentValue))
  2035. # Completely remove the agent element from the parse tree
  2036. elmAgent.extract()
  2037. else:
2038. sAgentValue = self.getPropertyValue(elmAgent, 'value', self.URI, bAutoEscape=1)
  2039. if sAgentValue:
  2040. arLines.append(self.vcardFold('AGENT;VALUE=uri:' + sAgentValue))
  2041. # FN (full name)
  2042. sFN = processSingleString('fn')
  2043. # N (name)
  2044. elmName = self.getPropertyValue(elmCard, 'n')
  2045. if elmName:
  2046. sFamilyName = self.getPropertyValue(elmName, 'family-name', self.STRING, bAutoEscape=1)
  2047. sGivenName = self.getPropertyValue(elmName, 'given-name', self.STRING, bAutoEscape=1)
  2048. arAdditionalNames = self.getPropertyValue(elmName, 'additional-name', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'additional-names', self.STRING, 1, 1)
  2049. arHonorificPrefixes = self.getPropertyValue(elmName, 'honorific-prefix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-prefixes', self.STRING, 1, 1)
  2050. arHonorificSuffixes = self.getPropertyValue(elmName, 'honorific-suffix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-suffixes', self.STRING, 1, 1)
  2051. arLines.append(self.vcardFold('N:' + sFamilyName + ';' +
  2052. sGivenName + ';' +
  2053. ','.join(arAdditionalNames) + ';' +
  2054. ','.join(arHonorificPrefixes) + ';' +
  2055. ','.join(arHonorificSuffixes)))
  2056. elif sFN:
  2057. # implied "N" optimization
  2058. # http://microformats.org/wiki/hcard#Implied_.22N.22_Optimization
  2059. arNames = self.normalize(sFN).split()
  2060. if len(arNames) == 2:
  2061. bFamilyNameFirst = (arNames[0].endswith(',') or
  2062. len(arNames[1]) == 1 or
  2063. ((len(arNames[1]) == 2) and (arNames[1].endswith('.'))))
  2064. if bFamilyNameFirst:
  2065. arLines.append(self.vcardFold('N:' + arNames[0] + ';' + arNames[1]))
  2066. else:
  2067. arLines.append(self.vcardFold('N:' + arNames[1] + ';' + arNames[0]))
  2068. # SORT-STRING
  2069. sSortString = self.getPropertyValue(elmCard, 'sort-string', self.STRING, bAutoEscape=1)
  2070. if sSortString:
  2071. arLines.append(self.vcardFold('SORT-STRING:' + sSortString))
  2072. # NICKNAME
  2073. arNickname = self.getPropertyValue(elmCard, 'nickname', self.STRING, 1, 1)
  2074. if arNickname:
  2075. arLines.append(self.vcardFold('NICKNAME:' + ','.join(arNickname)))
  2076. # PHOTO
  2077. processSingleURI('photo')
  2078. # BDAY
  2079. dtBday = self.getPropertyValue(elmCard, 'bday', self.DATE)
  2080. if dtBday:
  2081. arLines.append(self.vcardFold('BDAY:' + self.toISO8601(dtBday)))
  2082. # ADR (address)
  2083. arAdr = self.getPropertyValue(elmCard, 'adr', bAllowMultiple=1)
  2084. for elmAdr in arAdr:
  2085. arType = self.getPropertyValue(elmAdr, 'type', self.STRING, 1, 1)
  2086. if not arType:
  2087. arType = ['intl','postal','parcel','work'] # default adr types, see RFC 2426 section 3.2.1
  2088. sPostOfficeBox = self.getPropertyValue(elmAdr, 'post-office-box', self.STRING, 0, 1)
  2089. sExtendedAddress = self.getPropertyValue(elmAdr, 'extended-address', self.STRING, 0, 1)
  2090. sStreetAddress = self.getPropertyValue(elmAdr, 'street-address', self.STRING, 0, 1)
  2091. sLocality = self.getPropertyValue(elmAdr, 'locality', self.STRING, 0, 1)
  2092. sRegion = self.getPropertyValue(elmAdr, 'region', self.STRING, 0, 1)
  2093. sPostalCode = self.getPropertyValue(elmAdr, 'postal-code', self.STRING, 0, 1)
  2094. sCountryName = self.getPropertyValue(elmAdr, 'country-name', self.STRING, 0, 1)
  2095. arLines.append(self.vcardFold('ADR;TYPE=' + ','.join(arType) + ':' +
  2096. sPostOfficeBox + ';' +
  2097. sExtendedAddress + ';' +
  2098. sStreetAddress + ';' +
  2099. sLocality + ';' +
  2100. sRegion + ';' +
  2101. sPostalCode + ';' +
  2102. sCountryName))
  2103. # LABEL
  2104. processTypeValue('label', ['intl','postal','parcel','work'])
  2105. # TEL (phone number)
  2106. processTypeValue('tel', ['voice'])
  2107. # EMAIL
  2108. processTypeValue('email', ['internet'], ['internet'])
  2109. # MAILER
  2110. processSingleString('mailer')
  2111. # TZ (timezone)
  2112. processSingleString('tz')
  2113. # GEO (geographical information)
  2114. elmGeo = self.getPropertyValue(elmCard, 'geo')
  2115. if elmGeo:
  2116. sLatitude = self.getPropertyValue(elmGeo, 'latitude', self.STRING, 0, 1)
  2117. sLongitude = self.getPropertyValue(elmGeo, 'longitude', self.STRING, 0, 1)
  2118. arLines.append(self.vcardFold('GEO:' + sLatitude + ';' + sLongitude))
  2119. # TITLE
  2120. processSingleString('title')
  2121. # ROLE
  2122. processSingleString('role')
  2123. # LOGO
  2124. processSingleURI('logo')
  2125. # ORG (organization)
  2126. elmOrg = self.getPropertyValue(elmCard, 'org')
  2127. if elmOrg:
  2128. sOrganizationName = self.getPropertyValue(elmOrg, 'organization-name', self.STRING, 0, 1)
  2129. if not sOrganizationName:
  2130. # implied "organization-name" optimization
  2131. # http://microformats.org/wiki/hcard#Implied_.22organization-name.22_Optimization
  2132. sOrganizationName = self.getPropertyValue(elmCard, 'org', self.STRING, 0, 1)
  2133. if sOrganizationName:
  2134. arLines.append(self.vcardFold('ORG:' + sOrganizationName))
  2135. else:
  2136. arOrganizationUnit = self.getPropertyValue(elmOrg, 'organization-unit', self.STRING, 1, 1)
  2137. arLines.append(self.vcardFold('ORG:' + sOrganizationName + ';' + ';'.join(arOrganizationUnit)))
  2138. # CATEGORY
  2139. arCategory = self.getPropertyValue(elmCard, 'category', self.STRING, 1, 1) + self.getPropertyValue(elmCard, 'categories', self.STRING, 1, 1)
  2140. if arCategory:
  2141. arLines.append(self.vcardFold('CATEGORIES:' + ','.join(arCategory)))
  2142. # NOTE
  2143. processSingleString('note')
  2144. # REV
  2145. processSingleString('rev')
  2146. # SOUND
  2147. processSingleURI('sound')
  2148. # UID
  2149. processSingleString('uid')
  2150. # URL
  2151. processSingleURI('url')
  2152. # CLASS
  2153. processSingleString('class')
  2154. # KEY
  2155. processSingleURI('key')
  2156. if arLines:
  2157. arLines = [u'BEGIN:vCard',u'VERSION:3.0'] + arLines + [u'END:vCard']
  2158. # XXX - this is super ugly; properly fix this with issue 148
  2159. for i, s in enumerate(arLines):
  2160. if not isinstance(s, unicode):
  2161. arLines[i] = s.decode('utf-8', 'ignore')
  2162. sVCards += u'\n'.join(arLines) + u'\n'
  2163. return sVCards.strip()
  2164. def isProbablyDownloadable(self, elm):
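# Heuristic: treat the element as a downloadable enclosure if its 'type' is
# audio/*, video/*, or a non-XML application/* type, or if its href path ends
# in a known binary extension.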
  2165. attrsD = elm.attrMap
  2166. if 'href' not in attrsD:
  2167. return 0
  2168. linktype = attrsD.get('type', '').strip()
  2169. if linktype.startswith('audio/') or \
  2170. linktype.startswith('video/') or \
  2171. (linktype.startswith('application/') and not linktype.endswith('xml')):
  2172. return 1
  2173. try:
  2174. path = urlparse.urlparse(attrsD['href'])[2]
  2175. except ValueError:
  2176. return 0
  2177. if path.find('.') == -1:
  2178. return 0
  2179. fileext = path.split('.').pop().lower()
  2180. return fileext in self.known_binary_extensions
  2181. def findTags(self):
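# rel='tag' links: the last non-empty path segment is used as the tag term,
# and the rest of the URL (with a trailing slash) as the tag scheme.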
  2182. all = lambda x: 1
  2183. for elm in self.document(all, {'rel': re.compile(r'\btag\b')}):
  2184. href = elm.get('href')
  2185. if not href:
  2186. continue
  2187. urlscheme, domain, path, params, query, fragment = \
  2188. urlparse.urlparse(_urljoin(self.baseuri, href))
  2189. segments = path.split('/')
  2190. tag = segments.pop()
  2191. if not tag:
  2192. if segments:
  2193. tag = segments.pop()
  2194. else:
  2195. # there are no tags
  2196. continue
  2197. tagscheme = urlparse.urlunparse((urlscheme, domain, '/'.join(segments), '', '', ''))
  2198. if not tagscheme.endswith('/'):
  2199. tagscheme += '/'
  2200. self.tags.append(FeedParserDict({"term": tag, "scheme": tagscheme, "label": elm.string or ''}))
  2201. def findEnclosures(self):
  2202. all = lambda x: 1
  2203. enclosure_match = re.compile(r'\benclosure\b')
  2204. for elm in self.document(all, {'href': re.compile(r'.+')}):
  2205. if not enclosure_match.search(elm.get('rel', u'')) and not self.isProbablyDownloadable(elm):
  2206. continue
  2207. if elm.attrMap not in self.enclosures:
  2208. self.enclosures.append(elm.attrMap)
  2209. if elm.string and not elm.get('title'):
  2210. self.enclosures[-1]['title'] = elm.string
  2211. def findXFN(self):
  2212. all = lambda x: 1
  2213. for elm in self.document(all, {'rel': re.compile('.+'), 'href': re.compile('.+')}):
  2214. rels = elm.get('rel', u'').split()
  2215. xfn_rels = [r for r in rels if r in self.known_xfn_relationships]
  2216. if xfn_rels:
  2217. self.xfn.append({"relationships": xfn_rels, "href": elm.get('href', ''), "name": elm.string})
  2218. def _parseMicroformats(htmlSource, baseURI, encoding):
  2219. if not BeautifulSoup:
  2220. return
  2221. try:
  2222. p = _MicroformatsParser(htmlSource, baseURI, encoding)
  2223. except UnicodeEncodeError:
  2224. # sgmllib throws this exception when performing lookups of tags
  2225. # with non-ASCII characters in them.
  2226. return
  2227. p.vcard = p.findVCards(p.document)
  2228. p.findTags()
  2229. p.findEnclosures()
  2230. p.findXFN()
  2231. return {"tags": p.tags, "enclosures": p.enclosures, "xfn": p.xfn, "vcard": p.vcard}
  2232. class _RelativeURIResolver(_BaseHTMLProcessor):
  2233. relative_uris = set([('a', 'href'),
  2234. ('applet', 'codebase'),
  2235. ('area', 'href'),
  2236. ('blockquote', 'cite'),
  2237. ('body', 'background'),
  2238. ('del', 'cite'),
  2239. ('form', 'action'),
  2240. ('frame', 'longdesc'),
  2241. ('frame', 'src'),
  2242. ('iframe', 'longdesc'),
  2243. ('iframe', 'src'),
  2244. ('head', 'profile'),
  2245. ('img', 'longdesc'),
  2246. ('img', 'src'),
  2247. ('img', 'usemap'),
  2248. ('input', 'src'),
  2249. ('input', 'usemap'),
  2250. ('ins', 'cite'),
  2251. ('link', 'href'),
  2252. ('object', 'classid'),
  2253. ('object', 'codebase'),
  2254. ('object', 'data'),
  2255. ('object', 'usemap'),
  2256. ('q', 'cite'),
  2257. ('script', 'src'),
  2258. ('video', 'poster')])
  2259. def __init__(self, baseuri, encoding, _type):
  2260. _BaseHTMLProcessor.__init__(self, encoding, _type)
  2261. self.baseuri = baseuri
  2262. def resolveURI(self, uri):
  2263. return _makeSafeAbsoluteURI(self.baseuri, uri.strip())
  2264. def unknown_starttag(self, tag, attrs):
  2265. attrs = self.normalize_attrs(attrs)
  2266. attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
  2267. _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
  2268. def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type):
  2269. if not _SGML_AVAILABLE:
  2270. return htmlSource
  2271. p = _RelativeURIResolver(baseURI, encoding, _type)
  2272. p.feed(htmlSource)
  2273. return p.output()
  2274. def _makeSafeAbsoluteURI(base, rel=None):
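# Join rel to base and return the result only when its scheme appears in
# ACCEPTABLE_URI_SCHEMES; an empty ACCEPTABLE_URI_SCHEMES disables the check,
# and any failure resolves to an empty string.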
  2275. # bail if ACCEPTABLE_URI_SCHEMES is empty
  2276. if not ACCEPTABLE_URI_SCHEMES:
  2277. try:
  2278. return _urljoin(base, rel or u'')
  2279. except ValueError:
  2280. return u''
  2281. if not base:
  2282. return rel or u''
  2283. if not rel:
  2284. try:
  2285. scheme = urlparse.urlparse(base)[0]
  2286. except ValueError:
  2287. return u''
  2288. if not scheme or scheme in ACCEPTABLE_URI_SCHEMES:
  2289. return base
  2290. return u''
  2291. try:
  2292. uri = _urljoin(base, rel)
  2293. except ValueError:
  2294. return u''
  2295. if uri.strip().split(':', 1)[0] not in ACCEPTABLE_URI_SCHEMES:
  2296. return u''
  2297. return uri
  2298. class _HTMLSanitizer(_BaseHTMLProcessor):
  2299. acceptable_elements = set(['a', 'abbr', 'acronym', 'address', 'area',
  2300. 'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
  2301. 'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
  2302. 'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
  2303. 'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
  2304. 'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
  2305. 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
  2306. 'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
  2307. 'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
  2308. 'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
  2309. 'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
  2310. 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
  2311. 'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript'])
  2312. acceptable_attributes = set(['abbr', 'accept', 'accept-charset', 'accesskey',
  2313. 'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
  2314. 'background', 'balance', 'bgcolor', 'bgproperties', 'border',
  2315. 'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
  2316. 'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
  2317. 'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols',
  2318. 'colspan', 'compact', 'contenteditable', 'controls', 'coords', 'data',
  2319. 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', 'delay',
  2320. 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for',
  2321. 'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus',
  2322. 'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode',
  2323. 'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc',
  2324. 'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max',
  2325. 'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref',
  2326. 'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size',
  2327. 'poster', 'pqg', 'preload', 'prompt', 'radiogroup', 'readonly', 'rel',
  2328. 'repeat-max', 'repeat-min', 'replace', 'required', 'rev', 'rightspacing',
  2329. 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span',
  2330. 'src', 'start', 'step', 'summary', 'suppress', 'tabindex', 'target',
  2331. 'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
  2332. 'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
  2333. 'width', 'wrap', 'xml:lang'])
  2334. unacceptable_elements_with_end_tag = set(['script', 'applet', 'style'])
  2335. acceptable_css_properties = set(['azimuth', 'background-color',
  2336. 'border-bottom-color', 'border-collapse', 'border-color',
  2337. 'border-left-color', 'border-right-color', 'border-top-color', 'clear',
  2338. 'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
  2339. 'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
  2340. 'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
  2341. 'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
  2342. 'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
  2343. 'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
  2344. 'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
  2345. 'white-space', 'width'])
  2346. # survey of common keywords found in feeds
  2347. acceptable_css_keywords = set(['auto', 'aqua', 'black', 'block', 'blue',
  2348. 'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
  2349. 'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
  2350. 'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
  2351. 'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
  2352. 'transparent', 'underline', 'white', 'yellow'])
  2353. valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
  2354. '\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
  2355. mathml_elements = set(['annotation', 'annotation-xml', 'maction', 'math',
  2356. 'merror', 'mfenced', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded',
  2357. 'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle',
  2358. 'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
  2359. 'munderover', 'none', 'semantics'])
  2360. mathml_attributes = set(['actiontype', 'align', 'columnalign', 'columnalign',
  2361. 'columnalign', 'close', 'columnlines', 'columnspacing', 'columnspan', 'depth',
  2362. 'display', 'displaystyle', 'encoding', 'equalcolumns', 'equalrows',
  2363. 'fence', 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness',
  2364. 'lspace', 'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant',
  2365. 'maxsize', 'minsize', 'open', 'other', 'rowalign', 'rowalign', 'rowalign',
  2366. 'rowlines', 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
  2367. 'separator', 'separators', 'stretchy', 'width', 'width', 'xlink:href',
  2368. 'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink'])
  2369. # svgtiny - foreignObject + linearGradient + radialGradient + stop
  2370. svg_elements = set(['a', 'animate', 'animateColor', 'animateMotion',
  2371. 'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject',
  2372. 'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
  2373. 'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath',
  2374. 'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop',
  2375. 'svg', 'switch', 'text', 'title', 'tspan', 'use'])
  2376. # svgtiny + class + opacity + offset + xmlns + xmlns:xlink
  2377. svg_attributes = set(['accent-height', 'accumulate', 'additive', 'alphabetic',
  2378. 'arabic-form', 'ascent', 'attributeName', 'attributeType',
  2379. 'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
  2380. 'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx',
  2381. 'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity',
  2382. 'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style',
  2383. 'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2',
  2384. 'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x',
  2385. 'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines',
  2386. 'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid',
  2387. 'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max',
  2388. 'min', 'name', 'offset', 'opacity', 'orient', 'origin',
  2389. 'overline-position', 'overline-thickness', 'panose-1', 'path',
  2390. 'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY',
  2391. 'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures',
  2392. 'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv',
  2393. 'stop-color', 'stop-opacity', 'strikethrough-position',
  2394. 'strikethrough-thickness', 'stroke', 'stroke-dasharray',
  2395. 'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
  2396. 'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage',
  2397. 'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2',
  2398. 'underline-position', 'underline-thickness', 'unicode', 'unicode-range',
  2399. 'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width',
  2400. 'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
  2401. 'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
  2402. 'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1',
  2403. 'y2', 'zoomAndPan'])
  2404. svg_attr_map = None
  2405. svg_elem_map = None
  2406. acceptable_svg_properties = set([ 'fill', 'fill-opacity', 'fill-rule',
  2407. 'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
  2408. 'stroke-opacity'])
  2409. def reset(self):
  2410. _BaseHTMLProcessor.reset(self)
  2411. self.unacceptablestack = 0
  2412. self.mathmlOK = 0
  2413. self.svgOK = 0
  2414. def unknown_starttag(self, tag, attrs):
  2415. acceptable_attributes = self.acceptable_attributes
  2416. keymap = {}
  2417. if not tag in self.acceptable_elements or self.svgOK:
  2418. if tag in self.unacceptable_elements_with_end_tag:
  2419. self.unacceptablestack += 1
  2420. # add implicit namespaces to html5 inline svg/mathml
  2421. if self._type.endswith('html'):
  2422. if not dict(attrs).get('xmlns'):
  2423. if tag=='svg':
  2424. attrs.append( ('xmlns','http://www.w3.org/2000/svg') )
  2425. if tag=='math':
  2426. attrs.append( ('xmlns','http://www.w3.org/1998/Math/MathML') )
  2427. # not otherwise acceptable, perhaps it is MathML or SVG?
  2428. if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs:
  2429. self.mathmlOK += 1
  2430. if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs:
  2431. self.svgOK += 1
2432. # choose acceptable attributes based on tag class, else bail
  2433. if self.mathmlOK and tag in self.mathml_elements:
  2434. acceptable_attributes = self.mathml_attributes
  2435. elif self.svgOK and tag in self.svg_elements:
  2436. # for most vocabularies, lowercasing is a good idea. Many
  2437. # svg elements, however, are camel case
  2438. if not self.svg_attr_map:
  2439. lower=[attr.lower() for attr in self.svg_attributes]
  2440. mix=[a for a in self.svg_attributes if a not in lower]
  2441. self.svg_attributes = lower
  2442. self.svg_attr_map = dict([(a.lower(),a) for a in mix])
  2443. lower=[attr.lower() for attr in self.svg_elements]
  2444. mix=[a for a in self.svg_elements if a not in lower]
  2445. self.svg_elements = lower
  2446. self.svg_elem_map = dict([(a.lower(),a) for a in mix])
  2447. acceptable_attributes = self.svg_attributes
  2448. tag = self.svg_elem_map.get(tag,tag)
  2449. keymap = self.svg_attr_map
  2450. elif not tag in self.acceptable_elements:
  2451. return
  2452. # declare xlink namespace, if needed
  2453. if self.mathmlOK or self.svgOK:
  2454. if filter(lambda (n,v): n.startswith('xlink:'),attrs):
  2455. if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs:
  2456. attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink'))
  2457. clean_attrs = []
  2458. for key, value in self.normalize_attrs(attrs):
  2459. if key in acceptable_attributes:
  2460. key=keymap.get(key,key)
  2461. # make sure the uri uses an acceptable uri scheme
  2462. if key == u'href':
  2463. value = _makeSafeAbsoluteURI(value)
  2464. clean_attrs.append((key,value))
  2465. elif key=='style':
  2466. clean_value = self.sanitize_style(value)
  2467. if clean_value:
  2468. clean_attrs.append((key,clean_value))
  2469. _BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)
  2470. def unknown_endtag(self, tag):
  2471. if not tag in self.acceptable_elements:
  2472. if tag in self.unacceptable_elements_with_end_tag:
  2473. self.unacceptablestack -= 1
  2474. if self.mathmlOK and tag in self.mathml_elements:
  2475. if tag == 'math' and self.mathmlOK:
  2476. self.mathmlOK -= 1
  2477. elif self.svgOK and tag in self.svg_elements:
  2478. tag = self.svg_elem_map.get(tag,tag)
  2479. if tag == 'svg' and self.svgOK:
  2480. self.svgOK -= 1
  2481. else:
  2482. return
  2483. _BaseHTMLProcessor.unknown_endtag(self, tag)
  2484. def handle_pi(self, text):
  2485. pass
  2486. def handle_decl(self, text):
  2487. pass
  2488. def handle_data(self, text):
  2489. if not self.unacceptablestack:
  2490. _BaseHTMLProcessor.handle_data(self, text)
  2491. def sanitize_style(self, style):
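# Strip url(...) values, reject the whole declaration if it fails the character
# gauntlet, then keep only properties and keywords from the acceptable CSS sets.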
  2492. # disallow urls
  2493. style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
  2494. # gauntlet
  2495. if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
  2496. return ''
  2497. # This replaced a regexp that used re.match and was prone to pathological back-tracking.
  2498. if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip():
  2499. return ''
  2500. clean = []
  2501. for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
  2502. if not value:
  2503. continue
  2504. if prop.lower() in self.acceptable_css_properties:
  2505. clean.append(prop + ': ' + value + ';')
  2506. elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
  2507. for keyword in value.split():
  2508. if not keyword in self.acceptable_css_keywords and \
  2509. not self.valid_css_values.match(keyword):
  2510. break
  2511. else:
  2512. clean.append(prop + ': ' + value + ';')
  2513. elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
  2514. clean.append(prop + ': ' + value + ';')
  2515. return ' '.join(clean)
  2516. def parse_comment(self, i, report=1):
  2517. ret = _BaseHTMLProcessor.parse_comment(self, i, report)
  2518. if ret >= 0:
  2519. return ret
  2520. # if ret == -1, this may be a malicious attempt to circumvent
  2521. # sanitization, or a page-destroying unclosed comment
  2522. match = re.compile(r'--[^>]*>').search(self.rawdata, i+4)
  2523. if match:
  2524. return match.end()
  2525. # unclosed comment; deliberately fail to handle_data()
  2526. return len(self.rawdata)
  2527. def _sanitizeHTML(htmlSource, encoding, _type):
  2528. if not _SGML_AVAILABLE:
  2529. return htmlSource
  2530. p = _HTMLSanitizer(encoding, _type)
  2531. htmlSource = htmlSource.replace('<![CDATA[', '&lt;![CDATA[')
  2532. p.feed(htmlSource)
  2533. data = p.output()
  2534. if TIDY_MARKUP:
  2535. # loop through list of preferred Tidy interfaces looking for one that's installed,
  2536. # then set up a common _tidy function to wrap the interface-specific API.
  2537. _tidy = None
  2538. for tidy_interface in PREFERRED_TIDY_INTERFACES:
  2539. try:
  2540. if tidy_interface == "uTidy":
  2541. from tidy import parseString as _utidy
  2542. def _tidy(data, **kwargs):
  2543. return str(_utidy(data, **kwargs))
  2544. break
  2545. elif tidy_interface == "mxTidy":
  2546. from mx.Tidy import Tidy as _mxtidy
  2547. def _tidy(data, **kwargs):
  2548. nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
  2549. return data
  2550. break
  2551. except:
  2552. pass
  2553. if _tidy:
  2554. utf8 = isinstance(data, unicode)
  2555. if utf8:
  2556. data = data.encode('utf-8')
  2557. data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
  2558. if utf8:
  2559. data = unicode(data, 'utf-8')
  2560. if data.count('<body'):
  2561. data = data.split('<body', 1)[1]
  2562. if data.count('>'):
  2563. data = data.split('>', 1)[1]
  2564. if data.count('</body'):
  2565. data = data.split('</body', 1)[0]
  2566. data = data.strip().replace('\r\n', '\n')
  2567. return data
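# Illustrative sketch (with TIDY_MARKUP left at its default of 0):
#     _sanitizeHTML(u'<p onclick="evil()">hi</p><script>alert(1)</script>', 'utf-8', u'text/html')
# returns roughly u'<p>hi</p>' -- the event-handler attribute is stripped and the
# script element and its contents are suppressed.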
  2568. class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
  2569. def http_error_default(self, req, fp, code, msg, headers):
  2570. # The default implementation just raises HTTPError.
  2571. # Forget that.
  2572. fp.status = code
  2573. return fp
  2574. def http_error_301(self, req, fp, code, msg, hdrs):
  2575. result = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp,
  2576. code, msg, hdrs)
  2577. result.status = code
  2578. result.newurl = result.geturl()
  2579. return result
  2580. # The default implementations in urllib2.HTTPRedirectHandler
  2581. # are identical, so hardcoding a http_error_301 call above
  2582. # won't affect anything
  2583. http_error_300 = http_error_301
  2584. http_error_302 = http_error_301
  2585. http_error_303 = http_error_301
  2586. http_error_307 = http_error_301
  2587. def http_error_401(self, req, fp, code, msg, headers):
  2588. # Check if
  2589. # - server requires digest auth, AND
2590. # - we tried (unsuccessfully) with basic auth.
2591. # If both conditions hold, parse authentication information
  2592. # out of the Authorization header we sent the first time
  2593. # (for the username and password) and the WWW-Authenticate
  2594. # header the server sent back (for the realm) and retry
  2595. # the request with the appropriate digest auth headers instead.
  2596. # This evil genius hack has been brought to you by Aaron Swartz.
  2597. host = urlparse.urlparse(req.get_full_url())[1]
  2598. if base64 is None or 'Authorization' not in req.headers \
  2599. or 'WWW-Authenticate' not in headers:
  2600. return self.http_error_default(req, fp, code, msg, headers)
  2601. auth = _base64decode(req.headers['Authorization'].split(' ')[1])
  2602. user, passw = auth.split(':')
  2603. realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
  2604. self.add_password(realm, host, user, passw)
  2605. retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
  2606. self.reset_retry_count()
  2607. return retry
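# _open_resource() below builds a urllib2 opener from any caller-supplied handlers
# plus a _FeedURLHandler instance, so redirects and HTTP errors come back as
# response objects carrying a .status attribute instead of raising, which lets
# parse() report the HTTP status code in its result.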
  2608. def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers):
  2609. """URL, filename, or string --> stream
  2610. This function lets you define parsers that take any input source
  2611. (URL, pathname to local or network file, or actual data as a string)
  2612. and deal with it in a uniform manner. Returned object is guaranteed
  2613. to have all the basic stdio read methods (read, readline, readlines).
  2614. Just .close() the object when you're done with it.
  2615. If the etag argument is supplied, it will be used as the value of an
  2616. If-None-Match request header.
  2617. If the modified argument is supplied, it can be a tuple of 9 integers
  2618. (as returned by gmtime() in the standard Python time module) or a date
  2619. string in any format supported by feedparser. Regardless, it MUST
  2620. be in GMT (Greenwich Mean Time). It will be reformatted into an
  2621. RFC 1123-compliant date and used as the value of an If-Modified-Since
  2622. request header.
  2623. If the agent argument is supplied, it will be used as the value of a
  2624. User-Agent request header.
  2625. If the referrer argument is supplied, it will be used as the value of a
  2626. Referer[sic] request header.
  2627. If handlers is supplied, it is a list of handlers used to build a
  2628. urllib2 opener.
2629. If request_headers is supplied, it is a dictionary of HTTP request headers
  2630. that will override the values generated by FeedParser.
  2631. """
  2632. if hasattr(url_file_stream_or_string, 'read'):
  2633. return url_file_stream_or_string
  2634. if isinstance(url_file_stream_or_string, basestring) \
  2635. and urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'):
  2636. # Deal with the feed URI scheme
  2637. if url_file_stream_or_string.startswith('feed:http'):
  2638. url_file_stream_or_string = url_file_stream_or_string[5:]
  2639. elif url_file_stream_or_string.startswith('feed:'):
  2640. url_file_stream_or_string = 'http:' + url_file_stream_or_string[5:]
  2641. if not agent:
  2642. agent = USER_AGENT
  2643. # Test for inline user:password credentials for HTTP basic auth
  2644. auth = None
  2645. if base64 and not url_file_stream_or_string.startswith('ftp:'):
  2646. urltype, rest = urllib.splittype(url_file_stream_or_string)
  2647. realhost, rest = urllib.splithost(rest)
  2648. if realhost:
  2649. user_passwd, realhost = urllib.splituser(realhost)
  2650. if user_passwd:
  2651. url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
  2652. auth = base64.standard_b64encode(user_passwd).strip()
  2653. # iri support
  2654. if isinstance(url_file_stream_or_string, unicode):
  2655. url_file_stream_or_string = _convert_to_idn(url_file_stream_or_string)
  2656. # try to open with urllib2 (to use optional headers)
  2657. request = _build_urllib2_request(url_file_stream_or_string, agent, etag, modified, referrer, auth, request_headers)
  2658. opener = urllib2.build_opener(*tuple(handlers + [_FeedURLHandler()]))
  2659. opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
  2660. try:
  2661. return opener.open(request)
  2662. finally:
  2663. opener.close() # JohnD
  2664. # try to open with native open function (if url_file_stream_or_string is a filename)
  2665. try:
  2666. return open(url_file_stream_or_string, 'rb')
  2667. except (IOError, UnicodeEncodeError, TypeError):
  2668. # if url_file_stream_or_string is a unicode object that
  2669. # cannot be converted to the encoding returned by
  2670. # sys.getfilesystemencoding(), a UnicodeEncodeError
  2671. # will be thrown
  2672. # If url_file_stream_or_string is a string that contains NULL
  2673. # (such as an XML document encoded in UTF-32), TypeError will
  2674. # be thrown.
  2675. pass
  2676. # treat url_file_stream_or_string as string
  2677. if isinstance(url_file_stream_or_string, unicode):
  2678. return _StringIO(url_file_stream_or_string.encode('utf-8'))
  2679. return _StringIO(url_file_stream_or_string)
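# Illustrative call (the URL is hypothetical); the return value is file-like:
#     f = _open_resource(u'http://example.org/feed.xml', None, None, USER_AGENT, None, [], {})
#     data = f.read()
# A local filename or a raw XML string can be passed in the same position instead.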
  2680. def _convert_to_idn(url):
  2681. """Convert a URL to IDN notation"""
  2682. # this function should only be called with a unicode string
  2683. # strategy: if the host cannot be encoded in ascii, then
  2684. # it'll be necessary to encode it in idn form
  2685. parts = list(urlparse.urlsplit(url))
  2686. try:
  2687. parts[1].encode('ascii')
  2688. except UnicodeEncodeError:
  2689. # the url needs to be converted to idn notation
  2690. host = parts[1].rsplit(':', 1)
  2691. newhost = []
  2692. port = u''
  2693. if len(host) == 2:
  2694. port = host.pop()
  2695. for h in host[0].split('.'):
  2696. newhost.append(h.encode('idna').decode('utf-8'))
  2697. parts[1] = '.'.join(newhost)
  2698. if port:
  2699. parts[1] += ':' + port
  2700. return urlparse.urlunsplit(parts)
  2701. else:
  2702. return url
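# Illustrative example: each non-ASCII host label is punycoded individually, e.g.
#     _convert_to_idn(u'http://b\xfccher.example/feed') -> u'http://xn--bcher-kva.example/feed'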
  2703. def _build_urllib2_request(url, agent, etag, modified, referrer, auth, request_headers):
  2704. request = urllib2.Request(url)
  2705. request.add_header('User-Agent', agent)
  2706. if etag:
  2707. request.add_header('If-None-Match', etag)
  2708. if isinstance(modified, basestring):
  2709. modified = _parse_date(modified)
  2710. elif isinstance(modified, datetime.datetime):
  2711. modified = modified.utctimetuple()
  2712. if modified:
  2713. # format into an RFC 1123-compliant timestamp. We can't use
  2714. # time.strftime() since the %a and %b directives can be affected
  2715. # by the current locale, but RFC 2616 states that dates must be
  2716. # in English.
  2717. short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
  2718. months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
  2719. request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
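# e.g. a `modified` 9-tuple describing 2004-01-05 06:07:08 GMT is rendered as
# 'Mon, 05 Jan 2004 06:07:08 GMT'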
  2720. if referrer:
  2721. request.add_header('Referer', referrer)
  2722. if gzip and zlib:
  2723. request.add_header('Accept-encoding', 'gzip, deflate')
  2724. elif gzip:
  2725. request.add_header('Accept-encoding', 'gzip')
  2726. elif zlib:
  2727. request.add_header('Accept-encoding', 'deflate')
  2728. else:
  2729. request.add_header('Accept-encoding', '')
  2730. if auth:
  2731. request.add_header('Authorization', 'Basic %s' % auth)
  2732. if ACCEPT_HEADER:
  2733. request.add_header('Accept', ACCEPT_HEADER)
  2734. # use this for whatever -- cookies, special headers, etc
2735. # e.g. {'Cookie': 'Something', 'x-special-header': 'Another Value'}
  2736. for header_name, header_value in request_headers.items():
  2737. request.add_header(header_name, header_value)
  2738. request.add_header('A-IM', 'feed') # RFC 3229 support
  2739. return request
  2740. _date_handlers = []
  2741. def registerDateHandler(func):
  2742. '''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
  2743. _date_handlers.insert(0, func)
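# A custom handler can be added the same way the built-in ones below are; sketch
# (the function name is hypothetical):
#     def _parse_date_my_format(dateString):
#         ...  # return a 9-tuple in GMT, or None if the string doesn't match
#     registerDateHandler(_parse_date_my_format)
# Handlers registered later are tried first, because of the insert(0, ...) above.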
  2744. # ISO-8601 date parsing routines written by Fazal Majid.
  2745. # The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
  2746. # parser is beyond the scope of feedparser and would be a worthwhile addition
  2747. # to the Python library.
  2748. # A single regular expression cannot parse ISO 8601 date formats into groups
2749. # as the standard is highly irregular (for instance, is 030104 the date 2003-01-04 or
2750. # 0301-04-01?), so we use templates instead.
  2751. # Please note the order in templates is significant because we need a
  2752. # greedy match.
  2753. _iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO',
  2754. 'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
  2755. '-YY-?MM', '-OOO', '-YY',
  2756. '--MM-?DD', '--MM',
  2757. '---DD',
  2758. 'CC', '']
  2759. _iso8601_re = [
  2760. tmpl.replace(
  2761. 'YYYY', r'(?P<year>\d{4})').replace(
  2762. 'YY', r'(?P<year>\d\d)').replace(
  2763. 'MM', r'(?P<month>[01]\d)').replace(
  2764. 'DD', r'(?P<day>[0123]\d)').replace(
  2765. 'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
  2766. 'CC', r'(?P<century>\d\d$)')
  2767. + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
  2768. + r'(:(?P<second>\d{2}))?'
  2769. + r'(\.(?P<fracsecond>\d+))?'
  2770. + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
  2771. for tmpl in _iso8601_tmpl]
  2772. try:
  2773. del tmpl
  2774. except NameError:
  2775. pass
  2776. _iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
  2777. try:
  2778. del regex
  2779. except NameError:
  2780. pass
  2781. def _parse_date_iso8601(dateString):
  2782. '''Parse a variety of ISO-8601-compatible formats like 20040105'''
  2783. m = None
  2784. for _iso8601_match in _iso8601_matches:
  2785. m = _iso8601_match(dateString)
  2786. if m:
  2787. break
  2788. if not m:
  2789. return
  2790. if m.span() == (0, 0):
  2791. return
  2792. params = m.groupdict()
  2793. ordinal = params.get('ordinal', 0)
  2794. if ordinal:
  2795. ordinal = int(ordinal)
  2796. else:
  2797. ordinal = 0
  2798. year = params.get('year', '--')
  2799. if not year or year == '--':
  2800. year = time.gmtime()[0]
  2801. elif len(year) == 2:
  2802. # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
  2803. year = 100 * int(time.gmtime()[0] / 100) + int(year)
  2804. else:
  2805. year = int(year)
  2806. month = params.get('month', '-')
  2807. if not month or month == '-':
  2808. # ordinals are NOT normalized by mktime, we simulate them
  2809. # by setting month=1, day=ordinal
  2810. if ordinal:
  2811. month = 1
  2812. else:
  2813. month = time.gmtime()[1]
  2814. month = int(month)
  2815. day = params.get('day', 0)
  2816. if not day:
  2817. # see above
  2818. if ordinal:
  2819. day = ordinal
  2820. elif params.get('century', 0) or \
  2821. params.get('year', 0) or params.get('month', 0):
  2822. day = 1
  2823. else:
  2824. day = time.gmtime()[2]
  2825. else:
  2826. day = int(day)
  2827. # special case of the century - is the first year of the 21st century
  2828. # 2000 or 2001 ? The debate goes on...
  2829. if 'century' in params:
  2830. year = (int(params['century']) - 1) * 100 + 1
  2831. # in ISO 8601 most fields are optional
  2832. for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
  2833. if not params.get(field, None):
  2834. params[field] = 0
  2835. hour = int(params.get('hour', 0))
  2836. minute = int(params.get('minute', 0))
  2837. second = int(float(params.get('second', 0)))
  2838. # weekday is normalized by mktime(), we can ignore it
  2839. weekday = 0
  2840. daylight_savings_flag = -1
  2841. tm = [year, month, day, hour, minute, second, weekday,
  2842. ordinal, daylight_savings_flag]
  2843. # ISO 8601 time zone adjustments
  2844. tz = params.get('tz')
  2845. if tz and tz != 'Z':
  2846. if tz[0] == '-':
  2847. tm[3] += int(params.get('tzhour', 0))
  2848. tm[4] += int(params.get('tzmin', 0))
  2849. elif tz[0] == '+':
  2850. tm[3] -= int(params.get('tzhour', 0))
  2851. tm[4] -= int(params.get('tzmin', 0))
  2852. else:
  2853. return None
  2854. # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
  2855. # which is guaranteed to normalize d/m/y/h/m/s.
  2856. # Many implementations have bugs, but we'll pretend they don't.
  2857. return time.localtime(time.mktime(tuple(tm)))
  2858. registerDateHandler(_parse_date_iso8601)
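# Roughly: _parse_date_iso8601('2003-12-31T10:14:55Z') returns a 9-tuple whose
# first six fields are (2003, 12, 31, 10, 14, 55).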
  2859. # 8-bit date handling routines written by ytrewq1.
  2860. _korean_year = u'\ub144' # b3e2 in euc-kr
  2861. _korean_month = u'\uc6d4' # bff9 in euc-kr
  2862. _korean_day = u'\uc77c' # c0cf in euc-kr
  2863. _korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr
  2864. _korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
  2865. _korean_onblog_date_re = \
  2866. re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
  2867. (_korean_year, _korean_month, _korean_day))
  2868. _korean_nate_date_re = \
  2869. re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
  2870. (_korean_am, _korean_pm))
  2871. def _parse_date_onblog(dateString):
  2872. '''Parse a string according to the OnBlog 8-bit date format'''
  2873. m = _korean_onblog_date_re.match(dateString)
  2874. if not m:
  2875. return
  2876. w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
  2877. {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
  2878. 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
  2879. 'zonediff': '+09:00'}
  2880. return _parse_date_w3dtf(w3dtfdate)
  2881. registerDateHandler(_parse_date_onblog)
  2882. def _parse_date_nate(dateString):
  2883. '''Parse a string according to the Nate 8-bit date format'''
  2884. m = _korean_nate_date_re.match(dateString)
  2885. if not m:
  2886. return
  2887. hour = int(m.group(5))
  2888. ampm = m.group(4)
  2889. if (ampm == _korean_pm):
  2890. hour += 12
  2891. hour = str(hour)
  2892. if len(hour) == 1:
  2893. hour = '0' + hour
  2894. w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
  2895. {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
  2896. 'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
  2897. 'zonediff': '+09:00'}
  2898. return _parse_date_w3dtf(w3dtfdate)
  2899. registerDateHandler(_parse_date_nate)
  2900. # Unicode strings for Greek date strings
  2901. _greek_months = \
  2902. { \
  2903. u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7
  2904. u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7
  2905. u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7
  2906. u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7
  2907. u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7
  2908. u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7
  2909. u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7
  2910. u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7
  2911. u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
  2912. u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7
  2913. u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
  2914. u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7
  2915. u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7
  2916. u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7
  2917. u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7
  2918. u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7
  2919. u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7
  2920. u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7
  2921. u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7
  2922. }
  2923. _greek_wdays = \
  2924. { \
  2925. u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
  2926. u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
  2927. u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
  2928. u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
  2929. u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
  2930. u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
  2931. u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
  2932. }
  2933. _greek_date_format_re = \
  2934. re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
  2935. def _parse_date_greek(dateString):
  2936. '''Parse a string according to a Greek 8-bit date format.'''
  2937. m = _greek_date_format_re.match(dateString)
  2938. if not m:
  2939. return
  2940. wday = _greek_wdays[m.group(1)]
  2941. month = _greek_months[m.group(3)]
  2942. rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
  2943. {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
  2944. 'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
  2945. 'zonediff': m.group(8)}
  2946. return _parse_date_rfc822(rfc822date)
  2947. registerDateHandler(_parse_date_greek)
  2948. # Unicode strings for Hungarian date strings
  2949. _hungarian_months = \
  2950. { \
  2951. u'janu\u00e1r': u'01', # e1 in iso-8859-2
  2952. u'febru\u00e1ri': u'02', # e1 in iso-8859-2
  2953. u'm\u00e1rcius': u'03', # e1 in iso-8859-2
  2954. u'\u00e1prilis': u'04', # e1 in iso-8859-2
2955. u'm\u00e1jus': u'05', # e1 in iso-8859-2
  2956. u'j\u00fanius': u'06', # fa in iso-8859-2
  2957. u'j\u00falius': u'07', # fa in iso-8859-2
  2958. u'augusztus': u'08',
  2959. u'szeptember': u'09',
  2960. u'okt\u00f3ber': u'10', # f3 in iso-8859-2
  2961. u'november': u'11',
  2962. u'december': u'12',
  2963. }
  2964. _hungarian_date_format_re = \
  2965. re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
  2966. def _parse_date_hungarian(dateString):
  2967. '''Parse a string according to a Hungarian 8-bit date format.'''
  2968. m = _hungarian_date_format_re.match(dateString)
  2969. if not m or m.group(2) not in _hungarian_months:
  2970. return None
  2971. month = _hungarian_months[m.group(2)]
  2972. day = m.group(3)
  2973. if len(day) == 1:
  2974. day = '0' + day
  2975. hour = m.group(4)
  2976. if len(hour) == 1:
  2977. hour = '0' + hour
  2978. w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
  2979. {'year': m.group(1), 'month': month, 'day': day,\
  2980. 'hour': hour, 'minute': m.group(5),\
  2981. 'zonediff': m.group(6)}
  2982. return _parse_date_w3dtf(w3dtfdate)
  2983. registerDateHandler(_parse_date_hungarian)
  2984. # W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
  2985. # Drake and licensed under the Python license. Removed all range checking
  2986. # for month, day, hour, minute, and second, since mktime will normalize
  2987. # these later
  2988. # Modified to also support MSSQL-style datetimes as defined at:
  2989. # http://msdn.microsoft.com/en-us/library/ms186724.aspx
  2990. # (which basically means allowing a space as a date/time/timezone separator)
  2991. def _parse_date_w3dtf(dateString):
  2992. def __extract_date(m):
  2993. year = int(m.group('year'))
  2994. if year < 100:
  2995. year = 100 * int(time.gmtime()[0] / 100) + int(year)
  2996. if year < 1000:
  2997. return 0, 0, 0
  2998. julian = m.group('julian')
  2999. if julian:
  3000. julian = int(julian)
  3001. month = julian / 30 + 1
  3002. day = julian % 30 + 1
  3003. jday = None
  3004. while jday != julian:
  3005. t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
  3006. jday = time.gmtime(t)[-2]
  3007. diff = abs(jday - julian)
  3008. if jday > julian:
  3009. if diff < day:
  3010. day = day - diff
  3011. else:
  3012. month = month - 1
  3013. day = 31
  3014. elif jday < julian:
  3015. if day + diff < 28:
  3016. day = day + diff
  3017. else:
  3018. month = month + 1
  3019. return year, month, day
  3020. month = m.group('month')
  3021. day = 1
  3022. if month is None:
  3023. month = 1
  3024. else:
  3025. month = int(month)
  3026. day = m.group('day')
  3027. if day:
  3028. day = int(day)
  3029. else:
  3030. day = 1
  3031. return year, month, day
  3032. def __extract_time(m):
  3033. if not m:
  3034. return 0, 0, 0
  3035. hours = m.group('hours')
  3036. if not hours:
  3037. return 0, 0, 0
  3038. hours = int(hours)
  3039. minutes = int(m.group('minutes'))
  3040. seconds = m.group('seconds')
  3041. if seconds:
  3042. seconds = int(seconds)
  3043. else:
  3044. seconds = 0
  3045. return hours, minutes, seconds
  3046. def __extract_tzd(m):
  3047. '''Return the Time Zone Designator as an offset in seconds from UTC.'''
  3048. if not m:
  3049. return 0
  3050. tzd = m.group('tzd')
  3051. if not tzd:
  3052. return 0
  3053. if tzd == 'Z':
  3054. return 0
  3055. hours = int(m.group('tzdhours'))
  3056. minutes = m.group('tzdminutes')
  3057. if minutes:
  3058. minutes = int(minutes)
  3059. else:
  3060. minutes = 0
  3061. offset = (hours*60 + minutes) * 60
  3062. if tzd[0] == '+':
  3063. return -offset
  3064. return offset
  3065. __date_re = ('(?P<year>\d\d\d\d)'
  3066. '(?:(?P<dsep>-|)'
  3067. '(?:(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?'
  3068. '|(?P<julian>\d\d\d)))?')
  3069. __tzd_re = ' ?(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)?'
  3070. __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
  3071. '(?:(?P=tsep)(?P<seconds>\d\d)(?:[.,]\d+)?)?'
  3072. + __tzd_re)
  3073. __datetime_re = '%s(?:[T ]%s)?' % (__date_re, __time_re)
  3074. __datetime_rx = re.compile(__datetime_re)
  3075. m = __datetime_rx.match(dateString)
  3076. if (m is None) or (m.group() != dateString):
  3077. return
  3078. gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
  3079. if gmt[0] == 0:
  3080. return
  3081. return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
  3082. registerDateHandler(_parse_date_w3dtf)
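# Roughly: _parse_date_w3dtf(u'2003-12-31T10:14:55Z') returns time.gmtime() output
# beginning (2003, 12, 31, 10, 14, 55, ...); numeric offsets such as '-08:00' are
# folded in by __extract_tzd() before the conversion to GMT.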
  3083. # Define the strings used by the RFC822 datetime parser
  3084. _rfc822_months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',
  3085. 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
  3086. _rfc822_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
  3087. # Only the first three letters of the month name matter
  3088. _rfc822_month = "(?P<month>%s)(?:[a-z]*,?)" % ('|'.join(_rfc822_months))
  3089. # The year may be 2 or 4 digits; capture the century if it exists
  3090. _rfc822_year = "(?P<year>(?:\d{2})?\d{2})"
  3091. _rfc822_day = "(?P<day> *\d{1,2})"
  3092. _rfc822_date = "%s %s %s" % (_rfc822_day, _rfc822_month, _rfc822_year)
  3093. _rfc822_hour = "(?P<hour>\d{2}):(?P<minute>\d{2})(?::(?P<second>\d{2}))?"
  3094. _rfc822_tz = "(?P<tz>ut|gmt(?:[+-]\d{2}:\d{2})?|[aecmp][sd]?t|[zamny]|[+-]\d{4})"
  3095. _rfc822_tznames = {
  3096. 'ut': 0, 'gmt': 0, 'z': 0,
  3097. 'adt': -3, 'ast': -4, 'at': -4,
  3098. 'edt': -4, 'est': -5, 'et': -5,
  3099. 'cdt': -5, 'cst': -6, 'ct': -6,
  3100. 'mdt': -6, 'mst': -7, 'mt': -7,
  3101. 'pdt': -7, 'pst': -8, 'pt': -8,
  3102. 'a': -1, 'n': 1,
  3103. 'm': -12, 'y': 12,
  3104. }
  3105. # The timezone may be prefixed by 'Etc/'
  3106. _rfc822_time = "%s (?:etc/)?%s" % (_rfc822_hour, _rfc822_tz)
  3107. _rfc822_dayname = "(?P<dayname>%s)" % ('|'.join(_rfc822_daynames))
  3108. _rfc822_match = re.compile(
  3109. "(?:%s, )?%s(?: %s)?" % (_rfc822_dayname, _rfc822_date, _rfc822_time)
  3110. ).match
  3111. def _parse_date_group_rfc822(m):
  3112. # Calculate a date and timestamp
  3113. for k in ('year', 'day', 'hour', 'minute', 'second'):
  3114. m[k] = int(m[k])
  3115. m['month'] = _rfc822_months.index(m['month']) + 1
  3116. # If the year is 2 digits, assume everything in the 90's is the 1990's
  3117. if m['year'] < 100:
  3118. m['year'] += (1900, 2000)[m['year'] < 90]
  3119. stamp = datetime.datetime(*[m[i] for i in
  3120. ('year', 'month', 'day', 'hour', 'minute', 'second')])
  3121. # Use the timezone information to calculate the difference between
3122. # the given timestamp and Coordinated Universal Time (UTC)
  3123. tzhour = 0
  3124. tzmin = 0
  3125. if m['tz'] and m['tz'].startswith('gmt'):
  3126. # Handle GMT and GMT+hh:mm timezone syntax (the trailing
  3127. # timezone info will be handled by the next `if` block)
  3128. m['tz'] = ''.join(m['tz'][3:].split(':')) or 'gmt'
  3129. if not m['tz']:
  3130. pass
  3131. elif m['tz'].startswith('+'):
  3132. tzhour = int(m['tz'][1:3])
  3133. tzmin = int(m['tz'][3:])
  3134. elif m['tz'].startswith('-'):
  3135. tzhour = int(m['tz'][1:3]) * -1
  3136. tzmin = int(m['tz'][3:]) * -1
  3137. else:
  3138. tzhour = _rfc822_tznames[m['tz']]
  3139. delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)
  3140. # Return the date and timestamp in UTC
  3141. return (stamp - delta).utctimetuple()
  3142. def _parse_date_rfc822(dt):
  3143. """Parse RFC 822 dates and times, with one minor
  3144. difference: years may be 4DIGIT or 2DIGIT.
  3145. http://tools.ietf.org/html/rfc822#section-5"""
  3146. try:
  3147. m = _rfc822_match(dt.lower()).groupdict(0)
  3148. except AttributeError:
  3149. return None
  3150. return _parse_date_group_rfc822(m)
  3151. registerDateHandler(_parse_date_rfc822)
  3152. def _parse_date_rfc822_grubby(dt):
  3153. """Parse date format similar to RFC 822, but
  3154. the comma after the dayname is optional and
  3155. month/day are inverted"""
  3156. _rfc822_date_grubby = "%s %s %s" % (_rfc822_month, _rfc822_day, _rfc822_year)
  3157. _rfc822_match_grubby = re.compile(
  3158. "(?:%s[,]? )?%s(?: %s)?" % (_rfc822_dayname, _rfc822_date_grubby, _rfc822_time)
  3159. ).match
  3160. try:
  3161. m = _rfc822_match_grubby(dt.lower()).groupdict(0)
  3162. except AttributeError:
  3163. return None
  3164. return _parse_date_group_rfc822(m)
  3165. registerDateHandler(_parse_date_rfc822_grubby)
  3166. def _parse_date_asctime(dt):
  3167. """Parse asctime-style dates"""
  3168. dayname, month, day, remainder = dt.split(None, 3)
  3169. # Convert month and day into zero-padded integers
  3170. month = '%02i ' % (_rfc822_months.index(month.lower()) + 1)
  3171. day = '%02i ' % (int(day),)
  3172. dt = month + day + remainder
  3173. return time.strptime(dt, '%m %d %H:%M:%S %Y')[:-1] + (0, )
  3174. registerDateHandler(_parse_date_asctime)
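# Roughly: _parse_date_asctime('Sun Jan  4 16:29:06 2004') returns a 9-tuple
# beginning (2004, 1, 4, 16, 29, 6, ...).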
  3175. def _parse_date_perforce(aDateString):
  3176. """parse a date in yyyy/mm/dd hh:mm:ss TTT format"""
  3177. # Fri, 2006/09/15 08:19:53 EDT
  3178. _my_date_pattern = re.compile( \
  3179. r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})')
  3180. m = _my_date_pattern.search(aDateString)
  3181. if m is None:
  3182. return None
  3183. dow, year, month, day, hour, minute, second, tz = m.groups()
  3184. months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
  3185. dateString = "%s, %s %s %s %s:%s:%s %s" % (dow, day, months[int(month) - 1], year, hour, minute, second, tz)
  3186. tm = rfc822.parsedate_tz(dateString)
  3187. if tm:
  3188. return time.gmtime(rfc822.mktime_tz(tm))
  3189. registerDateHandler(_parse_date_perforce)
  3190. def _parse_date(dateString):
  3191. '''Parses a variety of date formats into a 9-tuple in GMT'''
  3192. if not dateString:
  3193. return None
  3194. for handler in _date_handlers:
  3195. try:
  3196. date9tuple = handler(dateString)
  3197. except (KeyError, OverflowError, ValueError):
  3198. continue
  3199. if not date9tuple:
  3200. continue
  3201. if len(date9tuple) != 9:
  3202. continue
  3203. return date9tuple
  3204. return None
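# Roughly: _parse_date('Thu, 01 Jan 2004 19:48:21 GMT') returns
# (2004, 1, 1, 19, 48, 21, 3, 1, 0); a string that no registered handler accepts
# yields None.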
  3205. # Each marker represents some of the characters of the opening XML
  3206. # processing instruction ('<?xm') in the specified encoding.
  3207. EBCDIC_MARKER = _l2bytes([0x4C, 0x6F, 0xA7, 0x94])
  3208. UTF16BE_MARKER = _l2bytes([0x00, 0x3C, 0x00, 0x3F])
  3209. UTF16LE_MARKER = _l2bytes([0x3C, 0x00, 0x3F, 0x00])
  3210. UTF32BE_MARKER = _l2bytes([0x00, 0x00, 0x00, 0x3C])
  3211. UTF32LE_MARKER = _l2bytes([0x3C, 0x00, 0x00, 0x00])
  3212. ZERO_BYTES = _l2bytes([0x00, 0x00])
  3213. # Match the opening XML declaration.
  3214. # Example: <?xml version="1.0" encoding="utf-8"?>
  3215. RE_XML_DECLARATION = re.compile('^<\?xml[^>]*?>')
  3216. # Capture the value of the XML processing instruction's encoding attribute.
  3217. # Example: <?xml version="1.0" encoding="utf-8"?>
  3218. RE_XML_PI_ENCODING = re.compile(_s2bytes('^<\?.*encoding=[\'"](.*?)[\'"].*\?>'))
  3219. def convert_to_utf8(http_headers, data):
  3220. '''Detect and convert the character encoding to UTF-8.
  3221. http_headers is a dictionary
  3222. data is a raw string (not Unicode)'''
  3223. # This is so much trickier than it sounds, it's not even funny.
  3224. # According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
  3225. # is application/xml, application/*+xml,
  3226. # application/xml-external-parsed-entity, or application/xml-dtd,
  3227. # the encoding given in the charset parameter of the HTTP Content-Type
  3228. # takes precedence over the encoding given in the XML prefix within the
  3229. # document, and defaults to 'utf-8' if neither are specified. But, if
  3230. # the HTTP Content-Type is text/xml, text/*+xml, or
  3231. # text/xml-external-parsed-entity, the encoding given in the XML prefix
  3232. # within the document is ALWAYS IGNORED and only the encoding given in
  3233. # the charset parameter of the HTTP Content-Type header should be
  3234. # respected, and it defaults to 'us-ascii' if not specified.
  3235. # Furthermore, discussion on the atom-syntax mailing list with the
  3236. # author of RFC 3023 leads me to the conclusion that any document
  3237. # served with a Content-Type of text/* and no charset parameter
  3238. # must be treated as us-ascii. (We now do this.) And also that it
  3239. # must always be flagged as non-well-formed. (We now do this too.)
  3240. # If Content-Type is unspecified (input was local file or non-HTTP source)
  3241. # or unrecognized (server just got it totally wrong), then go by the
  3242. # encoding given in the XML prefix of the document and default to
  3243. # 'iso-8859-1' as per the HTTP specification (RFC 2616).
  3244. # Then, assuming we didn't find a character encoding in the HTTP headers
  3245. # (and the HTTP Content-type allowed us to look in the body), we need
  3246. # to sniff the first few bytes of the XML data and try to determine
  3247. # whether the encoding is ASCII-compatible. Section F of the XML
  3248. # specification shows the way here:
  3249. # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
  3250. # If the sniffed encoding is not ASCII-compatible, we need to make it
  3251. # ASCII compatible so that we can sniff further into the XML declaration
  3252. # to find the encoding attribute, which will tell us the true encoding.
  3253. # Of course, none of this guarantees that we will be able to parse the
  3254. # feed in the declared character encoding (assuming it was declared
  3255. # correctly, which many are not). iconv_codec can help a lot;
  3256. # you should definitely install it if you can.
  3257. # http://cjkpython.i18n.org/
  3258. bom_encoding = u''
  3259. xml_encoding = u''
  3260. rfc3023_encoding = u''
  3261. # Look at the first few bytes of the document to guess what
  3262. # its encoding may be. We only need to decode enough of the
  3263. # document that we can use an ASCII-compatible regular
  3264. # expression to search for an XML encoding declaration.
  3265. # The heuristic follows the XML specification, section F:
  3266. # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
  3267. # Check for BOMs first.
  3268. if data[:4] == codecs.BOM_UTF32_BE:
  3269. bom_encoding = u'utf-32be'
  3270. data = data[4:]
  3271. elif data[:4] == codecs.BOM_UTF32_LE:
  3272. bom_encoding = u'utf-32le'
  3273. data = data[4:]
  3274. elif data[:2] == codecs.BOM_UTF16_BE and data[2:4] != ZERO_BYTES:
  3275. bom_encoding = u'utf-16be'
  3276. data = data[2:]
  3277. elif data[:2] == codecs.BOM_UTF16_LE and data[2:4] != ZERO_BYTES:
  3278. bom_encoding = u'utf-16le'
  3279. data = data[2:]
  3280. elif data[:3] == codecs.BOM_UTF8:
  3281. bom_encoding = u'utf-8'
  3282. data = data[3:]
  3283. # Check for the characters '<?xm' in several encodings.
  3284. elif data[:4] == EBCDIC_MARKER:
  3285. bom_encoding = u'cp037'
  3286. elif data[:4] == UTF16BE_MARKER:
  3287. bom_encoding = u'utf-16be'
  3288. elif data[:4] == UTF16LE_MARKER:
  3289. bom_encoding = u'utf-16le'
  3290. elif data[:4] == UTF32BE_MARKER:
  3291. bom_encoding = u'utf-32be'
  3292. elif data[:4] == UTF32LE_MARKER:
  3293. bom_encoding = u'utf-32le'
  3294. tempdata = data
  3295. try:
  3296. if bom_encoding:
  3297. tempdata = data.decode(bom_encoding).encode('utf-8')
  3298. except (UnicodeDecodeError, LookupError):
  3299. # feedparser recognizes UTF-32 encodings that aren't
  3300. # available in Python 2.4 and 2.5, so it's possible to
  3301. # encounter a LookupError during decoding.
  3302. xml_encoding_match = None
  3303. else:
  3304. xml_encoding_match = RE_XML_PI_ENCODING.match(tempdata)
  3305. if xml_encoding_match:
  3306. xml_encoding = xml_encoding_match.groups()[0].decode('utf-8').lower()
  3307. # Normalize the xml_encoding if necessary.
  3308. if bom_encoding and (xml_encoding in (
  3309. u'u16', u'utf-16', u'utf16', u'utf_16',
  3310. u'u32', u'utf-32', u'utf32', u'utf_32',
  3311. u'iso-10646-ucs-2', u'iso-10646-ucs-4',
  3312. u'csucs4', u'csunicode', u'ucs-2', u'ucs-4'
  3313. )):
  3314. xml_encoding = bom_encoding
  3315. # Find the HTTP Content-Type and, hopefully, a character
  3316. # encoding provided by the server. The Content-Type is used
  3317. # to choose the "correct" encoding among the BOM encoding,
  3318. # XML declaration encoding, and HTTP encoding, following the
  3319. # heuristic defined in RFC 3023.
  3320. http_content_type = http_headers.get('content-type') or ''
  3321. http_content_type, params = cgi.parse_header(http_content_type)
  3322. http_encoding = params.get('charset', '').replace("'", "")
  3323. if not isinstance(http_encoding, unicode):
  3324. http_encoding = http_encoding.decode('utf-8', 'ignore')
  3325. acceptable_content_type = 0
  3326. application_content_types = (u'application/xml', u'application/xml-dtd',
  3327. u'application/xml-external-parsed-entity')
  3328. text_content_types = (u'text/xml', u'text/xml-external-parsed-entity')
  3329. if (http_content_type in application_content_types) or \
  3330. (http_content_type.startswith(u'application/') and
  3331. http_content_type.endswith(u'+xml')):
  3332. acceptable_content_type = 1
  3333. rfc3023_encoding = http_encoding or xml_encoding or u'utf-8'
  3334. elif (http_content_type in text_content_types) or \
  3335. (http_content_type.startswith(u'text/') and
  3336. http_content_type.endswith(u'+xml')):
  3337. acceptable_content_type = 1
  3338. rfc3023_encoding = http_encoding or u'us-ascii'
  3339. elif http_content_type.startswith(u'text/'):
  3340. rfc3023_encoding = http_encoding or u'us-ascii'
  3341. elif http_headers and 'content-type' not in http_headers:
  3342. rfc3023_encoding = xml_encoding or u'iso-8859-1'
  3343. else:
  3344. rfc3023_encoding = xml_encoding or u'utf-8'
  3345. # gb18030 is a superset of gb2312, so always replace gb2312
  3346. # with gb18030 for greater compatibility.
  3347. if rfc3023_encoding.lower() == u'gb2312':
  3348. rfc3023_encoding = u'gb18030'
  3349. if xml_encoding.lower() == u'gb2312':
  3350. xml_encoding = u'gb18030'
  3351. # there are four encodings to keep track of:
  3352. # - http_encoding is the encoding declared in the Content-Type HTTP header
  3353. # - xml_encoding is the encoding declared in the <?xml declaration
  3354. # - bom_encoding is the encoding sniffed from the first 4 bytes of the XML data
  3355. # - rfc3023_encoding is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
  3356. error = None
  3357. if http_headers and (not acceptable_content_type):
  3358. if 'content-type' in http_headers:
  3359. msg = '%s is not an XML media type' % http_headers['content-type']
  3360. else:
  3361. msg = 'no Content-type specified'
  3362. error = NonXMLContentType(msg)
  3363. # determine character encoding
  3364. known_encoding = 0
  3365. chardet_encoding = None
  3366. tried_encodings = []
  3367. if chardet:
  3368. chardet_encoding = unicode(chardet.detect(data)['encoding'] or '', 'ascii', 'ignore')
  3369. # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
  3370. for proposed_encoding in (rfc3023_encoding, xml_encoding, bom_encoding,
  3371. chardet_encoding, u'utf-8', u'windows-1252', u'iso-8859-2'):
  3372. if not proposed_encoding:
  3373. continue
  3374. if proposed_encoding in tried_encodings:
  3375. continue
  3376. tried_encodings.append(proposed_encoding)
  3377. try:
  3378. data = data.decode(proposed_encoding)
  3379. except (UnicodeDecodeError, LookupError):
  3380. pass
  3381. else:
  3382. known_encoding = 1
  3383. # Update the encoding in the opening XML processing instruction.
  3384. new_declaration = '''<?xml version='1.0' encoding='utf-8'?>'''
  3385. if RE_XML_DECLARATION.search(data):
  3386. data = RE_XML_DECLARATION.sub(new_declaration, data)
  3387. else:
  3388. data = new_declaration + u'\n' + data
  3389. data = data.encode('utf-8')
  3390. break
  3391. # if still no luck, give up
  3392. if not known_encoding:
  3393. error = CharacterEncodingUnknown(
  3394. 'document encoding unknown, I tried ' +
  3395. '%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' %
  3396. (rfc3023_encoding, xml_encoding))
  3397. rfc3023_encoding = u''
  3398. elif proposed_encoding != rfc3023_encoding:
  3399. error = CharacterEncodingOverride(
  3400. 'document declared as %s, but parsed as %s' %
  3401. (rfc3023_encoding, proposed_encoding))
  3402. rfc3023_encoding = proposed_encoding
  3403. return data, rfc3023_encoding, error
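# Illustrative sketch: for http_headers of
#     {'content-type': 'application/xml; charset=iso-8859-1'}
# and data beginning with '<?xml version="1.0"?>', the charset parameter wins,
# the document is re-encoded as UTF-8 with an updated declaration, and the
# function returns (utf8_data, u'iso-8859-1', None).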
  3404. # Match XML entity declarations.
  3405. # Example: <!ENTITY copyright "(C)">
  3406. RE_ENTITY_PATTERN = re.compile(_s2bytes(r'^\s*<!ENTITY([^>]*?)>'), re.MULTILINE)
  3407. # Match XML DOCTYPE declarations.
  3408. # Example: <!DOCTYPE feed [ ]>
  3409. RE_DOCTYPE_PATTERN = re.compile(_s2bytes(r'^\s*<!DOCTYPE([^>]*?)>'), re.MULTILINE)
  3410. # Match safe entity declarations.
  3411. # This will allow hexadecimal character references through,
  3412. # as well as text, but not arbitrary nested entities.
  3413. # Example: cubed "&#179;"
  3414. # Example: copyright "(C)"
  3415. # Forbidden: explode1 "&explode2;&explode2;"
  3416. RE_SAFE_ENTITY_PATTERN = re.compile(_s2bytes('\s+(\w+)\s+"(&#\w+;|[^&"]*)"'))
  3417. def replace_doctype(data):
3418. '''Strips and replaces the DOCTYPE, returns (rss_version, stripped_data, safe_entities)
3419. rss_version may be 'rss091n' or None
3420. stripped_data is the same XML document with a replaced DOCTYPE
3421. safe_entities is a dict of any safe ENTITY definitions found in the DOCTYPE'''
  3422. # Divide the document into two groups by finding the location
  3423. # of the first element that doesn't begin with '<?' or '<!'.
  3424. start = re.search(_s2bytes('<\w'), data)
  3425. start = start and start.start() or -1
  3426. head, data = data[:start+1], data[start+1:]
  3427. # Save and then remove all of the ENTITY declarations.
  3428. entity_results = RE_ENTITY_PATTERN.findall(head)
  3429. head = RE_ENTITY_PATTERN.sub(_s2bytes(''), head)
  3430. # Find the DOCTYPE declaration and check the feed type.
  3431. doctype_results = RE_DOCTYPE_PATTERN.findall(head)
  3432. doctype = doctype_results and doctype_results[0] or _s2bytes('')
  3433. if _s2bytes('netscape') in doctype.lower():
  3434. version = u'rss091n'
  3435. else:
  3436. version = None
  3437. # Re-insert the safe ENTITY declarations if a DOCTYPE was found.
  3438. replacement = _s2bytes('')
  3439. if len(doctype_results) == 1 and entity_results:
  3440. match_safe_entities = lambda e: RE_SAFE_ENTITY_PATTERN.match(e)
  3441. safe_entities = filter(match_safe_entities, entity_results)
  3442. if safe_entities:
  3443. replacement = _s2bytes('<!DOCTYPE feed [\n<!ENTITY') \
  3444. + _s2bytes('>\n<!ENTITY ').join(safe_entities) \
  3445. + _s2bytes('>\n]>')
  3446. data = RE_DOCTYPE_PATTERN.sub(replacement, head) + data
  3447. # Precompute the safe entities for the loose parser.
  3448. safe_entities = dict((k.decode('utf-8'), v.decode('utf-8'))
  3449. for k, v in RE_SAFE_ENTITY_PATTERN.findall(replacement))
  3450. return version, data, safe_entities
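# Illustrative sketch: a document whose DOCTYPE mentions the Netscape RSS 0.91 DTD
# comes back as (u'rss091n', data_without_the_doctype, {}); if safe ENTITY
# declarations were present they are re-inserted into a minimal
# '<!DOCTYPE feed [...]>' and also returned in the dict.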
  3451. def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=None, request_headers=None, response_headers=None):
  3452. '''Parse a feed from a URL, file, stream, or string.
  3453. request_headers, if given, is a dict from http header name to value to add
  3454. to the request; this overrides internally generated values.
  3455. '''
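# Typical use (the URL is hypothetical):
#     d = parse(u'http://example.org/feed.xml',
#               request_headers={'Cache-Control': 'no-cache'})
#     d['bozo'], d['feed'].get('title'), d['entries']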
  3456. if handlers is None:
  3457. handlers = []
  3458. if request_headers is None:
  3459. request_headers = {}
  3460. if response_headers is None:
  3461. response_headers = {}
  3462. result = FeedParserDict()
  3463. result['feed'] = FeedParserDict()
  3464. result['entries'] = []
  3465. result['bozo'] = 0
  3466. if not isinstance(handlers, list):
  3467. handlers = [handlers]
  3468. try:
  3469. f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers)
  3470. data = f.read()
  3471. except Exception, e:
  3472. result['bozo'] = 1
  3473. result['bozo_exception'] = e
  3474. data = None
  3475. f = None
  3476. if hasattr(f, 'headers'):
  3477. result['headers'] = dict(f.headers)
  3478. # overwrite existing headers using response_headers
  3479. if 'headers' in result:
  3480. result['headers'].update(response_headers)
  3481. elif response_headers:
  3482. result['headers'] = copy.deepcopy(response_headers)
  3483. # lowercase all of the HTTP headers for comparisons per RFC 2616
  3484. if 'headers' in result:
  3485. http_headers = dict((k.lower(), v) for k, v in result['headers'].items())
  3486. else:
  3487. http_headers = {}
  3488. # if feed is gzip-compressed, decompress it
  3489. if f and data and http_headers:
  3490. if gzip and 'gzip' in http_headers.get('content-encoding', ''):
  3491. try:
  3492. data = gzip.GzipFile(fileobj=_StringIO(data)).read()
  3493. except (IOError, struct.error), e:
  3494. # IOError can occur if the gzip header is bad.
  3495. # struct.error can occur if the data is damaged.
  3496. result['bozo'] = 1
  3497. result['bozo_exception'] = e
  3498. if isinstance(e, struct.error):
  3499. # A gzip header was found but the data is corrupt.
  3500. # Ideally, we should re-request the feed without the
  3501. # 'Accept-encoding: gzip' header, but we don't.
  3502. data = None
  3503. elif zlib and 'deflate' in http_headers.get('content-encoding', ''):
  3504. try:
  3505. data = zlib.decompress(data)
  3506. except zlib.error, e:
  3507. try:
  3508. # The data may have no headers and no checksum.
  3509. data = zlib.decompress(data, -15)
  3510. except zlib.error, e:
  3511. result['bozo'] = 1
  3512. result['bozo_exception'] = e
  3513. # save HTTP headers
  3514. if http_headers:
  3515. if 'etag' in http_headers:
  3516. etag = http_headers.get('etag', u'')
  3517. if not isinstance(etag, unicode):
  3518. etag = etag.decode('utf-8', 'ignore')
  3519. if etag:
  3520. result['etag'] = etag
  3521. if 'last-modified' in http_headers:
  3522. modified = http_headers.get('last-modified', u'')
  3523. if modified:
  3524. result['modified'] = modified
  3525. result['modified_parsed'] = _parse_date(modified)
  3526. if hasattr(f, 'url'):
  3527. if not isinstance(f.url, unicode):
  3528. result['href'] = f.url.decode('utf-8', 'ignore')
  3529. else:
  3530. result['href'] = f.url
  3531. result['status'] = 200
  3532. if hasattr(f, 'status'):
  3533. result['status'] = f.status
  3534. if hasattr(f, 'close'):
  3535. f.close()
  3536. if data is None:
  3537. return result
  3538. # Stop processing if the server sent HTTP 304 Not Modified.
  3539. if getattr(f, 'code', 0) == 304:
  3540. result['version'] = u''
  3541. result['debug_message'] = 'The feed has not changed since you last checked, ' + \
  3542. 'so the server sent no data. This is a feature, not a bug!'
  3543. return result
  3544. data, result['encoding'], error = convert_to_utf8(http_headers, data)
  3545. use_strict_parser = result['encoding'] and True or False
  3546. if error is not None:
  3547. result['bozo'] = 1
  3548. result['bozo_exception'] = error
  3549. result['version'], data, entities = replace_doctype(data)
  3550. # Ensure that baseuri is an absolute URI using an acceptable URI scheme.
  3551. contentloc = http_headers.get('content-location', u'')
  3552. href = result.get('href', u'')
  3553. baseuri = _makeSafeAbsoluteURI(href, contentloc) or _makeSafeAbsoluteURI(contentloc) or href
  3554. baselang = http_headers.get('content-language', None)
  3555. if not isinstance(baselang, unicode) and baselang is not None:
  3556. baselang = baselang.decode('utf-8', 'ignore')
  3557. if not _XML_AVAILABLE:
  3558. use_strict_parser = 0
  3559. if use_strict_parser:
  3560. # initialize the SAX parser
  3561. feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
  3562. saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
  3563. saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
  3564. try:
  3565. # disable downloading external doctype references, if possible
  3566. saxparser.setFeature(xml.sax.handler.feature_external_ges, 0)
  3567. except xml.sax.SAXNotSupportedException:
  3568. pass
  3569. saxparser.setContentHandler(feedparser)
  3570. saxparser.setErrorHandler(feedparser)
  3571. source = xml.sax.xmlreader.InputSource()
  3572. source.setByteStream(_StringIO(data))
  3573. try:
  3574. saxparser.parse(source)
  3575. except xml.sax.SAXException, e:
  3576. result['bozo'] = 1
  3577. result['bozo_exception'] = feedparser.exc or e
  3578. use_strict_parser = 0
  3579. if not use_strict_parser and _SGML_AVAILABLE:
  3580. feedparser = _LooseFeedParser(baseuri, baselang, 'utf-8', entities)
  3581. feedparser.feed(data.decode('utf-8', 'replace'))
  3582. result['feed'] = feedparser.feeddata
  3583. result['entries'] = feedparser.entries
  3584. result['version'] = result['version'] or feedparser.version
  3585. result['namespaces'] = feedparser.namespacesInUse
  3586. return result