
/fanficdownloader/html2text.py

https://code.google.com/p/fanficdownloader/
Possible License(s): MIT
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""html2text: Turn HTML into equivalent Markdown-structured text."""
__version__ = "2.37"
__author__ = "Aaron Swartz (me@aaronsw.com)"
__copyright__ = "(C) 2004-2008 Aaron Swartz. GNU GPL 3."
__contributors__ = ["Martin 'Joey' Schulze", "Ricardo Reyes", "Kevin Jay North"]

# TODO:
# Support decoded entities with unifiable.

if not hasattr(__builtins__, 'True'): True, False = 1, 0

import re, sys, urllib, htmlentitydefs, codecs, StringIO, types
import sgmllib
import urlparse
sgmllib.charref = re.compile('&#([xX]?[0-9a-fA-F]+)[^0-9a-fA-F]')

try: from textwrap import wrap
except: pass
# Use Unicode characters instead of their ascii pseudo-replacements
UNICODE_SNOB = 0

# Put the links after each paragraph instead of at the end.
LINKS_EACH_PARAGRAPH = 0

# Wrap long lines at position. 0 for no wrapping. (Requires Python 2.3.)
BODY_WIDTH = 78

# Don't show internal links (href="#local-anchor") -- corresponding link targets
# won't be visible in the plain text file anyway.
SKIP_INTERNAL_LINKS = False

### Entity Nonsense ###
def name2cp(k):
    if k == 'apos': return ord("'")
    if hasattr(htmlentitydefs, "name2codepoint"): # requires Python 2.3
        return htmlentitydefs.name2codepoint[k]
    else:
        k = htmlentitydefs.entitydefs[k]
        if k.startswith("&#") and k.endswith(";"): return int(k[2:-1]) # not in latin-1
        return ord(codecs.latin_1_decode(k)[0])
unifiable = {'rsquo':"'", 'lsquo':"'", 'rdquo':'"', 'ldquo':'"',
'copy':'(C)', 'mdash':'--', 'nbsp':' ', 'rarr':'->', 'larr':'<-', 'middot':'*',
'ndash':'-', 'oelig':'oe', 'aelig':'ae',
'agrave':'a', 'aacute':'a', 'acirc':'a', 'atilde':'a', 'auml':'a', 'aring':'a',
'egrave':'e', 'eacute':'e', 'ecirc':'e', 'euml':'e',
'igrave':'i', 'iacute':'i', 'icirc':'i', 'iuml':'i',
'ograve':'o', 'oacute':'o', 'ocirc':'o', 'otilde':'o', 'ouml':'o',
'ugrave':'u', 'uacute':'u', 'ucirc':'u', 'uuml':'u'}

unifiable_n = {}

for k in unifiable.keys():
    unifiable_n[name2cp(k)] = unifiable[k]
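
# Descriptive note: unifiable maps HTML entity names to plain-ASCII stand-ins
# (used when UNICODE_SNOB is off); unifiable_n, built above, keys the same
# replacements by Unicode codepoint so numeric character references like
# &#8217; are folded the same way as &rsquo;.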
def charref(name):
    if name[0] in ['x','X']:
        c = int(name[1:], 16)
    else:
        c = int(name)

    if not UNICODE_SNOB and c in unifiable_n.keys():
        return unifiable_n[c]
    else:
        return unichr(c)

def entityref(c):
    if not UNICODE_SNOB and c in unifiable.keys():
        return unifiable[c]
    else:
        try: name2cp(c)
        except KeyError: return "&" + c
        else: return unichr(name2cp(c))

def replaceEntities(s):
    s = s.group(1)
    if s[0] == "#":
        return charref(s[1:])
    else: return entityref(s)

r_unescape = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
def unescape(s):
    return r_unescape.sub(replaceEntities, s)
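
# Illustrative example (not part of the original file): with UNICODE_SNOB = 0,
#   unescape("it&rsquo;s &amp; that&#8217;s")  ->  u"it's & that's"
# since the named entity 'rsquo' and codepoint 8217 are both folded to an
# ASCII apostrophe, while unknown names are left as "&name".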
def fixattrs(attrs):
    # Fix bug in sgmllib.py
    if not attrs: return attrs
    newattrs = []
    for attr in attrs:
        newattrs.append((attr[0], unescape(attr[1])))
    return newattrs

### End Entity Nonsense ###
def onlywhite(line):
    """Return true if the line does only consist of whitespace characters."""
    for c in line:
        if c is not ' ' and c is not '\t':
            return c is ' '
    return line
def optwrap(text, wrap_width=BODY_WIDTH):
    """Wrap all paragraphs in the provided text."""
    if not wrap_width:
        return text

    assert wrap, "Requires Python 2.3."
    result = ''
    newlines = 0
    for para in text.split("\n"):
        if len(para) > 0:
            if para[0] is not ' ' and para[0] is not '-' and para[0] is not '*':
                for line in wrap(para, wrap_width):
                    result += line + "\n"
                result += "\n"
                newlines = 2
            else:
                if not onlywhite(para):
                    result += para + "\n"
                    newlines = 1
        else:
            if newlines < 2:
                result += "\n"
                newlines += 1
    return result
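
# Descriptive note: only paragraphs that do not start with a space, '-' or '*'
# are re-wrapped at wrap_width, so indented/pre blocks and Markdown list items
# pass through untouched, and blank input lines are only copied while fewer
# than two consecutive newlines have been emitted.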
def hn(tag):
    if tag[0] == 'h' and len(tag) == 2:
        try:
            n = int(tag[1])
            if n in range(1, 10): return n
        except ValueError: return 0
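
# e.g. hn("h2") -> 2 and hn("h1") -> 1, while hn("hr") -> 0 and hn("html")
# falls through to None; handle_tag below only needs a truthy result for real
# <h1>..<h9> headers.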
class _html2text(sgmllib.SGMLParser):
    def __init__(self, out=None, baseurl=''):
        sgmllib.SGMLParser.__init__(self)

        if out is None: self.out = self.outtextf
        else: self.out = out
        self.outtext = u''
        self.quiet = 0
        self.p_p = 0
        self.outcount = 0
        self.start = 1
        self.space = 0
        self.a = []
        self.astack = []
        self.acount = 0
        self.list = []
        self.blockquote = 0
        self.pre = 0
        self.startpre = 0
        self.lastWasNL = 0
        self.abbr_title = None # current abbreviation definition
        self.abbr_data = None # last inner HTML (for abbr being defined)
        self.abbr_list = {} # stack of abbreviations to write later
        self.baseurl = baseurl
    def outtextf(self, s):
        self.outtext += s

    def close(self):
        sgmllib.SGMLParser.close(self)

        self.pbr()
        self.o('', 0, 'end')

        return self.outtext

    def handle_charref(self, c):
        self.o(charref(c))

    def handle_entityref(self, c):
        self.o(entityref(c))

    def unknown_starttag(self, tag, attrs):
        self.handle_tag(tag, attrs, 1)

    def unknown_endtag(self, tag):
        self.handle_tag(tag, None, 0)

    def previousIndex(self, attrs):
        """ returns the index of certain set of attributes (of a link) in the
            self.a list

            If the set of attributes is not found, returns None
        """
        if not attrs.has_key('href'): return None

        i = -1
        for a in self.a:
            i += 1
            match = 0

            if a.has_key('href') and a['href'] == attrs['href']:
                if a.has_key('title') or attrs.has_key('title'):
                    if (a.has_key('title') and attrs.has_key('title') and
                        a['title'] == attrs['title']):
                        match = True
                else:
                    match = True

            if match: return i
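
    # Descriptive note: handle_tag is the single dispatch point for both start
    # and end tags. It maps headers to '#' prefixes, <em>/<strong> to '_'/'**',
    # <a> and <img> to reference-style links collected in self.a, and lists,
    # blockquotes and <pre> to the corresponding Markdown prefixes.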
    def handle_tag(self, tag, attrs, start):
        attrs = fixattrs(attrs)

        if hn(tag):
            self.p()
            if start: self.o(hn(tag)*"#" + ' ')

        if tag in ['p', 'div']: self.p()

        if tag == "br" and start: self.o("  \n")

        if tag == "hr" and start:
            self.p()
            self.o("* * *")
            self.p()

        if tag in ["head", "style", 'script']:
            if start: self.quiet += 1
            else: self.quiet -= 1

        if tag in ["body"]:
            self.quiet = 0 # sites like 9rules.com never close <head>

        if tag == "blockquote":
            if start:
                self.p(); self.o('> ', 0, 1); self.start = 1
                self.blockquote += 1
            else:
                self.blockquote -= 1
                self.p()

        if tag in ['em', 'i', 'u']: self.o("_")
        if tag in ['strong', 'b']: self.o("**")
        if tag == "code" and not self.pre: self.o('`') #TODO: `` `this` ``
        if tag == "abbr":
            if start:
                attrsD = {}
                for (x, y) in attrs: attrsD[x] = y
                attrs = attrsD

                self.abbr_title = None
                self.abbr_data = ''
                if attrs.has_key('title'):
                    self.abbr_title = attrs['title']
            else:
                if self.abbr_title != None:
                    self.abbr_list[self.abbr_data] = self.abbr_title
                    self.abbr_title = None
                self.abbr_data = ''

        if tag == "a":
            if start:
                attrsD = {}
                for (x, y) in attrs: attrsD[x] = y
                attrs = attrsD
                if attrs.has_key('href') and not (SKIP_INTERNAL_LINKS and attrs['href'].startswith('#')):
                    self.astack.append(attrs)
                    self.o("[")
                else:
                    self.astack.append(None)
            else:
                if self.astack:
                    a = self.astack.pop()
                    if a:
                        i = self.previousIndex(a)
                        if i is not None:
                            a = self.a[i]
                        else:
                            self.acount += 1
                            a['count'] = self.acount
                            a['outcount'] = self.outcount
                            self.a.append(a)
                        self.o("][" + `a['count']` + "]")

        if tag == "img" and start:
            attrsD = {}
            for (x, y) in attrs: attrsD[x] = y
            attrs = attrsD
            if attrs.has_key('src'):
                attrs['href'] = attrs['src']
                alt = attrs.get('alt', '')
                i = self.previousIndex(attrs)
                if i is not None:
                    attrs = self.a[i]
                else:
                    self.acount += 1
                    attrs['count'] = self.acount
                    attrs['outcount'] = self.outcount
                    self.a.append(attrs)
                self.o("![")
                self.o(alt)
                self.o("]["+`attrs['count']`+"]")

        if tag == 'dl' and start: self.p()
        if tag == 'dt' and not start: self.pbr()
        if tag == 'dd' and start: self.o('    ')
        if tag == 'dd' and not start: self.pbr()

        if tag in ["ol", "ul"]:
            if start:
                self.list.append({'name':tag, 'num':0})
            else:
                if self.list: self.list.pop()

            self.p()

        if tag == 'li':
            if start:
                self.pbr()
                if self.list: li = self.list[-1]
                else: li = {'name':'ul', 'num':0}
                self.o("  "*len(self.list)) #TODO: line up <ol><li>s > 9 correctly.
                if li['name'] == "ul": self.o("* ")
                elif li['name'] == "ol":
                    li['num'] += 1
                    self.o(`li['num']`+". ")
                self.start = 1
            else:
                self.pbr()

        if tag in ["table", "tr"] and start: self.p()

        if tag == 'td': self.pbr()

        if tag == "pre":
            if start:
                self.startpre = 1
                self.pre = 1
            else:
                self.pre = 0
            self.p()
    def pbr(self):
        if self.p_p == 0: self.p_p = 1

    def p(self): self.p_p = 2
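
    # Descriptive note: pbr() queues a single pending line break and p() a full
    # paragraph break (p_p counts pending newlines). o() below flushes that
    # state, prepends any blockquote/pre prefix, and is also where the
    # collected reference links and abbreviations are written when
    # force == 'end'.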
    def o(self, data, puredata=0, force=0):
        if self.abbr_data is not None: self.abbr_data += data

        if not self.quiet:
            if puredata and not self.pre:
                data = re.sub('\s+', ' ', data)
                if data and data[0] == ' ':
                    self.space = 1
                    data = data[1:]
            if not data and not force: return

            if self.startpre:
                #self.out(" :") #TODO: not output when already one there
                self.startpre = 0

            bq = (">" * self.blockquote)
            if not (force and data and data[0] == ">") and self.blockquote: bq += " "

            if self.pre:
                bq += "    "
                data = data.replace("\n", "\n"+bq)

            if self.start:
                self.space = 0
                self.p_p = 0
                self.start = 0

            if force == 'end':
                # It's the end.
                self.p_p = 0
                self.out("\n")
                self.space = 0

            if self.p_p:
                self.out(('\n'+bq)*self.p_p)
                self.space = 0

            if self.space:
                if not self.lastWasNL: self.out(' ')
                self.space = 0

            if self.a and ((self.p_p == 2 and LINKS_EACH_PARAGRAPH) or force == "end"):
                if force == "end": self.out("\n")

                newa = []
                for link in self.a:
                    if self.outcount > link['outcount']:
                        self.out("   ["+`link['count']`+"]: " + urlparse.urljoin(self.baseurl, link['href']))
                        if link.has_key('title'): self.out(" ("+link['title']+")")
                        self.out("\n")
                    else:
                        newa.append(link)

                if self.a != newa: self.out("\n") # Don't need an extra line when nothing was done.

                self.a = newa

            if self.abbr_list and force == "end":
                for abbr, definition in self.abbr_list.items():
                    self.out("  *[" + abbr + "]: " + definition + "\n")

            self.p_p = 0
            self.out(data)
            self.lastWasNL = data and data[-1] == '\n'
            self.outcount += 1
    def handle_data(self, data):
        if r'\/script>' in data: self.quiet -= 1
        self.o(data, 1)

    def unknown_decl(self, data): pass
def wrapwrite(text): sys.stdout.write(text.encode('utf8'))

def html2text_file(html, out=wrapwrite, baseurl=''):
    h = _html2text(out, baseurl)
    h.feed(html)
    h.feed("")
    return h.close()

def html2text(html, baseurl='', wrap_width=BODY_WIDTH):
    return optwrap(html2text_file(html, None, baseurl), wrap_width)
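
# Illustrative usage (assumed call, not part of the original file):
#   html2text(u'<p>See <a href="http://example.com/">this</a>.</p>')
# returns wrapped Markdown along the lines of:
#   See [this][1].
#
#      [1]: http://example.com/
# with the reference list emitted at the end (or after each paragraph when
# LINKS_EACH_PARAGRAPH is set).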
if __name__ == "__main__":
    baseurl = ''
    if sys.argv[1:]:
        arg = sys.argv[1]
        if arg.startswith('http://'):
            baseurl = arg
            j = urllib.urlopen(baseurl)
            try:
                from feedparser import _getCharacterEncoding as enc
            except ImportError:
                enc = lambda x, y: ('utf-8', 1)
            text = j.read()
            encoding = enc(j.headers, text)[0]
            if encoding == 'us-ascii': encoding = 'utf-8'
            data = text.decode(encoding)
        else:
            encoding = 'utf8'
            if len(sys.argv) > 2:
                encoding = sys.argv[2]
            data = open(arg, 'r').read().decode(encoding)
    else:
        data = sys.stdin.read().decode('utf8')
    wrapwrite(html2text(data, baseurl))
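
# Command-line behaviour implemented above: with no arguments the script reads
# HTML from stdin; given a file name (and an optional encoding as the second
# argument) it converts that file; given an http:// URL it fetches the page
# first, e.g.
#   python html2text.py page.html utf-8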