
/source/net/yacy/document/parser/htmlParser.java

https://gitorious.org/yacy
/**
 *  htmlParser.java
 *  Copyright 2009 by Michael Peter Christen, mc@yacy.net, Frankfurt am Main, Germany
 *  First released 09.07.2009 at http://yacy.net
 *
 *  $LastChangedDate$
 *  $LastChangedRevision$
 *  $LastChangedBy$
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2.1 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public License
 *  along with this program in the file lgpl21.txt
 *  If not, see <http://www.gnu.org/licenses/>.
 */
package net.yacy.document.parser;

import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.net.MalformedURLException;
import java.nio.charset.Charset;
import java.nio.charset.IllegalCharsetNameException;
import java.nio.charset.UnsupportedCharsetException;
import java.util.LinkedHashMap;

import net.yacy.cora.document.encoding.UTF8;
import net.yacy.cora.document.id.AnchorURL;
import net.yacy.cora.document.id.DigestURL;
import net.yacy.cora.protocol.ClientIdentification;
import net.yacy.cora.util.CommonPattern;
import net.yacy.document.AbstractParser;
import net.yacy.document.Document;
import net.yacy.document.Parser;
import net.yacy.document.parser.html.ContentScraper;
import net.yacy.document.parser.html.ImageEntry;
import net.yacy.document.parser.html.ScraperInputStream;
import net.yacy.document.parser.html.TransformerWriter;
import net.yacy.kelondro.util.FileUtils;

import com.ibm.icu.text.CharsetDetector;

public class htmlParser extends AbstractParser implements Parser {

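    // upper bound handed to the ContentScraper / parseToScraper calls below;
    // presumably caps how many links are evaluated per document (not documented in the original source)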
    private static final int maxLinks = 10000;

    public htmlParser() {
        super("Streaming HTML Parser");

        this.SUPPORTED_EXTENSIONS.add("htm");
        this.SUPPORTED_EXTENSIONS.add("html");
        this.SUPPORTED_EXTENSIONS.add("phtml");
        this.SUPPORTED_EXTENSIONS.add("shtml");
        this.SUPPORTED_EXTENSIONS.add("xhtml");
        this.SUPPORTED_EXTENSIONS.add("php");
        this.SUPPORTED_EXTENSIONS.add("php3");
        this.SUPPORTED_EXTENSIONS.add("php4");
        this.SUPPORTED_EXTENSIONS.add("php5");
        this.SUPPORTED_EXTENSIONS.add("cfm");
        this.SUPPORTED_EXTENSIONS.add("asp");
        this.SUPPORTED_EXTENSIONS.add("aspx");
        this.SUPPORTED_EXTENSIONS.add("tex");
        this.SUPPORTED_EXTENSIONS.add("txt");

        this.SUPPORTED_MIME_TYPES.add("text/html");
        this.SUPPORTED_MIME_TYPES.add("text/xhtml+xml");
        this.SUPPORTED_MIME_TYPES.add("application/xhtml+xml");
        this.SUPPORTED_MIME_TYPES.add("application/x-httpd-php");
        this.SUPPORTED_MIME_TYPES.add("application/x-tex");
        this.SUPPORTED_MIME_TYPES.add("text/plain");
        this.SUPPORTED_MIME_TYPES.add("text/csv");
    }

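    /**
     * Parses the given source stream into a single YaCy Document.
     * The charset is resolved via parseToScraper (HTTP header, html meta tag,
     * heuristic detection) and the resulting scraper is converted with transformScraper.
     *
     * @param location the URL of the source
     * @param mimeType the mime type of the source
     * @param documentCharset the charset name from the HTTP header, may be null
     * @param sourceStream the raw content stream
     * @return an array containing the single parsed document
     * @throws Parser.Failure if the content cannot be parsed
     * @throws InterruptedException if parsing is interrupted
     */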
    @Override
    public Document[] parse(
            final AnchorURL location,
            final String mimeType,
            final String documentCharset,
            final InputStream sourceStream) throws Parser.Failure, InterruptedException {

        try {
            // first get a document from the parsed html
            Charset[] detectedcharsetcontainer = new Charset[]{null};
            final ContentScraper scraper = parseToScraper(location, documentCharset, detectedcharsetcontainer, sourceStream, maxLinks);
            // parseToScraper also detects/corrects/sets charset from html content tag
            final Document document = transformScraper(location, mimeType, detectedcharsetcontainer[0].name(), scraper);
            return new Document[]{document};
        } catch (final IOException e) {
            throw new Parser.Failure("IOException in htmlParser: " + e.getMessage(), location);
        }
    }

    /**
     * the transformScraper method transforms a scraper object into a document object
     * @param location the URL of the source
     * @param mimeType the mime type of the source
     * @param charSet the name of the detected charset
     * @param scraper the scraper that holds the parsed content
     * @return the Document built from the scraper content
     */
    private static Document transformScraper(final DigestURL location, final String mimeType, final String charSet, final ContentScraper scraper) {
        final String[] sections = new String[
                 scraper.getHeadlines(1).length +
                 scraper.getHeadlines(2).length +
                 scraper.getHeadlines(3).length +
                 scraper.getHeadlines(4).length +
                 scraper.getHeadlines(5).length +
                 scraper.getHeadlines(6).length];
        int p = 0;
        for (int i = 1; i <= 6; i++) {
            for (final String headline : scraper.getHeadlines(i)) {
                sections[p++] = headline;
            }
        }
        LinkedHashMap<AnchorURL, ImageEntry> noDoubleImages = new LinkedHashMap<AnchorURL, ImageEntry>();
        for (ImageEntry ie: scraper.getImages()) noDoubleImages.put(ie.url(), ie);
        final Document ppd = new Document(
                location,
                mimeType,
                charSet,
                scraper,
                scraper.getContentLanguages(),
                scraper.getKeywords(),
                scraper.getTitles(),
                scraper.getAuthor(),
                scraper.getPublisher(),
                sections,
                scraper.getDescriptions(),
                scraper.getLon(), scraper.getLat(),
                scraper.getText(),
                scraper.getAnchors(),
                scraper.getRSS(),
                noDoubleImages,
                scraper.indexingDenied(),
                scraper.getDate());
        ppd.setFavicon(scraper.getFavicon());
        return ppd;
    }

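    /**
     * Convenience variant of parseToScraper that takes the html content as a String.
     * The string is encoded with the given documentCharset (falling back to UTF-8 if the
     * charset is null or unsupported) and handed to the stream-based variant.
     *
     * @param location the URL of the source
     * @param documentCharset the charset name from the HTTP header, may be null
     * @param input the html content as a string
     * @param maxLinks the link limit handed to the scraper
     * @return the filled ContentScraper
     * @throws IOException if scraping fails
     */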
    public static ContentScraper parseToScraper(final DigestURL location, final String documentCharset, String input, int maxLinks) throws IOException {
        Charset[] detectedcharsetcontainer = new Charset[]{null};
        InputStream sourceStream;
        try {
            sourceStream = new ByteArrayInputStream(documentCharset == null ? UTF8.getBytes(input) : input.getBytes(documentCharset));
        } catch (UnsupportedEncodingException e) {
            sourceStream = new ByteArrayInputStream(UTF8.getBytes(input));
        }
        ContentScraper scraper;
        try {
            scraper = parseToScraper(location, documentCharset, detectedcharsetcontainer, sourceStream, maxLinks);
        } catch (Failure e) {
            throw new IOException(e.getMessage());
        }
        return scraper;
    }

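    /**
     * Parses the source stream into a ContentScraper. The charset is resolved in this order:
     * the charset given via HTTP header (documentCharset), a charset declared in an html
     * meta tag, the ICU CharsetDetector heuristic, and finally the JVM default charset.
     * The charset that was actually used is returned through detectedcharsetcontainer[0].
     *
     * @param location the URL of the source
     * @param documentCharset the charset name from the HTTP header, may be null
     * @param detectedcharsetcontainer single-element array that receives the detected charset
     * @param sourceStream the raw content stream
     * @param maxLinks the link limit handed to the scraper
     * @return the filled ContentScraper
     * @throws Parser.Failure if charset detection or parsing fails
     * @throws IOException on stream errors
     */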
    public static ContentScraper parseToScraper(
            final DigestURL location,
            final String documentCharset,
            Charset[] detectedcharsetcontainer,
            InputStream sourceStream,
            final int maxLinks) throws Parser.Failure, IOException {

        // make a scraper
        String charset = null;

        // ah, we are lucky, we got a character-encoding via HTTP-header
        if (documentCharset != null) {
            charset = patchCharsetEncoding(documentCharset);
        }

        // nothing found: try to find a meta-tag
        if (charset == null) {
            ScraperInputStream htmlFilter = null;
            try {
                htmlFilter = new ScraperInputStream(sourceStream, documentCharset, location, null, false, maxLinks);
                sourceStream = htmlFilter;
                charset = htmlFilter.detectCharset();
            } catch (final IOException e1) {
                throw new Parser.Failure("Charset error:" + e1.getMessage(), location);
            } finally {
                if (htmlFilter != null) htmlFilter.close();
            }
        }

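        // note on the fallback below: ICU's CharsetDetector reads a sample of the stream and
        // needs a mark/reset-capable stream, which is why the source stream is wrapped in a
        // BufferedInputStream before it is handed to the detector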
        // the author didn't tell us the encoding, try the mozilla-heuristic
        if (charset == null) {
            final CharsetDetector det = new CharsetDetector();
            det.enableInputFilter(true);
            final InputStream detStream = new BufferedInputStream(sourceStream);
            det.setText(detStream);
            charset = det.detect().getName();
            sourceStream = detStream;
        }

        // wtf? still nothing, just take system-standard
        if (charset == null) {
            detectedcharsetcontainer[0] = Charset.defaultCharset();
        } else {
            try {
                detectedcharsetcontainer[0] = Charset.forName(charset);
            } catch (final IllegalCharsetNameException e) {
                detectedcharsetcontainer[0] = Charset.defaultCharset();
            } catch (final UnsupportedCharsetException e) {
                detectedcharsetcontainer[0] = Charset.defaultCharset();
            }
        }

        // parsing the content
        final ContentScraper scraper = new ContentScraper(location, maxLinks);
        final TransformerWriter writer = new TransformerWriter(null, null, scraper, null, false, Math.max(64, Math.min(4096, sourceStream.available())));
        try {
            FileUtils.copy(sourceStream, writer, detectedcharsetcontainer[0]);
        } catch (final IOException e) {
            throw new Parser.Failure("IO error:" + e.getMessage(), location);
        } finally {
            writer.flush();
            //sourceStream.close(); keep open for multiple parsing (close done by caller)
            writer.close();
        }
        //OutputStream hfos = new htmlFilterOutputStream(null, scraper, null, false);
        //serverFileUtils.copy(sourceFile, hfos);
        //hfos.close();
        if (writer.binarySuspect()) {
            final String errorMsg = "Binary data found in resource";
            throw new Parser.Failure(errorMsg, location);
        }
        return scraper;
    }

    /**
     * Some html authors use wrong encoding names, either because they don't know exactly what they
     * are doing or because they produce a typo. In many cases the upper/lowercase scheme of the name is fuzzy.
     * This method patches wrong encoding names. The correct names are taken from
     * http://www.iana.org/assignments/character-sets
     * @param encoding the encoding name as declared by the document or header
     * @return the patched encoding name, or null if the input was null or too short
     */
    public static String patchCharsetEncoding(String encoding) {

        // do nothing with null
        if ((encoding == null) || (encoding.length() < 3)) return null;

        // trim encoding string
        encoding = encoding.trim();

        // fix upper/lowercase
        encoding = encoding.toUpperCase();
        if (encoding.startsWith("SHIFT")) return "Shift_JIS";
        if (encoding.startsWith("BIG")) return "Big5";
        // all other names but such with "windows" use uppercase
        if (encoding.startsWith("WINDOWS")) encoding = "windows" + encoding.substring(7);
        if (encoding.startsWith("MACINTOSH")) encoding = "MacRoman";

        // fix wrong fill characters
        encoding = CommonPattern.UNDERSCORE.matcher(encoding).replaceAll("-");

        if (encoding.matches("GB[_-]?2312([-_]80)?")) return "GB2312";
        if (encoding.matches(".*UTF[-_]?8.*")) return "UTF-8";
        if (encoding.startsWith("US")) return "US-ASCII";
        if (encoding.startsWith("KOI")) return "KOI8-R";

        // patch missing '-'
        if (encoding.startsWith("windows") && encoding.length() > 7) {
            final char c = encoding.charAt(7);
            if ((c >= '0') && (c <= '9')) {
                encoding = "windows-" + encoding.substring(7);
            }
        }
        if (encoding.startsWith("ISO")) {
            // patch typos
            if (encoding.length() > 3) {
                final char c = encoding.charAt(3);
                if ((c >= '0') && (c <= '9')) {
                    encoding = "ISO-" + encoding.substring(3);
                }
            }
            if (encoding.length() > 8) {
                final char c = encoding.charAt(8);
                if ((c >= '0') && (c <= '9')) {
                    encoding = encoding.substring(0, 8) + "-" + encoding.substring(8);
                }
            }
        }

        // patch wrong name
        if (encoding.startsWith("ISO-8559")) {
            // popular typo
            encoding = "ISO-8859" + encoding.substring(8);
        }

        // converting cp\d{4} -> windows-\d{4}
        if (encoding.matches("CP([_-])?125[0-8]")) {
            final char c = encoding.charAt(2);
            if ((c >= '0') && (c <= '9')) {
                encoding = "windows-" + encoding.substring(2);
            } else {
                encoding = "windows" + encoding.substring(2);
            }
        }

        return encoding;
    }

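    /**
     * Simple command-line test: fetches the URL given as the first argument,
     * parses it as text/html and prints the title of the resulting document.
     */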
    public static void main(final String[] args) {
        // test parsing of a url
        AnchorURL url;
        try {
            url = new AnchorURL(args[0]);
            final byte[] content = url.get(ClientIdentification.yacyInternetCrawlerAgent, null, null);
            final Document[] document = new htmlParser().parse(url, "text/html", "utf-8", new ByteArrayInputStream(content));
            final String title = document[0].dc_title();
            System.out.println(title);
        } catch (final MalformedURLException e) {
            e.printStackTrace();
        } catch (final IOException e) {
            e.printStackTrace();
        } catch (final Parser.Failure e) {
            e.printStackTrace();
        } catch (final InterruptedException e) {
            e.printStackTrace();
        }
        System.exit(0);
    }
}
  305. }