
/scrapy/contrib/linkextractors/sgml.py

https://github.com/noplay/scrapy
  1. """
  2. SGMLParser-based Link extractors
  3. """
  4. import re
  5. from urlparse import urlparse, urljoin
  6. from w3lib.url import safe_url_string
  7. from scrapy.selector import HtmlXPathSelector
  8. from scrapy.link import Link
  9. from scrapy.linkextractor import IGNORED_EXTENSIONS
  10. from scrapy.utils.misc import arg_to_iter
  11. from scrapy.utils.python import FixedSGMLParser, unique as unique_list, str_to_unicode
  12. from scrapy.utils.url import canonicalize_url, url_is_from_any_domain, url_has_any_extension
  13. from scrapy.utils.response import get_base_url
  14. class BaseSgmlLinkExtractor(FixedSGMLParser):
  15. def __init__(self, tag="a", attr="href", unique=False, process_value=None):
  16. FixedSGMLParser.__init__(self)
  17. self.scan_tag = tag if callable(tag) else lambda t: t == tag
  18. self.scan_attr = attr if callable(attr) else lambda a: a == attr
  19. self.process_value = (lambda v: v) if process_value is None else process_value
  20. self.current_link = None
  21. self.unique = unique
  22. def _extract_links(self, response_text, response_url, response_encoding, base_url=None):
  23. """ Do the real extraction work """
  24. self.reset()
  25. self.feed(response_text)
  26. self.close()
  27. ret = []
  28. if base_url is None:
  29. base_url = urljoin(response_url, self.base_url) if self.base_url else response_url
  30. for link in self.links:
  31. if isinstance(link.url, unicode):
  32. link.url = link.url.encode(response_encoding)
  33. link.url = urljoin(base_url, link.url)
  34. link.url = safe_url_string(link.url, response_encoding)
  35. link.text = str_to_unicode(link.text, response_encoding, errors='replace')
  36. ret.append(link)
  37. return ret
    def _process_links(self, links):
        """ Normalize and filter extracted links

        The subclass should override it if necessary
        """
        links = unique_list(links, key=lambda link: link.url) if self.unique else links
        return links

    def extract_links(self, response):
        # wrapper needed to allow working directly with text
        links = self._extract_links(response.body, response.url, response.encoding)
        links = self._process_links(links)
        return links

    def reset(self):
        FixedSGMLParser.reset(self)
        self.links = []
        self.base_url = None

    def unknown_starttag(self, tag, attrs):
        if tag == 'base':
            self.base_url = dict(attrs).get('href')
        if self.scan_tag(tag):
            for attr, value in attrs:
                if self.scan_attr(attr):
                    url = self.process_value(value)
                    if url is not None:
                        link = Link(url=url)
                        self.links.append(link)
                        self.current_link = link

    def unknown_endtag(self, tag):
        self.current_link = None

    def handle_data(self, data):
        if self.current_link:
            self.current_link.text = self.current_link.text + data.strip()

    def matches(self, url):
        """This extractor matches with any url, since
        it doesn't contain any patterns"""
        return True
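The base extractor above accepts either plain strings or callables for tag and attr, plus an optional process_value hook that can rewrite an attribute value before it becomes a Link. The snippet below is a minimal, hypothetical sketch of driving BaseSgmlLinkExtractor directly with that hook; the sample markup, the regex, and the decode_js_href helper are illustrative assumptions, not part of this file.

# Illustrative sketch only (not part of sgml.py): decoding javascript: hrefs
# through the process_value hook before they are turned into Link objects.
import re
from scrapy.http import HtmlResponse
from scrapy.contrib.linkextractors.sgml import BaseSgmlLinkExtractor

def decode_js_href(value):
    # assumed markup pattern: href="javascript:openWindow('/some/path')"
    m = re.search(r"openWindow\('(.+?)'\)", value)
    return m.group(1) if m else value

body = '<a href="javascript:openWindow(\'/docs/index.html\')">docs</a>'
response = HtmlResponse(url='http://example.com/', body=body, encoding='utf-8')

lx = BaseSgmlLinkExtractor(tag='a', attr='href', process_value=decode_js_href)
print lx.extract_links(response)
# expected: [Link(url='http://example.com/docs/index.html', text=u'docs', ...)]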
_re_type = type(re.compile("", 0))
_matches = lambda url, regexs: any((r.search(url) for r in regexs))
_is_valid_url = lambda url: url.split('://', 1)[0] in set(['http', 'https', 'file'])

class SgmlLinkExtractor(BaseSgmlLinkExtractor):

    def __init__(self, allow=(), deny=(), allow_domains=(), deny_domains=(), restrict_xpaths=(),
                 tags=('a', 'area'), attrs=('href',), canonicalize=True, unique=True, process_value=None,
                 deny_extensions=None):
        self.allow_res = [x if isinstance(x, _re_type) else re.compile(x) for x in arg_to_iter(allow)]
        self.deny_res = [x if isinstance(x, _re_type) else re.compile(x) for x in arg_to_iter(deny)]
        self.allow_domains = set(arg_to_iter(allow_domains))
        self.deny_domains = set(arg_to_iter(deny_domains))
        self.restrict_xpaths = tuple(arg_to_iter(restrict_xpaths))
        self.canonicalize = canonicalize
        if deny_extensions is None:
            deny_extensions = IGNORED_EXTENSIONS
        self.deny_extensions = set(['.' + e for e in deny_extensions])
        tag_func = lambda x: x in tags
        attr_func = lambda x: x in attrs
        BaseSgmlLinkExtractor.__init__(self, tag=tag_func, attr=attr_func,
            unique=unique, process_value=process_value)

    def extract_links(self, response):
        base_url = None
        if self.restrict_xpaths:
            hxs = HtmlXPathSelector(response)
            html = ''.join(''.join(html_fragm for html_fragm in hxs.select(xpath_expr).extract())
                           for xpath_expr in self.restrict_xpaths)
            base_url = get_base_url(response)
        else:
            html = response.body
        links = self._extract_links(html, response.url, response.encoding, base_url)
        links = self._process_links(links)
        return links

    def _process_links(self, links):
        links = [x for x in links if self._link_allowed(x)]
        links = BaseSgmlLinkExtractor._process_links(self, links)
        return links

    def _link_allowed(self, link):
        parsed_url = urlparse(link.url)
        allowed = _is_valid_url(link.url)
        if self.allow_res:
            allowed &= _matches(link.url, self.allow_res)
        if self.deny_res:
            allowed &= not _matches(link.url, self.deny_res)
        if self.allow_domains:
            allowed &= url_is_from_any_domain(parsed_url, self.allow_domains)
        if self.deny_domains:
            allowed &= not url_is_from_any_domain(parsed_url, self.deny_domains)
        if self.deny_extensions:
            allowed &= not url_has_any_extension(parsed_url, self.deny_extensions)
        if allowed and self.canonicalize:
            link.url = canonicalize_url(parsed_url)
        return allowed

    def matches(self, url):
        if self.allow_domains and not url_is_from_any_domain(url, self.allow_domains):
            return False
        if self.deny_domains and url_is_from_any_domain(url, self.deny_domains):
            return False

        allowed = [regex.search(url) for regex in self.allow_res] if self.allow_res else [True]
        denied = [regex.search(url) for regex in self.deny_res] if self.deny_res else []
        return any(allowed) and not any(denied)
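
For completeness, here is a hedged sketch of how SgmlLinkExtractor is typically wired into a CrawlSpider rule; the spider name, domain, allow/deny patterns, and XPath below are illustrative assumptions rather than anything defined in this file.

# Illustrative sketch only: SgmlLinkExtractor inside a CrawlSpider rule.
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor

class ExampleSpider(CrawlSpider):
    name = 'example'
    allowed_domains = ['example.com']
    start_urls = ['http://example.com/']

    rules = (
        # Follow category pages found inside the main content area; extracted
        # links are filtered by the allow/deny patterns, deduplicated
        # (unique=True) and canonicalized before requests are scheduled.
        Rule(SgmlLinkExtractor(allow=(r'/category/',), deny=(r'/login',),
                               restrict_xpaths=('//div[@id="content"]',)),
             callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        # placeholder callback for matched pages
        pass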