
/fanficdownloader/adapters/adapter_adastrafanficcom.py

https://code.google.com/p/fanficdownloader/
# -*- coding: utf-8 -*-

# Copyright 2011 Fanficdownloader team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Software: eFiction

import time
import logging
logger = logging.getLogger(__name__)
import re
import urllib
import urllib2

from .. import BeautifulSoup as bs
from ..htmlcleanup import stripHTML
from .. import exceptions as exceptions

from base_adapter import BaseSiteAdapter, makeDate
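
# Adapter for www.adastrafanfic.com, an eFiction-based archive
# (see the "Software: eFiction" note above).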

class AdAstraFanficComSiteAdapter(BaseSiteAdapter):

    def __init__(self, config, url):
        BaseSiteAdapter.__init__(self, config, url)
        self.story.setMetadata('siteabbrev','aaff')
        self.decode = ["Windows-1252",
                       "utf8"] # 1252 is a superset of iso-8859-1.
                               # Most sites that claim to be
                               # iso-8859-1 (and some that claim to be
                               # utf8) are really windows-1252.
        self.is_adult=False

        # get storyId from url--url validation guarantees query is only sid=1234
        self.story.setMetadata('storyId',self.parsedUrl.query.split('=',)[1])

        # normalized story URL.
        self._setURL('http://' + self.getSiteDomain() + '/viewstory.php?sid='+self.story.getMetadata('storyId'))

    @staticmethod
    def getSiteDomain():
        return 'www.adastrafanfic.com'

    @classmethod
    def getSiteExampleURLs(cls):
        return "http://"+cls.getSiteDomain()+"/viewstory.php?sid=1234"

    def getSiteURLPattern(self):
        return re.escape("http://"+self.getSiteDomain()+"/viewstory.php?sid=")+r"\d+$"

    def use_pagecache(self):
        '''
        adapters that will work with the page cache need to implement
        this and change it to True.
        '''
        return True
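
    # Fetch the story's table-of-contents page (index=1), handle the site's
    # adult-content gate, then pull the title, author, chapter list, and the
    # labeled metadata fields out of the page.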
    def extractChapterUrlsAndMetadata(self):

        if self.is_adult or self.getConfig("is_adult"):
            addurl = "&warning=5"
        else:
            addurl=""

        url = self.url+'&index=1'+addurl
        logger.debug("URL: "+url)

        try:
            data = self._fetchUrl(url)
        except urllib2.HTTPError, e:
            if e.code == 404:
                raise exceptions.StoryDoesNotExist(self.url)
            else:
                raise e

        if "Content is only suitable for mature adults. May contain explicit language and adult themes. Equivalent of NC-17." in data:
            raise exceptions.AdultCheckRequired(self.url)

        # problems with some stories, but only in calibre. I suspect
        # issues with different SGML parsers in python. This is a
        # nasty hack, but it works.
        data = data[data.index("<body"):]

        # use BeautifulSoup HTML parser to make everything easier to find.
        soup = bs.BeautifulSoup(data)

        ## Title
        a = soup.find('a', href=re.compile(r'viewstory.php\?sid='+self.story.getMetadata('storyId')+"$"))
        self.story.setMetadata('title',stripHTML(a))

        # Find authorid and URL from... author url.
        a = soup.find('a', href=re.compile(r"viewuser.php"))
        self.story.setMetadata('authorId',a['href'].split('=')[1])
        self.story.setMetadata('authorUrl','http://'+self.host+'/'+a['href'])
        self.story.setMetadata('author',a.string)

        # Find the chapters:
        for chapter in soup.findAll('a', href=re.compile(r'viewstory.php\?sid='+self.story.getMetadata('storyId')+"&chapter=\d+$")):
            # just in case there's tags, like <i> in chapter titles.
            self.chapterUrls.append((stripHTML(chapter),'http://'+self.host+'/'+chapter['href']+addurl))

        self.story.setMetadata('numChapters',len(self.chapterUrls))

        ## <meta name='description' content='&lt;p&gt;Description&lt;/p&gt; ...' >
        ## Summary, strangely, is in the content attr of a <meta name='description'> tag
        ## which is escaped HTML. Unfortunately, we can't use it because they don't
        ## escape (') chars in the desc, breaking the tag.
        #meta_desc = soup.find('meta',{'name':'description'})
        #metasoup = bs.BeautifulStoneSoup(meta_desc['content'])
        #self.story.setMetadata('description',stripHTML(metasoup))

        def defaultGetattr(d,k):
            try:
                return d[k]
            except:
                return ""

        # <span class="label">Rated:</span> NC-17<br /> etc
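        # eFiction renders each metadata field as a <span class="label">Name:</span>
        # followed by its value as sibling nodes; the loop below dispatches on the
        # label text and stores the adjacent value under the matching metadata key.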
        labels = soup.findAll('span',{'class':'label'})
        for labelspan in labels:
            value = labelspan.nextSibling
            label = labelspan.string

            if 'Summary' in label:
                ## Everything until the next span class='label'
                svalue = ''
                while value and not defaultGetattr(value,'class') == 'label':
                    svalue += str(value)
                    value = value.nextSibling
                # sometimes poorly formatted desc (<p> w/o </p>) leads
                # to all labels being included.
                svalue=svalue[:svalue.find('<span class="label">')]
                self.setDescription(url,svalue)
                #self.story.setMetadata('description',stripHTML(svalue))

            if 'Rated' in label:
                self.story.setMetadata('rating', value)

            if 'Word count' in label:
                self.story.setMetadata('numWords', value)

            if 'Categories' in label:
                cats = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=categories'))
                catstext = [cat.string for cat in cats]
                for cat in catstext:
                    self.story.addToList('category',cat.string)

            if 'Characters' in label:
                chars = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=characters'))
                charstext = [char.string for char in chars]
                for char in charstext:
                    self.story.addToList('characters',char.string)

            if 'Genre' in label:
                genres = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=class&type_id=1'))
                genrestext = [genre.string for genre in genres]
                self.genre = ', '.join(genrestext)
                for genre in genrestext:
                    self.story.addToList('genre',genre.string)

            if 'Warnings' in label:
                warnings = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=class&type_id=2'))
                warningstext = [warning.string for warning in warnings]
                self.warning = ', '.join(warningstext)
                for warning in warningstext:
                    self.story.addToList('warnings',warning.string)

            if 'Completed' in label:
                if 'Yes' in value:
                    self.story.setMetadata('status', 'Completed')
                else:
                    self.story.setMetadata('status', 'In-Progress')

            if 'Published' in label:
                self.story.setMetadata('datePublished', makeDate(value.strip(), "%d %b %Y"))

            if 'Updated' in label:
                # there's a stray [ at the end.
                #value = value[0:-1]
                self.story.setMetadata('dateUpdated', makeDate(value.strip(), "%d %b %Y"))
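
        # If the story page links to a series, follow the series link and work
        # out this story's position within the series listing.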
        try:
            # Find Series name from series URL.
            a = soup.find('a', href=re.compile(r"viewseries.php\?seriesid=\d+"))
            series_name = a.string
            series_url = 'http://'+self.host+'/'+a['href']

            # use BeautifulSoup HTML parser to make everything easier to find.
            seriessoup = bs.BeautifulSoup(self._fetchUrl(series_url))

            storyas = seriessoup.findAll('a', href=re.compile(r'^viewstory.php\?sid=\d+$'))
            i=1
            for a in storyas:
                if a['href'] == ('viewstory.php?sid='+self.story.getMetadata('storyId')):
                    self.setSeries(series_name, i)
                    self.story.setMetadata('seriesUrl',series_url)
                    break
                i+=1

        except:
            # I find it hard to care if the series parsing fails
            pass
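
    # Fetch a single chapter page and return the contents of its
    # <div id="story"> element as cleaned-up HTML.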
    def getChapterText(self, url):

        logger.debug('Getting chapter text from: %s' % url)

        data = self._fetchUrl(url)

        # problems with some stories, but only in calibre. I suspect
        # issues with different SGML parsers in python. This is a
        # nasty hack, but it works.
        data = data[data.index("<body"):]

        soup = bs.BeautifulStoneSoup(data,
                                     selfClosingTags=('br','hr')) # otherwise soup eats the br/hr tags.

        span = soup.find('div', {'id' : 'story'})

        if None == span:
            raise exceptions.FailedToDownload("Error downloading Chapter: %s! Missing required element!" % url)

        return self.utf8FromSoup(url,span)
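
# Module-level hook: the adapter loader imports this module and calls
# getClass() to obtain the adapter class for this site.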
def getClass():
    return AdAstraFanficComSiteAdapter