PageRenderTime 23ms CodeModel.GetById 28ms RepoModel.GetById 0ms app.codeStats 0ms

/app/lib/provider/yarr/sources/tpb.py

https://github.com/SpLord/CouchPotato
Python | 160 lines | 145 code | 14 blank | 1 comment | 10 complexity | baa22ab1c5086627deae635e5c3e1f7d MD5 | raw file
  1. from app.config.cplog import CPLog
  2. from app.lib.provider.yarr.base import torrentBase
  3. from app.lib.qualities import Qualities
  4. from dateutil.parser import parse
  5. from imdb.parser.http.bsouplxml._bsoup import SoupStrainer, BeautifulSoup
  6. from urllib import quote_plus
  7. from urllib2 import URLError
  8. import os
  9. import re
  10. import time
  11. import urllib2
# Module-level logger for this provider module.
log = CPLog(__name__)
class tpb(torrentBase):
    """Api for the Pirate Bay"""

    name = 'The Pirate Bay'
    # Download URL template: %s = torrent id, %s = url-quoted torrent name.
    downloadUrl = 'http://torrents.thepiratebay.org/%s/%s.torrent'
    # Detail/NFO page templates: %s = torrent id.
    nfoUrl = 'https://thepiratebay.org/torrent/%s'
    detailUrl = 'https://thepiratebay.org/torrent/%s'
    # Search template: %s = quoted query, %d = TPB category id.
    apiUrl = 'https://thepiratebay.org/search/%s/0/7/%d'

    # Map TPB category ids to the quality types they cover.
    catIds = {
        207: ['720p', '1080p'],
        200: ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr', 'brrip'],
        202: ['dvdr']
    }
    # Fallback category when a quality type is not listed above.
    catBackupId = 200

    # Extra negative search keywords appended per quality type, to filter
    # out re-encodes when searching for true HD releases.
    ignoreString = {
        '720p': ' -brrip -bdrip',
        '1080p': ' -brrip -bdrip'
    }
    def __init__(self, config):
        """Store the application config; settings are read from its 'Torrents' section."""
        log.info('Using TPB.org provider')
        self.config = config
  33. def conf(self, option):
  34. return self.config.get('Torrents', option)
  35. def enabled(self):
  36. return self.conf('enabled') and (not self.conf('sendto') == 'Blackhole' or (self.conf('blackhole') and os.path.isdir(self.conf('blackhole'))))
    def find(self, movie, quality, type):
        """Search TPB for *movie* and return a list of matching feed items.

        movie:   object with at least a .name attribute (used for search & matching)
        quality: quality search string appended to the movie name
        type:    quality type key (e.g. '720p'), selects category and ignore words
        Returns an empty list when disabled, unreachable, or nothing matches.
        """
        results = []
        if not self.enabled() or not self.isAvailable(self.apiUrl):
            return results

        # Build the search URL: quoted "<name> <quality>" plus any negative
        # keywords, restricted to the TPB category for this quality type.
        url = self.apiUrl % (quote_plus(self.toSearchString(movie.name + ' ' + quality) + self.makeIgnoreString(type)), self.getCatId(type))
        log.info('Searching: %s' % url)

        try:
            data = urllib2.urlopen(url, timeout = self.timeout).read()
        except (IOError, URLError):
            log.error('Failed to open %s.' % url)
            return results

        try:
            # Restrict BeautifulSoup to <table> elements to keep parsing cheap.
            tables = SoupStrainer('table')
            html = BeautifulSoup(data, parseOnlyThese = tables)
            resultTable = html.find('table', attrs = {'id':'searchResult'})
            for result in resultTable.findAll('tr'):
                details = result.find('a', attrs = {'class':'detLink'})
                if details:
                    # The numeric torrent id is embedded in the detail link path.
                    href = re.search('/(?P<id>\d+)/', details['href'])
                    id = href.group('id')
                    name = self.toSaveString(details.contents[0])

                    # Description cell looks like "Uploaded <date>, Size <size>, ...".
                    desc = result.find('font', attrs = {'class':'detDesc'}).contents[0].split(',')
                    date = ''
                    size = 0
                    for item in desc:
                        # Weird date stuff
                        if 'uploaded' in item.lower():
                            date = item.replace('Uploaded', '')
                            date = date.replace('Today', '')
                            # Do something with yesterday
                            yesterdayMinus = 0
                            if 'Y-day' in date:
                                date = date.replace('Y-day', '')
                                yesterdayMinus = 86400  # one day in seconds
                            datestring = date.replace('&nbsp;', ' ').strip()
                            # Parse what's left and convert to a unix timestamp.
                            date = int(time.mktime(parse(datestring).timetuple())) - yesterdayMinus
                        # size
                        elif 'size' in item.lower():
                            size = item.replace('Size', '')

                    # Seeders/leechers are the only purely-numeric <td> cells
                    # in the row; collect the ints, ignore everything else.
                    seedleech = []
                    for td in result.findAll('td'):
                        try:
                            seedleech.append(int(td.contents[0]))
                        except ValueError:
                            pass

                    seeders = 0
                    leechers = 0
                    if len(seedleech) == 2 and seedleech[0] > 0 and seedleech[1] > 0:
                        seeders = seedleech[0]
                        leechers = seedleech[1]

                    # to item
                    new = self.feedItem()
                    new.id = id
                    new.type = 'torrent'
                    new.name = name
                    new.date = date
                    new.size = self.parseSize(size)
                    new.seeders = seeders
                    new.leechers = leechers
                    new.url = self.downloadLink(id, name)
                    # Score = base match score + uploader reputation + seeder bonus.
                    new.score = self.calcScore(new, movie) + self.uploader(result) + (seeders / 10)

                    # Accept only seeded torrents older than the configured wait
                    # period (hours) that meet the minimum size for this type.
                    if seeders > 0 and (new.date + (int(self.conf('wait')) * 60 * 60) < time.time()) and Qualities.types.get(type).get('minSize') <= new.size:
                        new.detailUrl = self.detailLink(id)
                        new.content = self.getInfo(new.detailUrl)
                        if self.isCorrectMovie(new, movie, type):
                            results.append(new)
                            log.info('Found: %s' % new.name)

            return results
        except AttributeError:
            # resultTable was None (or unexpected markup): no results page.
            log.debug('No search results found.')
            return []
  108. def makeIgnoreString(self, type):
  109. ignore = self.ignoreString.get(type)
  110. return ignore if ignore else ''
  111. def uploader(self, html):
  112. score = 0
  113. if html.find('img', attr = {'alt':'VIP'}):
  114. score += 3
  115. if html.find('img', attr = {'alt':'Trusted'}):
  116. score += 1
  117. return score
  118. def getInfo(self, url):
  119. log.debug('Getting info: %s' % url)
  120. try:
  121. data = urllib2.urlopen(url, timeout = self.timeout).read()
  122. pass
  123. except IOError:
  124. log.error('Failed to open %s.' % url)
  125. return ''
  126. div = SoupStrainer('div')
  127. html = BeautifulSoup(data, parseOnlyThese = div)
  128. html = html.find('div', attrs = {'class':'nfo'})
  129. return str(html).decode("utf-8", "replace")
  130. def downloadLink(self, id, name):
  131. return self.downloadUrl % (id, quote_plus(name))