
/legacy.py

https://github.com/jlongman/xbmc-hockeystreams-plugin
import urllib, re, os, sys
from abstract import AbstractHockey
from BeautifulSoup import BeautifulSoup
import xbmcplugin, xbmcaddon, xbmcgui
import hs_rss

__author__ = 'longman'

# URL patterns for the live-stream and archive pages scraped below.
hqStreams = re.compile('/live_streams/.*')
hqArchives = re.compile('/hockey_archives/0/.*/[0-9]+')
archivePlaybackTypes = re.compile('/hockey_archives/0/.*/[0-9]+/[a-z_]+')
livePlaybackTypes = re.compile('/live_streams/.*/[0-9]+/[a-z_]+')

# Prefixes stripped from scraped paths to recover readable display names.
ARCHIVE_STRIP = " hockey archives 0 "
LIVE_STRIP = " live streams "

hockeystreams = 'http://www.hockeystreams.com'
archivestreams = hockeystreams + '/hockey_archives'

class LegacyHockey(AbstractHockey):
    def __init__(self, hockeyUtil, mark_broken=False, debug=False):
        super(LegacyHockey, self).__init__(hockeyUtil)
        self.__dbg__ = debug
        self.mark_broken_cdn4_links = mark_broken
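
    # Directory builder for live games. From the unpacking below,
    # hs_rss.get_rss_streams is assumed to yield (name, url, date,
    # real_date) tuples; games are listed in start-time order via the
    # real_date field (game[3]).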
    def CATEGORY_LIVE_GAMES(self, mode):
        xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_NONE)
        if self.__dbg__:
            print("hockeystreams: enter live games")
        html = urllib.urlopen("http://www4.hockeystreams.com/rss/streams.php")
        games = hs_rss.get_rss_streams(html, _debug_=self.__dbg__)
        for gameName, url, date, real_date in sorted(games, key=lambda game: game[3]):
            if ' - ' in date:
                # Append the portion of the date after " - " to the title.
                gameName = gameName + " " + date.split(' - ', 1)[1]
            self.util.addDir(gameName, url, mode, '', 1, gamename=gameName, fullDate=real_date)
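
    # Same idea as CATEGORY_LIVE_GAMES, but for the archive RSS feed:
    # the most recent games (15, per the method name), newest first.
    # Unlike the live feed, this feed returns site-relative URLs, hence
    # the prefixing below.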
    def CATEGORY_LAST_15_GAMES(self, mode):
        xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_NONE)
        if self.__dbg__:
            print("hockeystreams: enter last 15 games")
        html = urllib.urlopen("http://www6.hockeystreams.com/rss/archives.php")
        games = hs_rss.get_archive_rss_streams(html, _debug_=self.__dbg__)
        for gameName, url, date, real_date in sorted(games, key=lambda game: game[3], reverse=True):
            gameName = gameName + " " + date
            url = hockeystreams + url  # the feed returns site-relative links
            self.util.addDir(gameName, url, mode, '', 1, gamename=gameName, fullDate=real_date)
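
    # Lists archived games for a single date. get_date (inherited,
    # presumably from AbstractHockey) formats day/month/year into the
    # path segment used by the /hockey_archives pages.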
    def ARCHIVE_GAMES_BY_DATE(self, year, month, day):
        mode = 1000
        if self.__dbg__:
            print("hockeystreams: enter archive games")
        archiveDate = self.get_date(day, month, year)
        url = archivestreams + '/' + archiveDate + '/'
        strip = ARCHIVE_STRIP
        games = self.find_hockey_game_names(url, hqArchives)
        for k, v in games.iteritems():
            # Drop everything up to and including the " hockey archives 0 " prefix.
            gameName = k[k.index(strip) + len(strip):]
            self.util.addDir(gameName, v, mode, '', 1, gamename=gameName)
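
    # Lists teams found on the archive index for self.archiveDate (an
    # attribute assumed to be set by AbstractHockey), attaching each
    # team's logo GIF from the site's images directory.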
    def CATEGORY_BY_TEAM(self, mode):
        url = archivestreams
        if self.__dbg__:
            print("hockeystreams: enter team")
        # Simplified pattern: team links directly under the archive date page.
        teamNames = re.compile('/hockey_archives/' + self.archiveDate + '/[a-z]+_?[a-z]?')
        foundTeams = self.util.soupIt(url + "/" + self.archiveDate, "attrs", teamNames)
        for team in foundTeams:
            if self.__dbg__:
                print("hockeystreams: \t\t soupfound team %s" % (str(team)))
            ending = str(team['href'])
            teamPage = hockeystreams + ending
            teamName = os.path.basename(teamPage)
            teamName = re.sub('_|/', ' ', teamName)
            if self.__dbg__:
                print("hockeystreams: \t\t team %s" % teamName)
            # Logo file name: the team name up to its last space, with the
            # remaining spaces removed, matching the site's image naming.
            image_name = teamName[0:teamName.rfind(' ')]
            image_name = image_name.replace(' ', '')
            # teamGIF = "http://www5.hockeystreams.com/images/teams/big/" + image_name + ".gif"
            teamGIF = "http://www5.hockeystreams.com/images/teams/" + image_name + ".gif"
            if self.__dbg__:
                print("hockeystreams: \t\t team %s %s" % (teamName, teamGIF))
            self.util.addDir(teamName, teamPage, mode, teamGIF, 82)
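
    # Lists every archived game linked from a single team's page; the
    # ARCHIVE_STRIP prefix is removed to leave a readable game name.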
    def ARCHIVE_GAMES_BY_TEAM(self, url, mode):
        if self.__dbg__:
            print("hockeystreams: enter archive games")
        strip = ARCHIVE_STRIP
        games = self.find_hockey_game_names(url, hqArchives)
        for k, v in games.iteritems():
            gameName = k[k.find(strip) + len(strip):]
            self.util.addDir(gameName, v, mode, '', 1000, gamename=gameName)
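
    # Scrapes game links from a page. soupIt (on the shared util helper)
    # is assumed to return BeautifulSoup tags whose hrefs match the
    # gameType regex; the result maps display names to absolute URLs.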
    def find_hockey_game_names(self, url, gameType):
        games = {}
        foundGames = self.util.soupIt(url, 'attrs', gameType)
        for test in foundGames:
            if self.__dbg__:
                print("hockeystreams: \t\t foundGames %s" % str(test))
            ending = str(test['href'])
            gamePage = hockeystreams + ending
            gameName = os.path.dirname(gamePage)
            # Skip links whose parent path is just the archive-date index
            # rather than a game page.
            if "archive" in url and gameName.endswith(self.archiveDate):
                if self.__dbg__:
                    print("\t\t\tskipping " + str(ending))
                continue
            gameName = re.sub('_|/', ' ', gameName)
            if self.__dbg__:
                print("hockeystreams: \t\t gamename %s" % gameName)
            games[gameName] = gamePage
        del foundGames
        return games
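
    # Entry point for listing a game's playback qualities. The "slow"
    # path fetches each quality page to resolve direct links (and can
    # mark broken CDN hosts); the "quick" path lists them as-is.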
    def QUALITY(self, url, gamename):
        if self.__dbg__:
            print("hockeystreams: enter quality")
        games = self.find_qualities(url)
        if not self.mark_broken_cdn4_links:
            return self.QUALITY_quick(games, gamename)
        else:
            return self.QUALITY_slow(games, gamename)
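
    # Slow path: one extra HTTP fetch per quality to extract direct and
    # Silverlight links from the playback page's <input> elements.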
    def QUALITY_slow(self, games, gamename):
        directLinks = {}
        silverLinks = {}
        for k, v in games.iteritems():
            if self.__dbg__:
                print("game qs: " + str(games))
            foundGames = self.util.soupIt(v, 'input', None, True)
            for test in foundGames:
                if self.__dbg__:
                    print("hockeystreams: \t\t soupfound directs %s" % test)
                if 'direct_link' in test.get('id', ''):
                    directLinks[k] = test['value']
                if 'silverlight' in test.get('href', '') and 'archive' in test.get('href', ''):
                    silverLinks["silverlight"] = test.get('href', '')
        for name, url in directLinks.iteritems():
            qualityName = name  # name[name.rindex('/'):]
            # Optionally flag links served from the cdn-a-4 host as broken
            # with a trailing "*".
            if self.mark_broken_cdn4_links and 'cdn-a-4' in url:
                qualityName += "*"
            self.util.addLink(qualityName, gamename, '', url, '', 1)
        for name, url in silverLinks.iteritems():
            self.util.addLink("has " + name, name, '', url, '', 1)
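
    # Quick path: add one link per scraped quality without fetching the
    # individual playback pages.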
    def QUALITY_quick(self, games, gamename):
        for quality, url in games.iteritems():
            if self.__dbg__:
                print("game qs: " + str(games))
            self.util.addLink(quality, gamename, '', url, '', 1, 2000)
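
    # Maps playback-type names to their page URLs for one game, choosing
    # the href regex by whether this is an archive or a live URL.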
    def find_qualities(self, url):
        games = {}
        if self.__dbg__:
            print("hockeystreams: \t\t find qs ")
        # Archive and live pages use different playback-type URL patterns.
        if 'archive' in url:
            foundQs = self.util.soupIt(url, 'attrs', archivePlaybackTypes, True)
        else:
            foundQs = self.util.soupIt(url, 'attrs', livePlaybackTypes, True)
        for test in foundQs:
            if self.__dbg__:
                print("hockeystreams: \t\t soupfound qs %s" % (str(test)))
            ending = str(test['href'])
            gamePage = hockeystreams + ending
            gameName = os.path.basename(gamePage)
            gameName = re.sub('_|/', ' ', gameName)
            if self.__dbg__:
                print("hockeystreams: \t\t q: %s" % gameName)
            games[gameName] = gamePage
        del foundQs
        return games
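
# --- Usage sketch (illustrative, not part of the plugin) -----------------
# A minimal sketch of how a plugin entry point might drive this class;
# 'util' stands in for whatever utility object the plugin's default.py
# passes in, and the mode values are placeholders, not the plugin's real
# dispatch table.
#
#   hockey = LegacyHockey(util, mark_broken=False, debug=True)
#   hockey.CATEGORY_LIVE_GAMES(mode=1)       # live games directory
#   hockey.CATEGORY_LAST_15_GAMES(mode=1)    # recent archives
#   hockey.QUALITY(game_url, game_name)      # qualities for one game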