PageRenderTime 46ms CodeModel.GetById 17ms RepoModel.GetById 0ms app.codeStats 0ms

/addons/plugin.video.diafilms/default.py

http://seppius-xbmc-repo.googlecode.com/
Python | 258 lines | 234 code | 3 blank | 21 comment | 2 complexity | d15881f9343cc2e8a6f13a03525cce22 MD5 | raw file
Possible License(s): GPL-3.0, AGPL-1.0
  1. #!/usr/bin/python
  2. # -*- coding: utf-8 -*-
  3. #/*
  4. # * Copyright (C) 2011 Silen
  5. # *
  6. # *
  7. # * This Program is free software; you can redistribute it and/or modify
  8. # * it under the terms of the GNU General Public License as published by
  9. # * the Free Software Foundation; either version 2, or (at your option)
  10. # * any later version.
  11. # *
  12. # * This Program is distributed in the hope that it will be useful,
  13. # * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. # * GNU General Public License for more details.
  16. # *
  17. # * You should have received a copy of the GNU General Public License
  18. # * along with this program; see the file COPYING. If not, write to
  19. # * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  20. # * http://www.gnu.org/copyleft/gpl.html
  21. # */
  22. import re, os, urllib, urllib2, cookielib, time
  23. import xbmc, xbmcgui, xbmcplugin, xbmcaddon
  24. from datetime import date
  25. import resources.lib.diafilms as diafilms
  26. Addon = xbmcaddon.Addon(id='plugin.video.diafilms')
  27. try:
  28. sys.path.append(os.path.join(Addon.getAddonInfo('path'), r'resources', r'lib'))
  29. from BeautifulSoup import BeautifulSoup
  30. except:
  31. try:
  32. sys.path.insert(0, os.path.join(Addon.getAddonInfo('path'), r'resources', r'lib'))
  33. from BeautifulSoup import BeautifulSoup
  34. except:
  35. sys.path.append(os.path.join(os.getcwd(), r'resources', r'lib'))
  36. from BeautifulSoup import BeautifulSoup
  37. icon = xbmc.translatePath(os.path.join(os.getcwd().replace(';', ''),'icon.png'))
  38. import HTMLParser
  39. hpar = HTMLParser.HTMLParser()
  40. h = int(sys.argv[1])
  41. icon = xbmc.translatePath(os.path.join(Addon.getAddonInfo('path'),'icon.png'))
  42. fcookies = xbmc.translatePath(os.path.join(Addon.getAddonInfo('path'), r'resources', r'data', r'cookies.txt'))
  43. def showMessage(heading, message, times = 3000):
  44. xbmc.executebuiltin('XBMC.Notification("%s", "%s", %s, "%s")'%(heading, message, times, icon))
  45. #---------- get categories ----------------------------------------------------
  46. def Get_Categories():
  47. # get diafilm categories
  48. url = 'http://www.diafilmy.su/'
  49. post = None
  50. request = urllib2.Request(url, post)
  51. request.add_header('User-Agent', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C)')
  52. request.add_header('Host', 'diafilmy.su')
  53. request.add_header('Accept', '*/*')
  54. request.add_header('Accept-Language', 'ru-RU')
  55. request.add_header('Referer', 'http://google.com')
  56. try:
  57. f = urllib2.urlopen(request)
  58. except IOError, e:
  59. if hasattr(e, 'reason'):
  60. xbmc.log('We failed to reach a server. Reason: '+ e.reason)
  61. elif hasattr(e, 'code'):
  62. xbmc.log('The server couldn\'t fulfill the request. Error code: '+ e.code)
  63. html = f.read()
  64. # -- parsing web page ------------------------------------------------------
  65. soup = BeautifulSoup(html, fromEncoding="windows-1251")
  66. df_nav = soup.findAll("li", { "class" : "sublnk" })
  67. for df in df_nav:
  68. try:
  69. if '/diafilmy' in df.find("a")["href"]:
  70. for dfr in df.findAll('li'):
  71. name = unescape(dfr.find('b').text).encode('utf-8')
  72. url = 'http://www.diafilmy.su' + dfr.find('a')['href']
  73. i = xbmcgui.ListItem('[COLOR FF00FF00]'+name+'[/COLOR]', iconImage=icon, thumbnailImage=icon)
  74. u = sys.argv[0] + '?mode=LIST'
  75. u += '&name=%s'%urllib.quote_plus(name)
  76. u += '&url=%s'%urllib.quote_plus(url)
  77. xbmcplugin.addDirectoryItem(h, u, i, True)
  78. except:
  79. pass
  80. xbmcplugin.endOfDirectory(h)
  81. #--- get number of pages for selected category ---------------------------------
  82. def Get_Page_Number(url):
  83. post = None
  84. request = urllib2.Request(url, post)
  85. request.add_header('User-Agent', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C)')
  86. request.add_header('Host', 'diafilmy.su')
  87. request.add_header('Accept', '*/*')
  88. request.add_header('Accept-Language', 'ru-RU')
  89. request.add_header('Referer', 'http://google.com')
  90. try:
  91. f = urllib2.urlopen(request)
  92. except IOError, e:
  93. if hasattr(e, 'reason'):
  94. xbmc.log('We failed to reach a server. Reason: '+ e.reason)
  95. elif hasattr(e, 'code'):
  96. xbmc.log('The server couldn\'t fulfill the request. Error code: '+ e.code)
  97. html = f.read()
  98. # -- parsing web page ------------------------------------------------------
  99. soup = BeautifulSoup(html, fromEncoding="windows-1251")
  100. ret = 1
  101. try:
  102. df_nav = soup.find("div", { "class" : "navigation" })
  103. for df in df_nav.findAll('a'):
  104. try:
  105. if int(df.text) > ret:
  106. ret = int(df.text)
  107. except: pass
  108. except: pass
  109. return ret
  110. #---------- get list of diafilms -----------------------------------------------
  111. def Get_List(params):
  112. # -- parameters
  113. url = urllib.unquote_plus(params['url'])
  114. xbmc.log(url)
  115. # get number of webpages to grab information
  116. page_num = Get_Page_Number(url)
  117. # get all serials
  118. for count in range(1, page_num+1):
  119. Get_List_by_Page(url + '/page/'+str(count)+'/')
  120. xbmcplugin.addSortMethod(h, sortMethod=xbmcplugin.SORT_METHOD_LABEL)
  121. xbmcplugin.endOfDirectory(h)
  122. def Get_List_by_Page(url2):
  123. xbmc.log(' '+url2)
  124. # get diafilm list
  125. post = None
  126. request = urllib2.Request(url2, post)
  127. request.add_header('User-Agent', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C)')
  128. request.add_header('Host', 'diafilmy.su')
  129. request.add_header('Accept', '*/*')
  130. request.add_header('Accept-Language', 'ru-RU')
  131. request.add_header('Referer', 'http://google.com')
  132. try:
  133. f = urllib2.urlopen(request)
  134. except IOError, e:
  135. if hasattr(e, 'reason'):
  136. xbmc.log('We failed to reach a server. Reason: '+ e.reason)
  137. elif hasattr(e, 'code'):
  138. xbmc.log('The server couldn\'t fulfill the request. Error code: '+ e.code)
  139. html = f.read()
  140. # -- parsing web page ------------------------------------------------------
  141. soup = BeautifulSoup(html, fromEncoding="windows-1251")
  142. df_nav = soup.findAll('div', {'class':'news'})
  143. for df in df_nav:
  144. name = unescape(df.find('h3').find('a').text).encode('utf-8')
  145. url = df.find('h3').find('a')['href']
  146. img = df.find('img')['src']
  147. i = xbmcgui.ListItem(name, iconImage=img, thumbnailImage=img)
  148. u = sys.argv[0] + '?mode=PLAY'
  149. u += '&name=%s'%urllib.quote_plus(name)
  150. u += '&url=%s'%urllib.quote_plus(url)
  151. u += '&img=%s'%urllib.quote_plus(img)
  152. xbmcplugin.addDirectoryItem(h, u, i, False)
  153. #-------------------------------------------------------------------------------
  154. def PLAY(params):
  155. # -- parameters
  156. url = urllib.unquote_plus(params['url'])
  157. # -- initialize GUI
  158. path = Addon.getAddonInfo('path')
  159. ui = diafilms.Diafilm('Diafilms.xml', path, 'default', '720p')
  160. ui.Set_URL(url)
  161. # -- show images
  162. ui.doModal()
  163. del ui
  164. try: sys.modules.clear()
  165. except: pass
  166. #-------------------------------------------------------------------------------
  167. def unescape(text):
  168. try:
  169. text = hpar.unescape(text)
  170. except:
  171. text = hpar.unescape(text.decode('utf8'))
  172. try:
  173. text = unicode(text, 'utf-8')
  174. except:
  175. text = text
  176. return text
  177. #-------------------------------------------------------------------------------
  178. def get_params(paramstring):
  179. param=[]
  180. if len(paramstring)>=2:
  181. params=paramstring
  182. cleanedparams=params.replace('?','')
  183. if (params[len(params)-1]=='/'):
  184. params=params[0:len(params)-2]
  185. pairsofparams=cleanedparams.split('&')
  186. param={}
  187. for i in range(len(pairsofparams)):
  188. splitparams={}
  189. splitparams=pairsofparams[i].split('=')
  190. if (len(splitparams))==2:
  191. param[splitparams[0]]=splitparams[1]
  192. return param
#-------------------------------------------------------------------------------
# -- plugin entry point: dispatch on the 'mode' query parameter -----------------
params=get_params(sys.argv[2])
# get cookies from last session
cj = cookielib.FileCookieJar(fcookies)
hr = urllib2.HTTPCookieProcessor(cj)
opener = urllib2.build_opener(hr)
urllib2.install_opener(opener)
mode = None
try:
    mode = urllib.unquote_plus(params['mode'])
except:
    # no 'mode' parameter -- either params is the empty list returned by
    # get_params (TypeError) or the key is absent (KeyError): show the
    # category root menu
    Get_Categories()
if mode == 'LIST':
    Get_List(params)
elif mode == 'PLAY':
    PLAY(params)