
/lib/imdb/parser/sql/__init__.py

https://gitlab.com/akila-33/Sick-Beard
  1. """
  2. parser.sql package (imdb package).
  3. This package provides the IMDbSqlAccessSystem class used to access
  4. IMDb's data through a SQL database. Every database supported by
  5. the SQLObject _AND_ SQLAlchemy Object Relational Managers is available.
  6. The imdb.IMDb function will return an instance of this class when
  7. called with the 'accessSystem' argument set to "sql", "database" or "db".
  8. Copyright 2005-2010 Davide Alberani <da@erlug.linux.it>
  9. This program is free software; you can redistribute it and/or modify
  10. it under the terms of the GNU General Public License as published by
  11. the Free Software Foundation; either version 2 of the License, or
  12. (at your option) any later version.
  13. This program is distributed in the hope that it will be useful,
  14. but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. GNU General Public License for more details.
  17. You should have received a copy of the GNU General Public License
  18. along with this program; if not, write to the Free Software
  19. Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  20. """
  21. # FIXME: this whole module was written in a veeery short amount of time.
  22. # The code should be commented, rewritten and cleaned. :-)
  23. import re
  24. import logging
  25. from difflib import SequenceMatcher
  26. from codecs import lookup
  27. from imdb import IMDbBase
  28. from imdb.utils import normalizeName, normalizeTitle, build_title, \
  29. build_name, analyze_name, analyze_title, \
  30. canonicalTitle, canonicalName, re_titleRef, \
  31. build_company_name, re_episodes, _unicodeArticles, \
  32. analyze_company_name, re_year_index, re_nameRef
  33. from imdb.Person import Person
  34. from imdb.Movie import Movie
  35. from imdb.Company import Company
  36. from imdb._exceptions import IMDbDataAccessError, IMDbError
  37. # Logger for miscellaneous functions.
  38. _aux_logger = logging.getLogger('imdbpy.parser.sql.aux')
  39. # =============================
  40. # Things that once upon a time were in imdb.parser.common.locsql.
  41. def titleVariations(title, fromPtdf=0):
  42. """Build title variations useful for searches; if fromPtdf is true,
  43. the input is assumed to be in the plain text data files format."""
  44. if fromPtdf: title1 = u''
  45. else: title1 = title
  46. title2 = title3 = u''
  47. if fromPtdf or re_year_index.search(title):
  48. # If it appears to have a (year[/imdbIndex]) indication,
  49. # assume that a long imdb canonical name was provided.
  50. titldict = analyze_title(title, canonical=1)
  51. # title1: the canonical name.
  52. title1 = titldict['title']
  53. if titldict['kind'] != 'episode':
  54. # title3: the long imdb canonical name.
  55. if fromPtdf: title3 = title
  56. else: title3 = build_title(titldict, canonical=1, ptdf=1)
  57. else:
  58. title1 = normalizeTitle(title1)
  59. title3 = build_title(titldict, canonical=1, ptdf=1)
  60. else:
  61. # Just a title.
  62. # title1: the canonical title.
  63. title1 = canonicalTitle(title)
  64. title3 = u''
  65. # title2 is title1 without the article, or title1 unchanged.
  66. if title1:
  67. title2 = title1
  68. t2s = title2.split(u', ')
  69. if t2s[-1].lower() in _unicodeArticles:
  70. title2 = u', '.join(t2s[:-1])
  71. _aux_logger.debug('title variations: 1:[%s] 2:[%s] 3:[%s]',
  72. title1, title2, title3)
  73. return title1, title2, title3
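# Illustrative example of the variations built above (the exact values depend
# on imdb.utils, so treat them as indicative):
#   titleVariations(u'The Matrix (1999)') should return something like
#   (u'Matrix, The', u'Matrix', u'Matrix, The (1999)')
#   i.e. canonical title, title without the article, long imdb canonical title.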
  74. re_nameIndex = re.compile(r'\(([IVXLCDM]+)\)')
  75. def nameVariations(name, fromPtdf=0):
  76. """Build name variations useful for searches; if fromPtdf is true,
  77. the input is assumed to be in the plain text data files format."""
  78. name1 = name2 = name3 = u''
  79. if fromPtdf or re_nameIndex.search(name):
  80. # We've a name with an (imdbIndex)
  81. namedict = analyze_name(name, canonical=1)
  82. # name1 is the name in the canonical format.
  83. name1 = namedict['name']
  84. # name3 is the canonical name with the imdbIndex.
  85. if fromPtdf:
  86. if namedict.has_key('imdbIndex'):
  87. name3 = name
  88. else:
  89. name3 = build_name(namedict, canonical=1)
  90. else:
  91. # name1 is the name in the canonical format.
  92. name1 = canonicalName(name)
  93. name3 = u''
  94. # name2 is the name in the normal format, if it differs from name1.
  95. name2 = normalizeName(name1)
  96. if name1 == name2: name2 = u''
  97. _aux_logger.debug('name variations: 1:[%s] 2:[%s] 3:[%s]',
  98. name1, name2, name3)
  99. return name1, name2, name3
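# Illustrative example (indicative values):
#   nameVariations(u'Davide Alberani') should return something like
#   (u'Alberani, Davide', u'Davide Alberani', u'')
#   i.e. canonical name, normalized name, long canonical name (empty when the
#   input has no imdbIndex and is not in ptdf format).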
  100. try:
  101. from cutils import ratcliff as _ratcliff
  102. def ratcliff(s1, s2, sm):
  103. """Return the Ratcliff-Obershelp value between the two strings,
  104. using the C implementation."""
  105. return _ratcliff(s1.encode('latin_1', 'replace'),
  106. s2.encode('latin_1', 'replace'))
  107. except ImportError:
  108. _aux_logger.warn('Unable to import the cutils.ratcliff function.'
  109. ' Searching names and titles using the "sql"'
  110. ' data access system will be slower.')
  111. def ratcliff(s1, s2, sm):
  112. """Ratcliff-Obershelp similarity."""
  113. STRING_MAXLENDIFFER = 0.7
  114. s1len = len(s1)
  115. s2len = len(s2)
  116. if s1len < s2len:
  117. threshold = float(s1len) / s2len
  118. else:
  119. threshold = float(s2len) / s1len
  120. if threshold < STRING_MAXLENDIFFER:
  121. return 0.0
  122. sm.set_seq2(s2.lower())
  123. return sm.ratio()
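# Note on both ratcliff() variants above: the caller is expected to have
# already set the first sequence on the SequenceMatcher passed as sm (the
# C-backed version simply ignores it); the pure-Python fallback also returns
# 0.0 outright when the shorter string is less than 70% of the length of the
# longer one (STRING_MAXLENDIFFER).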
  124. def merge_roles(mop):
  125. """Merge multiple roles."""
  126. new_list = []
  127. for m in mop:
  128. if m in new_list:
  129. keep_this = new_list[new_list.index(m)]
  130. if not isinstance(keep_this.currentRole, list):
  131. keep_this.currentRole = [keep_this.currentRole]
  132. keep_this.currentRole.append(m.currentRole)
  133. else:
  134. new_list.append(m)
  135. return new_list
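# In other words: when the same Movie/Person object appears more than once in
# the input list (e.g. one cast entry for each character played), the
# duplicates are folded into the first occurrence and its currentRole becomes
# a list of roles.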
  136. def scan_names(name_list, name1, name2, name3, results=0, ro_thresold=None,
  137. _scan_character=False):
  138. """Scan a list of names, searching for best matches against
  139. the given variations."""
  140. if ro_thresold is not None: RO_THRESHOLD = ro_thresold
  141. else: RO_THRESHOLD = 0.6
  142. sm1 = SequenceMatcher()
  143. sm2 = SequenceMatcher()
  144. sm3 = SequenceMatcher()
  145. sm1.set_seq1(name1.lower())
  146. if name2: sm2.set_seq1(name2.lower())
  147. if name3: sm3.set_seq1(name3.lower())
  148. resd = {}
  149. for i, n_data in name_list:
  150. nil = n_data['name']
  151. # XXX: on Symbian, here we get a str; not sure this is the
  152. # right place to fix it.
  153. if isinstance(nil, str):
  154. nil = unicode(nil, 'latin1', 'ignore')
  155. # Distance with the canonical name.
  156. ratios = [ratcliff(name1, nil, sm1) + 0.05]
  157. namesurname = u''
  158. if not _scan_character:
  159. nils = nil.split(', ', 1)
  160. surname = nils[0]
  161. if len(nils) == 2: namesurname = '%s %s' % (nils[1], surname)
  162. else:
  163. nils = nil.split(' ', 1)
  164. surname = nils[-1]
  165. namesurname = nil
  166. if surname != nil:
  167. # Distance with the "Surname" in the database.
  168. ratios.append(ratcliff(name1, surname, sm1))
  169. if not _scan_character:
  170. ratios.append(ratcliff(name1, namesurname, sm1))
  171. if name2:
  172. ratios.append(ratcliff(name2, surname, sm2))
  173. # Distance with the "Name Surname" in the database.
  174. if namesurname:
  175. ratios.append(ratcliff(name2, namesurname, sm2))
  176. if name3:
  177. # Distance with the long imdb canonical name.
  178. ratios.append(ratcliff(name3,
  179. build_name(n_data, canonical=1), sm3) + 0.1)
  180. ratio = max(ratios)
  181. if ratio >= RO_THRESHOLD:
  182. if resd.has_key(i):
  183. if ratio > resd[i][0]: resd[i] = (ratio, (i, n_data))
  184. else: resd[i] = (ratio, (i, n_data))
  185. res = resd.values()
  186. res.sort()
  187. res.reverse()
  188. if results > 0: res[:] = res[:results]
  189. return res
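# scan_names() (like scan_titles and scan_company_names below) returns a list
# of (ratio, (ID, data)) tuples sorted with the best matches first and, when
# `results` is positive, truncated to at most `results` items; the small
# +0.05/+0.1 bonuses above favor matches against the canonical and long
# canonical forms.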
  190. def scan_titles(titles_list, title1, title2, title3, results=0,
  191. searchingEpisode=0, onlyEpisodes=0, ro_thresold=None):
  192. """Scan a list of titles, searching for best matches against
  193. the given variations."""
  194. if ro_thresold is not None: RO_THRESHOLD = ro_thresold
  195. else: RO_THRESHOLD = 0.6
  196. sm1 = SequenceMatcher()
  197. sm2 = SequenceMatcher()
  198. sm3 = SequenceMatcher()
  199. sm1.set_seq1(title1.lower())
  200. sm2.set_seq1(title2.lower())
  201. if title3:
  202. sm3.set_seq1(title3.lower())
  203. if title3[-1] == '}': searchingEpisode = 1
  204. hasArt = 0
  205. if title2 != title1: hasArt = 1
  206. resd = {}
  207. for i, t_data in titles_list:
  208. if onlyEpisodes:
  209. if t_data.get('kind') != 'episode':
  210. continue
  211. til = t_data['title']
  212. if til[-1] == ')':
  213. dateIdx = til.rfind('(')
  214. if dateIdx != -1:
  215. til = til[:dateIdx].rstrip()
  216. if not til:
  217. continue
  218. ratio = ratcliff(title1, til, sm1)
  219. if ratio >= RO_THRESHOLD:
  220. resd[i] = (ratio, (i, t_data))
  221. continue
  222. if searchingEpisode:
  223. if t_data.get('kind') != 'episode': continue
  224. elif t_data.get('kind') == 'episode': continue
  225. til = t_data['title']
  226. # XXX: on Symbian, here we get a str; not sure this is the
  227. # right place to fix it.
  228. if isinstance(til, str):
  229. til = unicode(til, 'latin1', 'ignore')
  230. # Distance with the canonical title (with or without article).
  231. # titleS -> titleR
  232. # titleS, the -> titleR, the
  233. if not searchingEpisode:
  234. til = canonicalTitle(til)
  235. ratios = [ratcliff(title1, til, sm1) + 0.05]
  236. # til2 is til without the article, if present.
  237. til2 = til
  238. tils = til2.split(', ')
  239. matchHasArt = 0
  240. if tils[-1].lower() in _unicodeArticles:
  241. til2 = ', '.join(tils[:-1])
  242. matchHasArt = 1
  243. if hasArt and not matchHasArt:
  244. # titleS[, the] -> titleR
  245. ratios.append(ratcliff(title2, til, sm2))
  246. elif matchHasArt and not hasArt:
  247. # titleS -> titleR[, the]
  248. ratios.append(ratcliff(title1, til2, sm1))
  249. else:
  250. ratios = [0.0]
  251. if title3:
  252. # Distance with the long imdb canonical title.
  253. ratios.append(ratcliff(title3,
  254. build_title(t_data, canonical=1, ptdf=1), sm3) + 0.1)
  255. ratio = max(ratios)
  256. if ratio >= RO_THRESHOLD:
  257. if resd.has_key(i):
  258. if ratio > resd[i][0]:
  259. resd[i] = (ratio, (i, t_data))
  260. else: resd[i] = (ratio, (i, t_data))
  261. res = resd.values()
  262. res.sort()
  263. res.reverse()
  264. if results > 0: res[:] = res[:results]
  265. return res
  266. def scan_company_names(name_list, name1, results=0, ro_thresold=None):
  267. """Scan a list of company names, searching for best matches against
  268. the given name. Notice that this function takes a list of
  269. strings, and not a list of dictionaries."""
  270. if ro_thresold is not None: RO_THRESHOLD = ro_thresold
  271. else: RO_THRESHOLD = 0.6
  272. sm1 = SequenceMatcher()
  273. sm1.set_seq1(name1.lower())
  274. resd = {}
  275. withoutCountry = not name1.endswith(']')
  276. for i, n in name_list:
  277. # XXX: on Symbian, here we get a str; not sure this is the
  278. # right place to fix it.
  279. if isinstance(n, str):
  280. n = unicode(n, 'latin1', 'ignore')
  281. o_name = n
  282. var = 0.0
  283. if withoutCountry and n.endswith(']'):
  284. cidx = n.rfind('[')
  285. if cidx != -1:
  286. n = n[:cidx].rstrip()
  287. var = -0.05
  288. # Distance with the company name.
  289. ratio = ratcliff(name1, n, sm1) + var
  290. if ratio >= RO_THRESHOLD:
  291. if resd.has_key(i):
  292. if ratio > resd[i][0]: resd[i] = (ratio,
  293. (i, analyze_company_name(o_name)))
  294. else:
  295. resd[i] = (ratio, (i, analyze_company_name(o_name)))
  296. res = resd.values()
  297. res.sort()
  298. res.reverse()
  299. if results > 0: res[:] = res[:results]
  300. return res
  301. try:
  302. from cutils import soundex
  303. except ImportError:
  304. _aux_logger.warn('Unable to import the cutils.soundex function.'
  305. ' Searches of movie titles and person names will be'
  306. ' a bit slower.')
  307. _translate = dict(B='1', C='2', D='3', F='1', G='2', J='2', K='2', L='4',
  308. M='5', N='5', P='1', Q='2', R='6', S='2', T='3', V='1',
  309. X='2', Z='2')
  310. _translateget = _translate.get
  311. _re_non_ascii = re.compile(r'^[^a-z]*', re.I)
  312. SOUNDEX_LEN = 5
  313. def soundex(s):
  314. """Return the soundex code for the given string."""
  315. # Maximum length of the soundex code.
  316. s = _re_non_ascii.sub('', s)
  317. if not s: return None
  318. s = s.upper()
  319. soundCode = s[0]
  320. for c in s[1:]:
  321. cw = _translateget(c, '0')
  322. if cw != '0' and soundCode[-1] != cw:
  323. soundCode += cw
  324. return soundCode[:SOUNDEX_LEN] or None
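# Illustrative example for the pure-Python fallback above (the C version in
# cutils may differ slightly): with this table, soundex(u'Alberani') should
# yield 'A4165': the first letter is kept, the other letters are mapped to
# digits, letters mapping to '0' and digits repeating the previously kept
# code are skipped, and the result is truncated to SOUNDEX_LEN characters.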
  325. def _sortKeywords(keyword, kwds):
  326. """Sort a list of keywords, based on the searched one."""
  327. sm = SequenceMatcher()
  328. sm.set_seq1(keyword.lower())
  329. ratios = [(ratcliff(keyword, k, sm), k) for k in kwds]
  330. checkContained = False
  331. if len(keyword) > 4:
  332. checkContained = True
  333. for idx, data in enumerate(ratios):
  334. ratio, key = data
  335. if key.startswith(keyword):
  336. ratios[idx] = (ratio+0.5, key)
  337. elif checkContained and keyword in key:
  338. ratios[idx] = (ratio+0.3, key)
  339. ratios.sort()
  340. ratios.reverse()
  341. return [r[1] for r in ratios]
  342. def filterSimilarKeywords(keyword, kwdsIterator):
  343. """Return a sorted list of keywords similar to the one given."""
  344. seenDict = {}
  345. kwdSndx = soundex(keyword.encode('ascii', 'ignore'))
  346. matches = []
  347. matchesappend = matches.append
  348. checkContained = False
  349. if len(keyword) > 4:
  350. checkContained = True
  351. for movieID, key in kwdsIterator:
  352. if key in seenDict:
  353. continue
  354. seenDict[key] = None
  355. if checkContained and keyword in key:
  356. matchesappend(key)
  357. continue
  358. if kwdSndx == soundex(key.encode('ascii', 'ignore')):
  359. matchesappend(key)
  360. return _sortKeywords(keyword, matches)
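# Summary: a keyword coming from kwdsIterator is kept if the searched keyword
# is contained in it (only checked for searches longer than 4 characters) or
# if the two share the same soundex code; duplicates are skipped and the
# survivors are ordered by _sortKeywords().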
  361. # =============================
  362. _litlist = ['screenplay/teleplay', 'novel', 'adaption', 'book',
  363. 'production process protocol', 'interviews',
  364. 'printed media reviews', 'essays', 'other literature']
  365. _litd = dict([(x, ('literature', x)) for x in _litlist])
  366. _buslist = ['budget', 'weekend gross', 'gross', 'opening weekend', 'rentals',
  367. 'admissions', 'filming dates', 'production dates', 'studios',
  368. 'copyright holder']
  369. _busd = dict([(x, ('business', x)) for x in _buslist])
  370. def _reGroupDict(d, newgr):
  371. """Regroup keys in the d dictionary in subdictionaries, based on
  372. the scheme in the newgr dictionary.
  373. E.g.: in the newgr, an entry 'LD label': ('laserdisc', 'label')
  374. tells the _reGroupDict() function to take the entry with
  375. label 'LD label' (as received from the sql database)
  376. and put it in the subsection (another dictionary) named
  377. 'laserdisc', using the key 'label'."""
  378. r = {}
  379. newgrks = newgr.keys()
  380. for k, v in d.items():
  381. if k in newgrks:
  382. r.setdefault(newgr[k][0], {})[newgr[k][1]] = v
  383. # A not-so-clearer version:
  384. ##r.setdefault(newgr[k][0], {})
  385. ##r[newgr[k][0]][newgr[k][1]] = v
  386. else: r[k] = v
  387. return r
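# Illustrative example (made-up values), following the docstring above:
#   _reGroupDict({'LD label': 'SF 047', 'title': u'Gilda'},
#                {'LD label': ('laserdisc', 'label')})
#   -> {'laserdisc': {'label': 'SF 047'}, 'title': u'Gilda'}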
  388. def _groupListBy(l, index):
  389. """Regroup items in a list in a list of lists, grouped by
  390. the value at the given index."""
  391. tmpd = {}
  392. for item in l:
  393. tmpd.setdefault(item[index], []).append(item)
  394. res = tmpd.values()
  395. return res
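# Illustrative example: _groupListBy([('x', 1), ('y', 1), ('z', 2)], 1)
# -> [[('x', 1), ('y', 1)], [('z', 2)]] (the order of the groups depends on
# the dictionary ordering).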
  396. def sub_dict(d, keys):
  397. """Return the subdictionary of 'd', with just the keys listed in 'keys'."""
  398. return dict([(k, d[k]) for k in keys if k in d])
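# Illustrative example:
#   sub_dict({'title': u'Gilda', 'year': 1946, 'votes': 100}, ('title', 'year'))
#   -> {'title': u'Gilda', 'year': 1946}; keys missing from d are skipped.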
  399. def get_movie_data(movieID, kindDict, fromAka=0, _table=None):
  400. """Return a dictionary containing data about the given movieID;
  401. if fromAka is true, the AkaTitle table is searched; _table is
  402. reserved for the imdbpy2sql.py script."""
  403. if _table is not None:
  404. Table = _table
  405. else:
  406. if not fromAka: Table = Title
  407. else: Table = AkaTitle
  408. m = Table.get(movieID)
  409. mdict = {'title': m.title, 'kind': kindDict[m.kindID],
  410. 'year': m.productionYear, 'imdbIndex': m.imdbIndex,
  411. 'season': m.seasonNr, 'episode': m.episodeNr}
  412. if not fromAka:
  413. if m.seriesYears is not None:
  414. mdict['series years'] = unicode(m.seriesYears)
  415. if mdict['imdbIndex'] is None: del mdict['imdbIndex']
  416. if mdict['year'] is None: del mdict['year']
  417. else:
  418. try:
  419. mdict['year'] = int(mdict['year'])
  420. except (TypeError, ValueError):
  421. del mdict['year']
  422. if mdict['season'] is None: del mdict['season']
  423. else:
  424. try: mdict['season'] = int(mdict['season'])
  425. except: pass
  426. if mdict['episode'] is None: del mdict['episode']
  427. else:
  428. try: mdict['episode'] = int(mdict['episode'])
  429. except: pass
  430. episodeOfID = m.episodeOfID
  431. if episodeOfID is not None:
  432. ser_dict = get_movie_data(episodeOfID, kindDict, fromAka)
  433. mdict['episode of'] = Movie(data=ser_dict, movieID=episodeOfID,
  434. accessSystem='sql')
  435. if fromAka:
  436. ser_note = AkaTitle.get(episodeOfID).note
  437. if ser_note:
  438. mdict['episode of'].notes = ser_note
  439. return mdict
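# Note that get_movie_data() calls itself to build the 'episode of' Movie for
# episodes; keys whose values are None (imdbIndex, year, season, episode) are
# dropped, and 'year' is also dropped when it cannot be converted to an int.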
  440. def _iterKeywords(results):
  441. """Iterate over (key.id, key.keyword) columns of a selection of
  442. the Keyword table."""
  443. for key in results:
  444. yield key.id, key.keyword
  445. def getSingleInfo(table, movieID, infoType, notAList=False):
  446. """Return a dictionary in the form {infoType: infoListOrString},
  447. retrieving a single set of information about a given movie, from
  448. the specified table."""
  449. infoTypeID = InfoType.select(InfoType.q.info == infoType)
  450. if infoTypeID.count() == 0:
  451. return {}
  452. res = table.select(AND(table.q.movieID == movieID,
  453. table.q.infoTypeID == infoTypeID[0].id))
  454. retList = []
  455. for r in res:
  456. info = r.info
  457. note = r.note
  458. if note:
  459. info += u'::%s' % note
  460. retList.append(info)
  461. if not retList:
  462. return {}
  463. if not notAList: return {infoType: retList}
  464. else: return {infoType: retList[0]}
  465. def _cmpTop(a, b, what='top 250 rank'):
  466. """Compare function used to sort top 250/bottom 10 rank."""
  467. av = int(a[1].get(what))
  468. bv = int(b[1].get(what))
  469. if av == bv:
  470. return 0
  471. return (-1, 1)[av > bv]
  472. def _cmpBottom(a, b):
  473. """Compare function used to sort top 250/bottom 10 rank."""
  474. return _cmpTop(a, b, what='bottom 10 rank')
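# _cmpTop/_cmpBottom are Python 2 cmp-style comparators (returning -1/0/1);
# presumably they are passed to list.sort() to order (movieID, data) pairs by
# their 'top 250 rank' / 'bottom 10 rank' value (an assumption: the code that
# uses them is outside this excerpt).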
  475. class IMDbSqlAccessSystem(IMDbBase):
  476. """The class used to access IMDb's data through a SQL database."""
  477. accessSystem = 'sql'
  478. _sql_logger = logging.getLogger('imdbpy.parser.sql')
  479. def __init__(self, uri, adultSearch=1, useORM=None, *arguments, **keywords):
  480. """Initialize the access system."""
  481. IMDbBase.__init__(self, *arguments, **keywords)
  482. if useORM is None:
  483. useORM = ('sqlobject', 'sqlalchemy')
  484. if not isinstance(useORM, (tuple, list)):
  485. if ',' in useORM:
  486. useORM = useORM.split(',')
  487. else:
  488. useORM = [useORM]
  489. self.useORM = useORM
  490. nrMods = len(useORM)
  491. _gotError = False
  492. DB_TABLES = []
  493. for idx, mod in enumerate(useORM):
  494. mod = mod.strip().lower()
  495. try:
  496. if mod == 'sqlalchemy':
  497. from alchemyadapter import getDBTables, NotFoundError, \
  498. setConnection, AND, OR, IN, \
  499. ISNULL, CONTAINSSTRING, toUTF8
  500. elif mod == 'sqlobject':
  501. from objectadapter import getDBTables, NotFoundError, \
  502. setConnection, AND, OR, IN, \
  503. ISNULL, CONTAINSSTRING, toUTF8
  504. else:
  505. self._sql_logger.warn('unknown module "%s"' % mod)
  506. continue
  507. self._sql_logger.info('using %s ORM', mod)
  508. # XXX: look ma'... black magic! It's used to make
  509. # TableClasses and some functions accessible
  510. # through the whole module.
  511. for k, v in [('NotFoundError', NotFoundError),
  512. ('AND', AND), ('OR', OR), ('IN', IN),
  513. ('ISNULL', ISNULL),
  514. ('CONTAINSSTRING', CONTAINSSTRING)]:
  515. globals()[k] = v
  516. self.toUTF8 = toUTF8
  517. DB_TABLES = getDBTables(uri)
  518. for t in DB_TABLES:
  519. globals()[t._imdbpyName] = t
  520. if _gotError:
  521. self._sql_logger.warn('falling back to "%s"' % mod)
  522. break
  523. except ImportError, e:
  524. if idx+1 >= nrMods:
  525. raise IMDbError('unable to use any ORM in %s: %s' % (
  526. str(useORM), str(e)))
  527. else:
  528. self._sql_logger.warn('unable to use "%s": %s' % (mod,
  529. str(e)))
  530. _gotError = True
  531. continue
  532. else:
  533. raise IMDbError('unable to use any ORM in %s' % str(useORM))
  534. # Set the connection to the database.
  535. self._sql_logger.debug('connecting to %s', uri)
  536. try:
  537. self._connection = setConnection(uri, DB_TABLES)
  538. except AssertionError, e:
  539. raise IMDbDataAccessError( \
  540. 'unable to connect to the database server; ' + \
  541. 'complete message: "%s"' % str(e))
  542. self.Error = self._connection.module.Error
  543. # Maps some IDs to the corresponding strings.
  544. self._kind = {}
  545. self._kindRev = {}
  546. self._sql_logger.debug('reading constants from the database')
  547. try:
  548. for kt in KindType.select():
  549. self._kind[kt.id] = kt.kind
  550. self._kindRev[str(kt.kind)] = kt.id
  551. except self.Error:
  552. # NOTE: you can also get the error, but - at least with
  553. # MySQL - it also contains the password, and I don't
  554. # like the idea to print it out.
  555. raise IMDbDataAccessError( \
  556. 'unable to connect to the database server')
  557. self._role = {}
  558. for rl in RoleType.select():
  559. self._role[rl.id] = str(rl.role)
  560. self._info = {}
  561. self._infoRev = {}
  562. for inf in InfoType.select():
  563. self._info[inf.id] = str(inf.info)
  564. self._infoRev[str(inf.info)] = inf.id
  565. self._compType = {}
  566. for cType in CompanyType.select():
  567. self._compType[cType.id] = cType.kind
  568. info = [(it.id, it.info) for it in InfoType.select()]
  569. self._compcast = {}
  570. for cc in CompCastType.select():
  571. self._compcast[cc.id] = str(cc.kind)
  572. self._link = {}
  573. for lt in LinkType.select():
  574. self._link[lt.id] = str(lt.link)
  575. self._moviesubs = {}
  576. # Build self._moviesubs, a dictionary used to rearrange
  577. # the data structure for a movie object.
  578. for vid, vinfo in info:
  579. if not vinfo.startswith('LD '): continue
  580. self._moviesubs[vinfo] = ('laserdisc', vinfo[3:])
  581. self._moviesubs.update(_litd)
  582. self._moviesubs.update(_busd)
  583. self.do_adult_search(adultSearch)
  584. def _findRefs(self, o, trefs, nrefs):
  585. """Find titles or names references in strings."""
  586. if isinstance(o, (unicode, str)):
  587. for title in re_titleRef.findall(o):
  588. a_title = analyze_title(title, canonical=0)
  589. rtitle = build_title(a_title, ptdf=1)
  590. if trefs.has_key(rtitle): continue
  591. movieID = self._getTitleID(rtitle)
  592. if movieID is None:
  593. movieID = self._getTitleID(title)
  594. if movieID is None:
  595. continue
  596. m = Movie(title=rtitle, movieID=movieID,
  597. accessSystem=self.accessSystem)
  598. trefs[rtitle] = m
  599. rtitle2 = canonicalTitle(a_title.get('title', u''))
  600. if rtitle2 and rtitle2 != rtitle and rtitle2 != title:
  601. trefs[rtitle2] = m
  602. if title != rtitle:
  603. trefs[title] = m
  604. for name in re_nameRef.findall(o):
  605. a_name = analyze_name(name, canonical=1)
  606. rname = build_name(a_name, canonical=1)
  607. if nrefs.has_key(rname): continue
  608. personID = self._getNameID(rname)
  609. if personID is None:
  610. personID = self._getNameID(name)
  611. if personID is None: continue
  612. p = Person(name=rname, personID=personID,
  613. accessSystem=self.accessSystem)
  614. nrefs[rname] = p
  615. rname2 = normalizeName(a_name.get('name', u''))
  616. if rname2 and rname2 != rname:
  617. nrefs[rname2] = p
  618. if name != rname and name != rname2:
  619. nrefs[name] = p
  620. elif isinstance(o, (list, tuple)):
  621. for item in o:
  622. self._findRefs(item, trefs, nrefs)
  623. elif isinstance(o, dict):
  624. for value in o.values():
  625. self._findRefs(value, trefs, nrefs)
  626. return (trefs, nrefs)
  627. def _extractRefs(self, o):
  628. """Scan for titles or names references in strings."""
  629. trefs = {}
  630. nrefs = {}
  631. try:
  632. return self._findRefs(o, trefs, nrefs)
  633. except RuntimeError, e:
  634. # Symbian/python 2.2 has a poor regexp implementation.
  635. import warnings
  636. warnings.warn('RuntimeError in '
  637. "imdb.parser.sql.IMDbSqlAccessSystem; "
  638. "if it's not a recursion limit exceeded and we're not "
  639. "running in a Symbian environment, it's a bug:\n%s" % e)
  640. return (trefs, nrefs)
  641. def _changeAKAencoding(self, akanotes, akatitle):
  642. """Return akatitle in the correct charset, as specified in
  643. the akanotes field; if akatitle doesn't need to be modified,
  644. return None."""
  645. oti = akanotes.find('(original ')
  646. if oti == -1: return None
  647. ote = akanotes[oti+10:].find(' title)')
  648. if ote != -1:
  649. cs_info = akanotes[oti+10:oti+10+ote].lower().split()
  650. for e in cs_info:
  651. # excludes some strings that clearly are not encoding.
  652. if e in ('script', '', 'cyrillic', 'greek'): continue
  653. if e.startswith('iso-') and e.find('latin') != -1:
  654. e = e[4:].replace('-', '')
  655. try:
  656. lookup(e)
  657. lat1 = akatitle.encode('latin_1', 'replace')
  658. return unicode(lat1, e, 'replace')
  659. except (LookupError, ValueError, TypeError):
  660. continue
  661. return None
  662. def _buildNULLCondition(self, col, val):
  663. """Build a comparison for columns where values can be NULL."""
  664. if val is None:
  665. return ISNULL(col)
  666. else:
  667. if isinstance(val, (int, long)):
  668. return col == val
  669. else:
  670. return col == self.toUTF8(val)
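# Illustrative behaviour: _buildNULLCondition(Title.q.imdbIndex, None) yields
# an ISNULL() test, an int/long value yields a plain equality, and any other
# value is compared after being encoded with self.toUTF8().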
  671. def _getTitleID(self, title):
  672. """Given a long imdb canonical title, returns a movieID or
  673. None if not found."""
  674. td = analyze_title(title)
  675. condition = None
  676. if td['kind'] == 'episode':
  677. epof = td['episode of']
  678. seriesID = [s.id for s in Title.select(
  679. AND(Title.q.title == self.toUTF8(epof['title']),
  680. self._buildNULLCondition(Title.q.imdbIndex,
  681. epof.get('imdbIndex')),
  682. Title.q.kindID == self._kindRev[epof['kind']],
  683. self._buildNULLCondition(Title.q.productionYear,
  684. epof.get('year'))))]
  685. if seriesID:
  686. condition = AND(IN(Title.q.episodeOfID, seriesID),
  687. Title.q.title == self.toUTF8(td['title']),
  688. self._buildNULLCondition(Title.q.imdbIndex,
  689. td.get('imdbIndex')),
  690. Title.q.kindID == self._kindRev[td['kind']],
  691. self._buildNULLCondition(Title.q.productionYear,
  692. td.get('year')))
  693. if condition is None:
  694. condition = AND(Title.q.title == self.toUTF8(td['title']),
  695. self._buildNULLCondition(Title.q.imdbIndex,
  696. td.get('imdbIndex')),
  697. Title.q.kindID == self._kindRev[td['kind']],
  698. self._buildNULLCondition(Title.q.productionYear,
  699. td.get('year')))
  700. res = Title.select(condition)
  701. try:
  702. if res.count() != 1:
  703. return None
  704. except (UnicodeDecodeError, TypeError):
  705. return None
  706. return res[0].id
  707. def _getNameID(self, name):
  708. """Given a long imdb canonical name, returns a personID or
  709. None if not found."""
  710. nd = analyze_name(name)
  711. res = Name.select(AND(Name.q.name == self.toUTF8(nd['name']),
  712. self._buildNULLCondition(Name.q.imdbIndex,
  713. nd.get('imdbIndex'))))
  714. try:
  715. c = res.count()
  716. if res.count() != 1:
  717. return None
  718. except (UnicodeDecodeError, TypeError):
  719. return None
  720. return res[0].id
  721. def _normalize_movieID(self, movieID):
  722. """Normalize the given movieID."""
  723. try:
  724. return int(movieID)
  725. except (ValueError, OverflowError):
  726. raise IMDbError('movieID "%s" can\'t be converted to integer' % \
  727. movieID)
  728. def _normalize_personID(self, personID):
  729. """Normalize the given personID."""
  730. try:
  731. return int(personID)
  732. except (ValueError, OverflowError):
  733. raise IMDbError('personID "%s" can\'t be converted to integer' % \
  734. personID)
  735. def _normalize_characterID(self, characterID):
  736. """Normalize the given characterID."""
  737. try:
  738. return int(characterID)
  739. except (ValueError, OverflowError):
  740. raise IMDbError('characterID "%s" can\'t be converted to integer' \
  741. % characterID)
  742. def _normalize_companyID(self, companyID):
  743. """Normalize the given companyID."""
  744. try:
  745. return int(companyID)
  746. except (ValueError, OverflowError):
  747. raise IMDbError('companyID "%s" can\'t be converted to integer' \
  748. % companyID)
  749. def get_imdbMovieID(self, movieID):
  750. """Translate a movieID in an imdbID.
  751. If not in the database, try an Exact Primary Title search on IMDb;
  752. return None if it's unable to get the imdbID.
  753. """
  754. try: movie = Title.get(movieID)
  755. except NotFoundError: return None
  756. imdbID = movie.imdbID
  757. if imdbID is not None: return '%07d' % imdbID
  758. m_dict = get_movie_data(movie.id, self._kind)
  759. titline = build_title(m_dict, ptdf=1)
  760. imdbID = self.title2imdbID(titline)
  761. # If the imdbID was retrieved from the web and was not in the
  762. # database, update the database (ignoring errors, because it's
  763. # possible that the current user has no update privileges).
  764. # There're times when I think I'm a genius; this is one of
  765. # those times... <g>
  766. if imdbID is not None:
  767. try: movie.imdbID = int(imdbID)
  768. except: pass
  769. return imdbID
  770. def get_imdbPersonID(self, personID):
  771. """Translate a personID in an imdbID.
  772. If not in the database, try an Exact Primary Name search on IMDb;
  773. return None if it's unable to get the imdbID.
  774. """
  775. try: person = Name.get(personID)
  776. except NotFoundError: return None
  777. imdbID = person.imdbID
  778. if imdbID is not None: return '%07d' % imdbID
  779. n_dict = {'name': person.name, 'imdbIndex': person.imdbIndex}
  780. namline = build_name(n_dict, canonical=1)
  781. imdbID = self.name2imdbID(namline)
  782. if imdbID is not None:
  783. try: person.imdbID = int(imdbID)
  784. except: pass
  785. return imdbID
  786. def get_imdbCharacterID(self, characterID):
  787. """Translate a characterID in an imdbID.
  788. If not in the database, try an Exact Primary Name search on IMDb;
  789. return None if it's unable to get the imdbID.
  790. """
  791. try: character = CharName.get(characterID)
  792. except NotFoundError: return None
  793. imdbID = character.imdbID
  794. if imdbID is not None: return '%07d' % imdbID
  795. n_dict = {'name': character.name, 'imdbIndex': character.imdbIndex}
  796. namline = build_name(n_dict, canonical=1)
  797. imdbID = self.character2imdbID(namline)
  798. if imdbID is not None:
  799. try: character.imdbID = int(imdbID)
  800. except: pass
  801. return imdbID
  802. def get_imdbCompanyID(self, companyID):
  803. """Translate a companyID in an imdbID.
  804. If not in the database, try an Exact Primary Name search on IMDb;
  805. return None if it's unable to get the imdbID.
  806. """
  807. try: company = CompanyName.get(companyID)
  808. except NotFoundError: return None
  809. imdbID = company.imdbID
  810. if imdbID is not None: return '%07d' % imdbID
  811. n_dict = {'name': company.name, 'country': company.countryCode}
  812. namline = build_company_name(n_dict)
  813. imdbID = self.company2imdbID(namline)
  814. if imdbID is not None:
  815. try: company.imdbID = int(imdbID)
  816. except: pass
  817. return imdbID
  818. def do_adult_search(self, doAdult):
  819. """If set to 0 or False, movies in the Adult category are not
  820. episodeOf = title_dict.get('episode of')
  821. shown in the results of a search."""
  822. self.doAdult = doAdult
  823. def _search_movie(self, title, results, _episodes=False):
  824. title = title.strip()
  825. if not title: return []
  826. title_dict = analyze_title(title, canonical=1)
  827. s_title = title_dict['title']
  828. if not s_title: return []
  829. episodeOf = title_dict.get('episode of')
  830. if episodeOf:
  831. _episodes = False
  832. s_title_split = s_title.split(', ')
  833. if len(s_title_split) > 1 and \
  834. s_title_split[-1].lower() in _unicodeArticles:
  835. s_title_rebuilt = ', '.join(s_title_split[:-1])
  836. if s_title_rebuilt:
  837. s_title = s_title_rebuilt
  838. #if not episodeOf:
  839. # if not _episodes:
  840. # s_title_split = s_title.split(', ')
  841. # if len(s_title_split) > 1 and \
  842. # s_title_split[-1].lower() in _articles:
  843. # s_title_rebuilt = ', '.join(s_title_split[:-1])
  844. # if s_title_rebuilt:
  845. # s_title = s_title_rebuilt
  846. #else:
  847. # _episodes = False
  848. if isinstance(s_title, unicode):
  849. s_title = s_title.encode('ascii', 'ignore')
  850. soundexCode = soundex(s_title)
  851. # XXX: improve the search restricting the kindID if the
  852. # "kind" of the input differs from "movie"?
  853. condition = conditionAka = None
  854. if _episodes:
  855. condition = AND(Title.q.phoneticCode == soundexCode,
  856. Title.q.kindID == self._kindRev['episode'])
  857. conditionAka = AND(AkaTitle.q.phoneticCode == soundexCode,
  858. AkaTitle.q.kindID == self._kindRev['episode'])
  859. elif title_dict['kind'] == 'episode' and episodeOf is not None:
  860. # set canonical=0 ? Should not make much difference.
  861. series_title = build_title(episodeOf, canonical=1)
  862. # XXX: is it safe to get "results" results?
  863. # Too many? Too few?
  864. serRes = results
  865. if serRes < 3 or serRes > 10:
  866. serRes = 10
  867. searchSeries = self._search_movie(series_title, serRes)
  868. seriesIDs = [result[0] for result in searchSeries]
  869. if seriesIDs:
  870. condition = AND(Title.q.phoneticCode == soundexCode,
  871. IN(Title.q.episodeOfID, seriesIDs),
  872. Title.q.kindID == self._kindRev['episode'])
  873. conditionAka = AND(AkaTitle.q.phoneticCode == soundexCode,
  874. IN(AkaTitle.q.episodeOfID, seriesIDs),
  875. AkaTitle.q.kindID == self._kindRev['episode'])
  876. else:
  877. # XXX: bad situation: we have found no matching series;
  878. # try searching everything (both episodes and
  879. # non-episodes) for the title.
  880. condition = AND(Title.q.phoneticCode == soundexCode,
  881. IN(Title.q.episodeOfID, seriesIDs))
  882. conditionAka = AND(AkaTitle.q.phoneticCode == soundexCode,
  883. IN(AkaTitle.q.episodeOfID, seriesIDs))
  884. if condition is None:
  885. # XXX: excludes episodes?
  886. condition = AND(Title.q.kindID != self._kindRev['episode'],
  887. Title.q.phoneticCode == soundexCode)
  888. conditionAka = AND(AkaTitle.q.kindID != self._kindRev['episode'],
  889. AkaTitle.q.phoneticCode == soundexCode)
  890. # Up to 3 variations of the title are searched, plus the
  891. # long imdb canonical title, if provided.
  892. if not _episodes:
  893. title1, title2, title3 = titleVariations(title)
  894. else:
  895. title1 = title
  896. title2 = ''
  897. title3 = ''
  898. try:
  899. qr = [(q.id, get_movie_data(q.id, self._kind))
  900. for q in Title.select(condition)]
  901. q2 = [(q.movieID, get_movie_data(q.id, self._kind, fromAka=1))
  902. for q in AkaTitle.select(conditionAka)]
  903. qr += q2
  904. except NotFoundError, e:
  905. raise IMDbDataAccessError( \
  906. 'unable to search the database: "%s"' % str(e))
  907. resultsST = results * 3
  908. res = scan_titles(qr, title1, title2, title3, resultsST,
  909. searchingEpisode=episodeOf is not None,
  910. onlyEpisodes=_episodes,
  911. ro_thresold=0.0)
  912. res[:] = [x[1] for x in res]
  913. if res and not self.doAdult:
  914. mids = [x[0] for x in res]
  915. genreID = self._infoRev['genres']
  916. adultlist = [al.movieID for al
  917. in MovieInfo.select(
  918. AND(MovieInfo.q.infoTypeID == genreID,
  919. MovieInfo.q.info == 'Adult',
  920. IN(MovieInfo.q.movieID, mids)))]
  921. res[:] = [x for x in res if x[0] not in adultlist]
  922. new_res = []
  923. # XXX: can there be duplicates?
  924. for r in res:
  925. if r not in q2:
  926. new_res.append(r)
  927. continue
  928. mdict = r[1]
  929. aka_title = build_title(mdict, ptdf=1)
  930. orig_dict = get_movie_data(r[0], self._kind)
  931. orig_title = build_title(orig_dict, ptdf=1)
  932. if aka_title == orig_title:
  933. new_res.append(r)
  934. continue
  935. orig_dict['akas'] = [aka_title]
  936. new_res.append((r[0], orig_dict))
  937. if results > 0: new_res[:] = new_res[:results]
  938. return new_res
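# _search_movie() returns a list of (movieID, movieDataDict) tuples, best
# matches first; matches found through AkaTitle carry the matched title in
# movieDataDict['akas'], and titles in the 'Adult' genre are filtered out
# unless do_adult_search() enabled them.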
  939. def _search_episode(self, title, results):
  940. return self._search_movie(title, results, _episodes=True)
  941. def get_movie_main(self, movieID):
  942. # Every movie information is retrieved from here.
  943. infosets = self.get_movie_infoset()
  944. try:
  945. res = get_movie_data(movieID, self._kind)
  946. except NotFoundError, e:
  947. raise IMDbDataAccessError( \
  948. 'unable to get movieID "%s": "%s"' % (movieID, str(e)))
  949. if not res:
  950. raise IMDbDataAccessError('unable to get movieID "%s"' % movieID)
  951. # Collect cast information.
  952. castdata = [[cd.personID, cd.personRoleID, cd.note, cd.nrOrder,
  953. self._role[cd.roleID]]
  954. for cd in CastInfo.select(CastInfo.q.movieID == movieID)]
  955. for p in castdata:
  956. person = Name.get(p[0])
  957. p += [person.name, person.imdbIndex]
  958. if p[4] in ('actor', 'actress'):
  959. p[4] = 'cast'
  960. # Regroup by role/duty (cast, writer, director, ...)
  961. castdata[:] = _groupListBy(castdata, 4)
  962. for group in castdata:
  963. duty = group[0][4]
  964. for pdata in group:
  965. curRole = pdata[1]
  966. curRoleID = None
  967. if curRole is not None:
  968. robj = CharName.get(curRole)
  969. curRole = robj.name
  970. curRoleID = robj.id
  971. p = Person(personID=pdata[0], name=pdata[5],
  972. currentRole=curRole or u'',
  973. roleID=curRoleID,
  974. notes=pdata[2] or u'',
  975. accessSystem='sql')
  976. if pdata[6]: p['imdbIndex'] = pdata[6]
  977. p.billingPos = pdata[3]
  978. res.setdefault(duty, []).append(p)
  979. if duty == 'cast':
  980. res[duty] = merge_roles(res[duty])
  981. res[duty].sort()
  982. # Info about the movie.
  983. minfo = [(self._info[m.infoTypeID], m.info, m.note)
  984. for m in MovieInfo.select(MovieInfo.q.movieID == movieID)]
  985. minfo += [(self._info[m.infoTypeID], m.info, m.note)
  986. for m in MovieInfoIdx.select(MovieInfoIdx.q.movieID == movieID)]
  987. minfo += [('keywords', Keyword.get(m.keywordID).keyword, None)
  988. for m in MovieKeyword.select(MovieKeyword.q.movieID == movieID)]
  989. minfo = _groupListBy(minfo, 0)
  990. for group in minfo:
  991. sect = group[0][0]
  992. for mdata in group:
  993. data = mdata[1]
  994. if mdata[2]: data += '::%s' % mdata[2]
  995. res.setdefault(sect, []).append(data)
  996. # Companies info about a movie.
  997. cinfo = [(self._compType[m.companyTypeID], m.companyID, m.note) for m
  998. in MovieCompanies.select(MovieCompanies.q.movieID == movieID)]
  999. cinfo = _groupListBy(cinfo, 0)
  1000. for group in cinfo:
  1001. sect = group[0][0]
  1002. for mdata in group:
  1003. cDb = CompanyName.get(mdata[1])
  1004. cDbTxt = cDb.name
  1005. if cDb.countryCode:
  1006. cDbTxt += ' %s' % cDb.countryCode
  1007. company = Company(name=cDbTxt,
  1008. companyID=mdata[1],
  1009. notes=mdata[2] or u'',
  1010. accessSystem=self.accessSystem)
  1011. res.setdefault(sect, []).append(company)
  1012. # AKA titles.
  1013. akat = [(get_movie_data(at.id, self._kind, fromAka=1), at.note)
  1014. for at in AkaTitle.select(AkaTitle.q.movieID == movieID)]
  1015. if akat:
  1016. res['akas'] = []
  1017. for td, note in akat:
  1018. nt = build_title(td, ptdf=1)
  1019. if note:
  1020. net = self._changeAKAencoding(note, nt)
  1021. if net is not None: nt = net
  1022. nt += '::%s' % note
  1023. if nt not in res['akas']: res['akas'].append(nt)
  1024. # Complete cast/crew.
  1025. compcast = [(self._compcast[cc.subjectID], self._compcast[cc.statusID])
  1026. for cc in CompleteCast.select(CompleteCast.q.movieID == movieID)]
  1027. if compcast:
  1028. for entry in compcast:
  1029. val = unicode(entry[1])
  1030. res[u'complete %s' % entry[0]] = val
  1031. # Movie connections.
  1032. mlinks = [[ml.linkedMovieID, self._link[ml.linkTypeID]]
  1033. for ml in MovieLink.select(MovieLink.q.movieID == movieID)]
  1034. if mlinks:
  1035. for ml in mlinks:
  1036. lmovieData = get_movie_data(ml[0], self._kind)
  1037. m = Movie(movieID=ml[0], data=lmovieData, accessSystem='sql')
  1038. ml[0] = m
  1039. res['connections'] = {}
  1040. mlinks[:] = _groupListBy(mlinks, 1)
  1041. for group in mlinks:
  1042. lt = group[0][1]
  1043. res['connections'][lt] = [i[0] for i in group]
  1044. # Episodes.
  1045. episodes = {}
  1046. eps_list = list(Title.select(Title.q.episodeOfID == movieID))
  1047. eps_list.sort()
  1048. if eps_list:
  1049. ps_data = {'title': res['title'], 'kind': res['kind'],
  1050. 'year': res.get('year'),
  1051. 'imdbIndex': res.get('imdbIndex')}
  1052. parentSeries = Movie(movieID=movieID, data=ps_data,
  1053. accessSystem='sql')
  1054. for episode in eps_list:
  1055. episodeID = episode.id
  1056. episode_data = get_movie_data(episodeID, self._kind)
  1057. m = Movie(movieID=episodeID, data=episode_data,
  1058. accessSystem='sql')
  1059. m['episode of'] = parentSeries
  1060. season = episode_data.get('season', 'UNKNOWN')
  1061. if season not in episodes: episodes[season] = {}
  1062. ep_number = episode_data.get('episode')
  1063. if ep_number is None:
  1064. ep_number = max((episodes[season].keys() or [0])) + 1
  1065. episodes[season][ep_number] = m
  1066. res['episodes'] = episodes
  1067. res['number of episodes'] = sum([len(x) for x in episodes.values()])
  1068. res['number of seasons'] = len(episodes.keys())
  1069. # Regroup laserdisc information.
  1070. res = _reGroupDict(res, self._moviesubs)
  1071. # Do some transformation to preserve consistency with other
  1072. # data access systems.
  1073. if 'quotes' in res:
  1074. for idx, quote in enumerate(res['quotes']):
  1075. res['quotes'][idx] = quote.split('::')
  1076. if 'runtimes' in res and len(res['runtimes']) > 0:
  1077. rt = res['runtimes'][0]
  1078. episodes = re_episodes.findall(rt)
  1079. if episodes:
  1080. res['runtimes'][0] = re_episodes.sub('', rt)
  1081. if res['runtimes'][0][-2:] == '::':
  1082. res['runtimes'][0] = res['runtimes'][0][:-2]
  1083. if 'votes' in res:
  1084. res['votes'] = int(res['votes'][0])
  1085. if 'rating' in res:
  1086. res['rating'] = float(res['rating'][0])
  1087. if 'votes distribution' in res:
  1088. res['votes distribution'] = res['votes distribution'][0]
  1089. if 'mpaa' in res:
  1090. res['mpaa'] = res['mpaa'][0]
  1091. if 'top 250 rank' in res:
  1092. try: res['top 250 rank'] = int(res['top 250 rank'])
  1093. except: pass
  1094. if 'bottom 10 rank' in res:
  1095. try: res['bottom 100 rank'] = int(res['bottom 10 rank'])
  1096. except: pass
  1097. del res['bottom 10 rank']
  1098. for old, new in [('guest', 'guests'), ('trademarks', 'trade-mark'),
  1099. ('articles', 'article'), ('pictorials', 'pictorial'),
  1100. ('magazine-covers', 'magazine-cover-photo')]:
  1101. if old in res:
  1102. res[new] = res[old]
  1103. del res[old]
  1104. trefs,nrefs = {}, {}
  1105. trefs,nrefs = self._extractRefs(sub_dict(res,Movie.keys_tomodify_list))
  1106. return {'data': res, 'titlesRefs': trefs, 'namesRefs': nrefs,
  1107. 'info sets': infosets}
  1108. # Just to know what kind of information are available.
  1109. get_movie_alternate_versions = get_movie_main
  1110. get_movie_business = get_movie_main
  1111. get_movie_connections = get_movie_main
  1112. get_movie_crazy_credits = get_movie_main
  1113. get_movie_goofs = get_movie_main
  1114. get_movie_keywords = get_movie_main
  1115. get_movie_literature = get_movie_main
  1116. get_movie_locations = get_movie_main
  1117. get_movie_plot = get_movie_main
  1118. get_movie_quotes = get_movie_main
  1119. get_movie_release_dates = get_movie_main
  1120. get_movie_soundtrack = get_movie_main
  1121. get_movie_taglines = get_movie_main
  1122. get_movie_technical = get_movie_main
  1123. get_movie_trivia = get_movie_main
  1124. get_movie_vote_details = get_movie_main
  1125. get_movie_episodes = get_movie_main
  1126. def _search_person(self, name, results):
  1127. name = name.strip()
  1128. if not name: return []
  1129. s_name = analyze_name(name)['name']
  1130. if not s_name: return []
  1131. if isinstance(s_na