
/books/PCI/code/chapter4/searchengine.py

https://github.com/kzfm1024/misc
import re
import urllib2
from BeautifulSoup import *
from urlparse import urljoin
from pysqlite2 import dbapi2 as sqlite
import nn

mynet=nn.searchnet('nn.db')

# Create a list of words to ignore
ignorewords={'the':1,'of':1,'to':1,'and':1,'a':1,'in':1,'is':1,'it':1}

class crawler:
    # Initialize the crawler with the name of database
    def __init__(self,dbname):
        self.con=sqlite.connect(dbname)

    def __del__(self):
        self.con.close()

    def dbcommit(self):
        self.con.commit()

    # Auxiliary function for getting an entry id and adding
    # it if it's not present
    def getentryid(self,table,field,value,createnew=True):
        cur=self.con.execute(
            "select rowid from %s where %s='%s'" % (table,field,value))
        res=cur.fetchone()
        if res==None:
            cur=self.con.execute(
                "insert into %s (%s) values ('%s')" % (table,field,value))
            return cur.lastrowid
        else:
            return res[0]
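
    # Example (hypothetical values): getentryid('wordlist','word','python')
    # returns the existing rowid for 'python', or inserts the row and
    # returns the new rowid if the word has not been seen before.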

    # Index an individual page
    def addtoindex(self,url,soup):
        if self.isindexed(url): return
        print 'Indexing '+url

        # Get the individual words
        text=self.gettextonly(soup)
        words=self.separatewords(text)

        # Get the URL id
        urlid=self.getentryid('urllist','url',url)

        # Link each word to this url
        for i in range(len(words)):
            word=words[i]
            if word in ignorewords: continue
            wordid=self.getentryid('wordlist','word',word)
            self.con.execute("insert into wordlocation(urlid,wordid,location) values (%d,%d,%d)" % (urlid,wordid,i))

    # Extract the text from an HTML page (no tags)
    def gettextonly(self,soup):
        v=soup.string
        if v==None:
            c=soup.contents
            resulttext=''
            for t in c:
                subtext=self.gettextonly(t)
                resulttext+=subtext+'\n'
            return resulttext
        else:
            return v.strip()

    # Separate the words by any non-word (non-alphanumeric) character
    def separatewords(self,text):
        splitter=re.compile('\\W*')
        return [s.lower() for s in splitter.split(text) if s!='']

    # Return true if this url is already indexed
    # (placeholder: always returns False, so pages are re-indexed)
    def isindexed(self,url):
        return False

    # Add a link between two pages
    def addlinkref(self,urlFrom,urlTo,linkText):
        words=self.separatewords(linkText)
        fromid=self.getentryid('urllist','url',urlFrom)
        toid=self.getentryid('urllist','url',urlTo)
        if fromid==toid: return
        cur=self.con.execute("insert into link(fromid,toid) values (%d,%d)" % (fromid,toid))
        linkid=cur.lastrowid
        for word in words:
            if word in ignorewords: continue
            wordid=self.getentryid('wordlist','word',word)
            self.con.execute("insert into linkwords(linkid,wordid) values (%d,%d)" % (linkid,wordid))

    # Starting with a list of pages, do a breadth
    # first search to the given depth, indexing pages
    # as we go
    def crawl(self,pages,depth=2):
        for i in range(depth):
            newpages={}
            for page in pages:
                try:
                    c=urllib2.urlopen(page)
                except:
                    print "Could not open %s" % page
                    continue
                try:
                    soup=BeautifulSoup(c.read())
                    self.addtoindex(page,soup)

                    links=soup('a')
                    for link in links:
                        if ('href' in dict(link.attrs)):
                            url=urljoin(page,link['href'])
                            if url.find("'")!=-1: continue
                            url=url.split('#')[0] # remove location portion
                            if url[0:4]=='http' and not self.isindexed(url):
                                newpages[url]=1
                            linkText=self.gettextonly(link)
                            self.addlinkref(page,url,linkText)

                    self.dbcommit()
                except:
                    print "Could not parse page %s" % page

            pages=newpages
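
    # Usage sketch (hypothetical seed URL; assumes createindextables()
    # has already been run once against this database):
    #   c=crawler('searchindex.db')
    #   c.crawl(['http://example.com/'])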

    # Create the database tables
    def createindextables(self):
        self.con.execute('create table urllist(url)')
        self.con.execute('create table wordlist(word)')
        self.con.execute('create table wordlocation(urlid,wordid,location)')
        self.con.execute('create table link(fromid integer,toid integer)')
        self.con.execute('create table linkwords(wordid,linkid)')
        self.con.execute('create index wordidx on wordlist(word)')
        self.con.execute('create index urlidx on urllist(url)')
        self.con.execute('create index wordurlidx on wordlocation(wordid)')
        self.con.execute('create index urltoidx on link(toid)')
        self.con.execute('create index urlfromidx on link(fromid)')
        self.dbcommit()
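
    # One-time setup sketch: createindextables() must run once per new
    # database before the first crawl; running it again raises an error
    # because the tables already exist.
    #   crawler('searchindex.db').createindextables()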

    def calculatepagerank(self,iterations=20):
        # clear out the current page rank tables
        self.con.execute('drop table if exists pagerank')
        self.con.execute('create table pagerank(urlid primary key,score)')

        # initialize every url with a page rank of 1
        for (urlid,) in self.con.execute('select rowid from urllist'):
            self.con.execute('insert into pagerank(urlid,score) values (%d,1.0)' % urlid)
        self.dbcommit()

        for i in range(iterations):
            print "Iteration %d" % (i)
            for (urlid,) in self.con.execute('select rowid from urllist'):
                pr=0.15

                # Loop through all the pages that link to this one
                for (linker,) in self.con.execute(
                        'select distinct fromid from link where toid=%d' % urlid):
                    # Get the page rank of the linker
                    linkingpr=self.con.execute(
                        'select score from pagerank where urlid=%d' % linker).fetchone()[0]

                    # Get the total number of links from the linker
                    linkingcount=self.con.execute(
                        'select count(*) from link where fromid=%d' % linker).fetchone()[0]
                    pr+=0.85*(linkingpr/linkingcount)
                self.con.execute(
                    'update pagerank set score=%f where urlid=%d' % (pr,urlid))
            self.dbcommit()
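
# The update above implements the PageRank formula with a damping factor
# of 0.85: pr(A)=0.15+0.85*(pr(B)/links(B)+pr(C)/links(C)+...), iterated
# (20 passes by default) until the scores settle. Typical use after a
# crawl: crawler('searchindex.db').calculatepagerank()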

class searcher:
    def __init__(self,dbname):
        self.con=sqlite.connect(dbname)

    def __del__(self):
        self.con.close()

    def getmatchrows(self,q):
        # Strings to build the query
        fieldlist='w0.urlid'
        tablelist=''
        clauselist=''
        wordids=[]

        # Split the words by spaces
        words=q.split(' ')
        tablenumber=0

        for word in words:
            # Get the word ID
            wordrow=self.con.execute(
                "select rowid from wordlist where word='%s'" % word).fetchone()
            if wordrow!=None:
                wordid=wordrow[0]
                wordids.append(wordid)
                if tablenumber>0:
                    tablelist+=','
                    clauselist+=' and '
                    clauselist+='w%d.urlid=w%d.urlid and ' % (tablenumber-1,tablenumber)
                fieldlist+=',w%d.location' % tablenumber
                tablelist+='wordlocation w%d' % tablenumber
                clauselist+='w%d.wordid=%d' % (tablenumber,wordid)
                tablenumber+=1

        # Create the query from the separate parts
        fullquery='select %s from %s where %s' % (fieldlist,tablelist,clauselist)
        print fullquery
        cur=self.con.execute(fullquery)
        rows=[row for row in cur]

        return rows,wordids
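
    # For a two-word query the generated SQL (hypothetical word ids 10
    # and 17) is a self-join on wordlocation:
    #   select w0.urlid,w0.location,w1.location
    #   from wordlocation w0,wordlocation w1
    #   where w0.wordid=10 and w0.urlid=w1.urlid and w1.wordid=17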

    def getscoredlist(self,rows,wordids):
        totalscores=dict([(row[0],0) for row in rows])

        # This is where we'll put our scoring functions
        weights=[(1.0,self.locationscore(rows)),
                 (1.0,self.frequencyscore(rows)),
                 (1.0,self.pagerankscore(rows)),
                 (1.0,self.linktextscore(rows,wordids)),
                 (5.0,self.nnscore(rows,wordids))]

        for (weight,scores) in weights:
            for url in totalscores:
                totalscores[url]+=weight*scores[url]

        return totalscores

    def geturlname(self,id):
        return self.con.execute(
            "select url from urllist where rowid=%d" % id).fetchone()[0]

    def query(self,q):
        rows,wordids=self.getmatchrows(q)
        scores=self.getscoredlist(rows,wordids)
        rankedscores=[(score,url) for (url,score) in scores.items()]
        rankedscores.sort()
        rankedscores.reverse()
        for (score,urlid) in rankedscores[0:10]:
            print '%f\t%s' % (score,self.geturlname(urlid))
        return wordids,[r[1] for r in rankedscores[0:10]]
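
    # Query sketch (hypothetical database name and query string): prints
    # the top ten urls with their combined scores and returns the word
    # ids and url ids, which can be fed back to the nn module for
    # click training.
    #   e=searcher('searchindex.db')
    #   e.query('functional programming')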

    def normalizescores(self,scores,smallIsBetter=0):
        vsmall=0.00001 # Avoid division by zero errors
        if smallIsBetter:
            minscore=min(scores.values())
            return dict([(u,float(minscore)/max(vsmall,l)) for (u,l) in scores.items()])
        else:
            maxscore=max(scores.values())
            if maxscore==0: maxscore=vsmall
            return dict([(u,float(c)/maxscore) for (u,c) in scores.items()])
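
    # Worked example: normalizescores({1:5,2:10}) returns {1:0.5,2:1.0},
    # while normalizescores({1:5,2:10},smallIsBetter=1) returns
    # {1:1.0,2:0.5}, so every scoring method lands on a 0-to-1 scale
    # where 1.0 marks the best page.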

    def frequencyscore(self,rows):
        counts=dict([(row[0],0) for row in rows])
        for row in rows: counts[row[0]]+=1
        return self.normalizescores(counts)

    def locationscore(self,rows):
        locations=dict([(row[0],1000000) for row in rows])
        for row in rows:
            loc=sum(row[1:])
            if loc<locations[row[0]]: locations[row[0]]=loc
        return self.normalizescores(locations,smallIsBetter=1)

    def distancescore(self,rows):
        # If there's only one word, everyone wins!
        if len(rows[0])<=2: return dict([(row[0],1.0) for row in rows])

        # Initialize the dictionary with large values
        mindistance=dict([(row[0],1000000) for row in rows])

        for row in rows:
            dist=sum([abs(row[i]-row[i-1]) for i in range(2,len(row))])
            if dist<mindistance[row[0]]: mindistance[row[0]]=dist
        return self.normalizescores(mindistance,smallIsBetter=1)
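
    # Note: distancescore (and inboundlinkscore below) are defined but
    # not wired into getscoredlist; to use one, add an entry such as
    # (1.0,self.distancescore(rows)) to the weights list there.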

    def inboundlinkscore(self,rows):
        uniqueurls=dict([(row[0],1) for row in rows])
        inboundcount=dict([(u,self.con.execute('select count(*) from link where toid=%d' % u).fetchone()[0]) for u in uniqueurls])
        return self.normalizescores(inboundcount)

    def linktextscore(self,rows,wordids):
        linkscores=dict([(row[0],0) for row in rows])
        for wordid in wordids:
            cur=self.con.execute('select link.fromid,link.toid from linkwords,link where wordid=%d and linkwords.linkid=link.rowid' % wordid)
            for (fromid,toid) in cur:
                if toid in linkscores:
                    pr=self.con.execute('select score from pagerank where urlid=%d' % fromid).fetchone()[0]
                    linkscores[toid]+=pr
        maxscore=max(linkscores.values())
        if maxscore==0: maxscore=0.00001 # avoid division by zero when no anchor text matched
        normalizedscores=dict([(u,float(l)/maxscore) for (u,l) in linkscores.items()])
        return normalizedscores

    def pagerankscore(self,rows):
        pageranks=dict([(row[0],self.con.execute('select score from pagerank where urlid=%d' % row[0]).fetchone()[0]) for row in rows])
        maxrank=max(pageranks.values())
        normalizedscores=dict([(u,float(l)/maxrank) for (u,l) in pageranks.items()])
        return normalizedscores

    def nnscore(self,rows,wordids):
        # Get unique URL IDs as an ordered list
        urlids=[urlid for urlid in dict([(row[0],1) for row in rows])]
        nnres=mynet.getresult(wordids,urlids)
        scores=dict([(urlids[i],nnres[i]) for i in range(len(urlids))])
        return self.normalizescores(scores)
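
# End-to-end sketch (hypothetical seed URL and query; needs network
# access, the BeautifulSoup/pysqlite2/nn dependencies, and a fresh
# database, since createindextables() fails if the tables already exist):
if __name__=='__main__':
    c=crawler('searchindex.db')
    c.createindextables() # first run only
    c.crawl(['http://example.com/'])
    c.calculatepagerank()
    s=searcher('searchindex.db')
    s.query('example query')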