
/nltk/stem/lancaster.py

https://github.com/BrucePHill/nltk
Python | 313 lines
Possible License(s): Apache-2.0

# Natural Language Toolkit: Stemmers
#
# Copyright (C) 2001-2013 NLTK Project
# Author: Steven Tomcavage <stomcava@law.upenn.edu>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT

"""
A word stemmer based on the Lancaster stemming algorithm.
Paice, Chris D. "Another Stemmer." ACM SIGIR Forum 24.3 (1990): 56-61.
"""
from __future__ import unicode_literals

import re

from .api import StemmerI
from nltk.compat import python_2_unicode_compatible

@python_2_unicode_compatible
class LancasterStemmer(StemmerI):
    """
    Lancaster Stemmer

        >>> from nltk.stem.lancaster import LancasterStemmer
        >>> st = LancasterStemmer()
        >>> st.stem('maximum')     # Remove "-um" when word is intact
        'maxim'
        >>> st.stem('presumably')  # Don't remove "-um" when word is not intact
        'presum'
        >>> st.stem('multiply')    # No action taken if word ends with "-ply"
        'multiply'
        >>> st.stem('provision')   # Replace "-sion" with "-j" to trigger "j" set of rules
        'provid'
        >>> st.stem('owed')        # Word starting with vowel must contain at least 2 letters
        'ow'
        >>> st.stem('ear')         # ditto
        'ear'
        >>> st.stem('saying')      # Words starting with consonant must contain at least 3
        'say'
        >>> st.stem('crying')      # letters and one of those letters must be a vowel
        'cry'
        >>> st.stem('string')      # ditto
        'string'
        >>> st.stem('meant')       # ditto
        'meant'
        >>> st.stem('cement')      # ditto
        'cem'
    """

    # The rule list is static since it doesn't change between instances
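    # Rule format (split by the regex in __doStemming below): the word ending
    # spelled in reverse, an optional "*" meaning the rule applies only while
    # the word is still intact (unstemmed), the number of characters to remove,
    # an optional replacement string, and ">" to keep applying rules or "." to
    # stop.  For example, "nois4j>" strips a trailing "sion" (4 characters),
    # appends "j", and lets stemming continue.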
    rule_tuple = (
        "ai*2.",     # -ia > -   if intact
        "a*1.",      # -a > -    if intact
        "bb1.",      # -bb > -b
        "city3s.",   # -ytic > -ys
        "ci2>",      # -ic > -
        "cn1t>",     # -nc > -nt
        "dd1.",      # -dd > -d
        "dei3y>",    # -ied > -y
        "deec2ss.",  # -ceed > -cess
        "dee1.",     # -eed > -ee
        "de2>",      # -ed > -
        "dooh4>",    # -hood > -
        "e1>",       # -e > -
        "feil1v.",   # -lief > -liev
        "fi2>",      # -if > -
        "gni3>",     # -ing > -
        "gai3y.",    # -iag > -y
        "ga2>",      # -ag > -
        "gg1.",      # -gg > -g
        "ht*2.",     # -th > -   if intact
        "hsiug5ct.", # -guish > -ct
        "hsi3>",     # -ish > -
        "i*1.",      # -i > -    if intact
        "i1y>",      # -i > -y
        "ji1d.",     # -ij > -id  --  see nois4j> & vis3j>
        "juf1s.",    # -fuj > -fus
        "ju1d.",     # -uj > -ud
        "jo1d.",     # -oj > -od
        "jeh1r.",    # -hej > -her
        "jrev1t.",   # -verj > -vert
        "jsim2t.",   # -misj > -mit
        "jn1d.",     # -nj > -nd
        "j1s.",      # -j > -s
        "lbaifi6.",  # -ifiabl > -
        "lbai4y.",   # -iabl > -y
        "lba3>",     # -abl > -
        "lbi3.",     # -ibl > -
        "lib2l>",    # -bil > -bl
        "lc1.",      # -cl > -c
        "lufi4y.",   # -iful > -y
        "luf3>",     # -ful > -
        "lu2.",      # -ul > -
        "lai3>",     # -ial > -
        "lau3>",     # -ual > -
        "la2>",      # -al > -
        "ll1.",      # -ll > -l
        "mui3.",     # -ium > -
        "mu*2.",     # -um > -   if intact
        "msi3>",     # -ism > -
        "mm1.",      # -mm > -m
        "nois4j>",   # -sion > -j
        "noix4ct.",  # -xion > -ct
        "noi3>",     # -ion > -
        "nai3>",     # -ian > -
        "na2>",      # -an > -
        "nee0.",     # protect -een
        "ne2>",      # -en > -
        "nn1.",      # -nn > -n
        "pihs4>",    # -ship > -
        "pp1.",      # -pp > -p
        "re2>",      # -er > -
        "rae0.",     # protect -ear
        "ra2.",      # -ar > -
        "ro2>",      # -or > -
        "ru2>",      # -ur > -
        "rr1.",      # -rr > -r
        "rt1>",      # -tr > -t
        "rei3y>",    # -ier > -y
        "sei3y>",    # -ies > -y
        "sis2.",     # -sis > -s
        "si2>",      # -is > -
        "ssen4>",    # -ness > -
        "ss0.",      # protect -ss
        "suo3>",     # -ous > -
        "su*2.",     # -us > -   if intact
        "s*1>",      # -s > -    if intact
        "s0.",       # -s > -s
        "tacilp4y.", # -plicat > -ply
        "ta2>",      # -at > -
        "tnem4>",    # -ment > -
        "tne3>",     # -ent > -
        "tna3>",     # -ant > -
        "tpir2b.",   # -ript > -rib
        "tpro2b.",   # -orpt > -orb
        "tcud1.",    # -duct > -duc
        "tpmus2.",   # -sumpt > -sum
        "tpec2iv.",  # -cept > -ceiv
        "tulo2v.",   # -olut > -olv
        "tsis0.",    # protect -sist
        "tsi3>",     # -ist > -
        "tt1.",      # -tt > -t
        "uqi3.",     # -iqu > -
        "ugo1.",     # -ogu > -og
        "vis3j>",    # -siv > -j
        "vie0.",     # protect -eiv
        "vi2>",      # -iv > -
        "ylb1>",     # -bly > -bl
        "yli3y>",    # -ily > -y
        "ylp0.",     # protect -ply
        "yl2>",      # -ly > -
        "ygo1.",     # -ogy > -og
        "yhp1.",     # -phy > -ph
        "ymo1.",     # -omy > -om
        "ypo1.",     # -opy > -op
        "yti3>",     # -ity > -
        "yte3>",     # -ety > -
        "ytl2.",     # -lty > -l
        "yrtsi5.",   # -istry > -
        "yra3>",     # -ary > -
        "yro3>",     # -ory > -
        "yfi3.",     # -ify > -
        "ycn2t>",    # -ncy > -nt
        "yca3>",     # -acy > -
        "zi2>",      # -iz > -
        "zy1s."      # -yz > -ys
    )

    def __init__(self):
        """Create an instance of the Lancaster stemmer.
        """
        # Setup an empty rule dictionary - this will be filled in later
        self.rule_dictionary = {}

    def parseRules(self, rule_tuple):
        """Validate the set of rules used in this stemmer.
        """
        valid_rule = re.compile(r"^[a-z]+\*?\d[a-z]*[>\.]?$")
        # Empty any old rules from the rule set before adding new ones
        self.rule_dictionary = {}

        for rule in rule_tuple:
            if not valid_rule.match(rule):
                raise ValueError("The rule %s is invalid" % rule)
            # Index rules by their first character, i.e. the last letter of
            # the (reversed) suffix they match
            first_letter = rule[0:1]
            if first_letter in self.rule_dictionary:
                self.rule_dictionary[first_letter].append(rule)
            else:
                self.rule_dictionary[first_letter] = [rule]

    def stem(self, word):
        """Stem a word using the Lancaster stemmer.
        """
        # Lower-case the word, since all the rules are lower-cased
        word = word.lower()

        # Save a copy of the original word
        intact_word = word

        # If the user hasn't supplied any rules, setup the default rules
        if len(self.rule_dictionary) == 0:
            self.parseRules(LancasterStemmer.rule_tuple)

        return self.__doStemming(word, intact_word)

    def __doStemming(self, word, intact_word):
        """Perform the actual word stemming
        """
        valid_rule = re.compile(r"^([a-z]+)(\*?)(\d)([a-z]*)([>\.]?)$")

        proceed = True

        while proceed:

            # Find the position of the last letter of the word to be stemmed
            last_letter_position = self.__getLastLetter(word)

            # Only stem the word if it has a last letter and a rule matching that last letter
            if last_letter_position < 0 or word[last_letter_position] not in self.rule_dictionary:
                proceed = False

            else:
                rule_was_applied = False

                # Go through each rule that matches the word's final letter
                for rule in self.rule_dictionary[word[last_letter_position]]:
                    rule_match = valid_rule.match(rule)
                    if rule_match:
                        (ending_string,
                         intact_flag,
                         remove_total,
                         append_string,
                         cont_flag) = rule_match.groups()

                        # Convert the number of chars to remove when stemming
                        # from a string to an integer
                        remove_total = int(remove_total)

                        # Proceed if word's ending matches rule's word ending
                        if word.endswith(ending_string[::-1]):
                            if intact_flag:
                                if (word == intact_word and
                                    self.__isAcceptable(word, remove_total)):
                                    word = self.__applyRule(word,
                                                            remove_total,
                                                            append_string)
                                    rule_was_applied = True
                                    if cont_flag == '.':
                                        proceed = False
                                    break
                            elif self.__isAcceptable(word, remove_total):
                                word = self.__applyRule(word,
                                                        remove_total,
                                                        append_string)
                                rule_was_applied = True
                                if cont_flag == '.':
                                    proceed = False
                                break

                # If no rules apply, the word doesn't need any more stemming
                if not rule_was_applied:
                    proceed = False

        return word

    def __getLastLetter(self, word):
        """Get the zero-based index of the last alphabetic character in this string.

        Scanning starts at the beginning of the word and stops at the first
        non-alphabetic character, so the index returned is the end of the
        leading run of letters (or -1 if the word starts with a non-letter).
        """
        last_letter = -1
        for position in range(len(word)):
            if word[position].isalpha():
                last_letter = position
            else:
                break

        return last_letter

    def __isAcceptable(self, word, remove_total):
        """Determine if the word is acceptable for stemming.
        """
        word_is_acceptable = False
        # If the word starts with a vowel, at least 2 characters must
        # remain after stemming
        if word[0] in "aeiouy":
            if (len(word) - remove_total >= 2):
                word_is_acceptable = True
        # If the word starts with a consonant, at least 3 characters must
        # remain after stemming, and one of the first three letters must
        # be a vowel (or "y")
        elif (len(word) - remove_total >= 3):
            if word[1] in "aeiouy":
                word_is_acceptable = True
            elif word[2] in "aeiouy":
                word_is_acceptable = True

        return word_is_acceptable

    def __applyRule(self, word, remove_total, append_string):
        """Apply the stemming rule to the word
        """
        # Remove letters from the end of the word
        # (e.g. "provision" with remove_total=4 becomes "provi")
        new_word_length = len(word) - remove_total
        word = word[0:new_word_length]

        # And add new letters to the end of the truncated word
        # (appending "j" then gives "provij", ready for the "j" rules)
        if append_string:
            word += append_string

        return word

    def __repr__(self):
        return '<LancasterStemmer>'


if __name__ == "__main__":
    import doctest
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
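
As a quick sanity check of the rule encoding described above, here is a minimal standalone sketch (not part of lancaster.py; it assumes NLTK is installed and the local name rule_re is introduced only for illustration). It decomposes one rule string with the same regular expression used in __doStemming and then stems a few of the words from the doctest:

import re
from nltk.stem.lancaster import LancasterStemmer

# Same pattern __doStemming uses to split a rule into its five parts:
# reversed ending, intact flag, characters to remove, replacement,
# continuation flag.
rule_re = re.compile(r"^([a-z]+)(\*?)(\d)([a-z]*)([>\.]?)$")

ending, intact, remove, append, cont = rule_re.match("nois4j>").groups()
print(ending[::-1], remove, append, cont)  # "sion": remove 4 chars, append "j", continue

st = LancasterStemmer()
for word in ("maximum", "presumably", "provision"):
    print(word, "->", st.stem(word))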