
/nltk/tokenize/treebank.py

https://github.com/haewoon/nltk
Possible License(s): Apache-2.0
# Natural Language Toolkit: Tokenizers
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
#         Michael Heilman <mheilman@cmu.edu> (re-port from http://www.cis.upenn.edu/~treebank/tokenizer.sed)
#
# URL: <http://nltk.sourceforge.net>
# For license information, see LICENSE.TXT

r"""
Penn Treebank Tokenizer

The Treebank tokenizer uses regular expressions to tokenize text as in the Penn Treebank.
This implementation is a port of the tokenizer sed script written by Robert McIntyre
and available at http://www.cis.upenn.edu/~treebank/tokenizer.sed.
"""

import re

from nltk.tokenize.api import TokenizerI


class TreebankWordTokenizer(TokenizerI):
    """
    The Treebank tokenizer uses regular expressions to tokenize text as in the
    Penn Treebank. This is the tokenizer that is invoked by ``word_tokenize()``.
    It assumes that the text has already been segmented into sentences, e.g.
    using ``sent_tokenize()``.

    This tokenizer performs the following steps:

    - split standard contractions, e.g. ``don't`` -> ``do n't`` and ``they'll`` -> ``they 'll``
    - treat most punctuation characters as separate tokens
    - split off commas and single quotes, when followed by whitespace
    - separate periods that appear at the end of line

        >>> from nltk.tokenize import TreebankWordTokenizer
        >>> s = '''Good muffins cost $3.88\\nin New York.  Please buy me\\ntwo of them.\\n\\nThanks.'''
        >>> TreebankWordTokenizer().tokenize(s)
        ['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York.',
        'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.']
        >>> s = "They'll save and invest more."
        >>> TreebankWordTokenizer().tokenize(s)
        ['They', "'ll", 'save', 'and', 'invest', 'more', '.']

    NB. this tokenizer assumes that the text is presented as one sentence per line,
    where each line is delimited with a newline character.
    The only periods to be treated as separate tokens are those appearing
    at the end of a line.
    """

    # List of contractions adapted from Robert MacIntyre's tokenizer.
    CONTRACTIONS2 = [re.compile(r"(?i)\b(can)(not)\b"),
                     re.compile(r"(?i)\b(d)('ye)\b"),
                     re.compile(r"(?i)\b(gim)(me)\b"),
                     re.compile(r"(?i)\b(gon)(na)\b"),
                     re.compile(r"(?i)\b(got)(ta)\b"),
                     re.compile(r"(?i)\b(lem)(me)\b"),
                     re.compile(r"(?i)\b(mor)('n)\b"),
                     re.compile(r"(?i)\b(wan)(na) ")]
    CONTRACTIONS3 = [re.compile(r"(?i) ('t)(is)\b"),
                     re.compile(r"(?i) ('t)(was)\b")]
    CONTRACTIONS4 = [re.compile(r"(?i)\b(whad)(dd)(ya)\b"),
                     re.compile(r"(?i)\b(wha)(t)(cha)\b")]

    def tokenize(self, text):
        # starting quotes
        text = re.sub(r'^\"', r'``', text)
        text = re.sub(r'(``)', r' \1 ', text)
        text = re.sub(r'([ (\[{<])"', r'\1 `` ', text)
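        # at this point, e.g. '"Hello," he said.' -> ' `` Hello," he said.'
        # (opening double quotes become the Treebank `` token)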

        # punctuation
        text = re.sub(r'([:,])([^\d])', r' \1 \2', text)
        text = re.sub(r'\.\.\.', r' ... ', text)
        text = re.sub(r'[;@#$%&]', r' \g<0> ', text)
        text = re.sub(r'([^\.])(\.)([\]\)}>"\']*)\s*$', r'\1 \2\3 ', text)
        text = re.sub(r'[?!]', r' \g<0> ', text)

        text = re.sub(r"([^'])' ", r"\1 ' ", text)

        # parens, brackets, etc.
        text = re.sub(r'[\]\[\(\)\{\}\<\>]', r' \g<0> ', text)
        text = re.sub(r'--', r' -- ', text)

        # add extra space to make things easier
        text = " " + text + " "

        # ending quotes
        text = re.sub(r'"', " '' ", text)
        text = re.sub(r'(\S)(\'\')', r'\1 \2 ', text)

        text = re.sub(r"([^' ])('[sS]|'[mM]|'[dD]|') ", r"\1 \2 ", text)
        text = re.sub(r"([^' ])('ll|'re|'ve|n't|) ", r"\1 \2 ", text)
        text = re.sub(r"([^' ])('LL|'RE|'VE|N'T|) ", r"\1 \2 ", text)

        for regexp in self.CONTRACTIONS2:
            text = regexp.sub(r' \1 \2 ', text)
        for regexp in self.CONTRACTIONS3:
            text = regexp.sub(r' \1 \2 ', text)

        # We are not using CONTRACTIONS4 since
        # they are also commented out in the SED scripts
        # for regexp in self.CONTRACTIONS4:
        #     text = regexp.sub(r' \1 \2 \3 ', text)
  84. text = re.sub(" +", " ", text)
  85. text = text.strip()
  86. #add space at end to match up with MacIntyre's output (for debugging)
  87. if text != "":
  88. text += " "
  89. return text.split()


if __name__ == "__main__":
    import doctest
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
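
A minimal usage sketch of the class above (an editorial illustration, not part
of the original file; it assumes the module is importable as
nltk.tokenize.treebank, and the expected tokens follow from the rules traced in
the comments):

    from nltk.tokenize.treebank import TreebankWordTokenizer

    tokenizer = TreebankWordTokenizer()

    # clitic splitting plus end-of-string period handling
    print(tokenizer.tokenize("I can't do that."))  # ['I', 'ca', "n't", 'do', 'that', '.']

    # one of the CONTRACTIONS2 patterns in action
    print(tokenizer.tokenize("Gimme a break."))    # ['Gim', 'me', 'a', 'break', '.']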