
/nltk/corpus/reader/util.py

https://github.com/haewoon/nltk
Python | 803 lines | 703 code | 33 blank | 67 comment
Possible License(s): Apache-2.0
# Natural Language Toolkit: Corpus Reader Utilities
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Steven Bird <sb@ldc.upenn.edu>
#         Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT

import os
import sys
import bisect
import re
import tempfile
try: import cPickle as pickle
except ImportError: import pickle
from itertools import islice

# Use the c version of ElementTree, which is faster, if possible:
try: from xml.etree import cElementTree as ElementTree
except ImportError: from xml.etree import ElementTree

from nltk.tokenize import wordpunct_tokenize
from nltk.internals import slice_bounds
from nltk.data import PathPointer, FileSystemPathPointer, ZipFilePathPointer
from nltk.data import SeekableUnicodeStreamReader
from nltk.sourcedstring import SourcedStringStream
from nltk.util import AbstractLazySequence, LazySubsequence, LazyConcatenation

######################################################################
#{ Corpus View
######################################################################

class StreamBackedCorpusView(AbstractLazySequence):
    """
    A 'view' of a corpus file, which acts like a sequence of tokens:
    it can be accessed by index, iterated over, etc.  However, the
    tokens are only constructed as-needed -- the entire corpus is
    never stored in memory at once.

    The constructor to ``StreamBackedCorpusView`` takes two arguments:
    a corpus fileid (specified as a string or as a ``PathPointer``);
    and a block reader.  A "block reader" is a function that reads
    zero or more tokens from a stream, and returns them as a list.  A
    very simple example of a block reader is:

        >>> def simple_block_reader(stream):
        ...     return stream.readline().split()

    This simple block reader reads a single line at a time, and
    returns a single token (consisting of a string) for each
    whitespace-separated substring on the line.

    When deciding how to define the block reader for a given
    corpus, careful consideration should be given to the size of
    blocks handled by the block reader.  Smaller block sizes will
    increase the memory requirements of the corpus view's internal
    data structures (by 2 integers per block).  On the other hand,
    larger block sizes may decrease performance for random access to
    the corpus.  (But note that larger block sizes will *not*
    decrease performance for iteration.)

    Internally, ``CorpusView`` maintains a partial mapping from token
    index to file position, with one entry per block.  When a token
    with a given index *i* is requested, the ``CorpusView`` constructs
    it as follows:

    1. First, it searches the toknum/filepos mapping for the token
       index closest to (but less than or equal to) *i*.

    2. Then, starting at the file position corresponding to that
       index, it reads one block at a time using the block reader
       until it reaches the requested token.

    The toknum/filepos mapping is created lazily: it is initially
    empty, but every time a new block is read, the block's
    initial token is added to the mapping.  (Thus, the toknum/filepos
    map has one entry per block.)

    In order to increase efficiency for random access patterns that
    have high degrees of locality, the corpus view may cache one or
    more blocks.

    :note: Each ``CorpusView`` object internally maintains an open file
        object for its underlying corpus file.  This file should be
        automatically closed when the ``CorpusView`` is garbage collected,
        but if you wish to close it manually, use the ``close()``
        method.  If you access a ``CorpusView``'s items after it has been
        closed, the file object will be automatically re-opened.

    :warning: If the contents of the file are modified during the
        lifetime of the ``CorpusView``, then the ``CorpusView``'s behavior
        is undefined.

    :warning: If a unicode encoding is specified when constructing a
        ``CorpusView``, then the block reader may only call
        ``stream.seek()`` with offsets that have been returned by
        ``stream.tell()``; in particular, calling ``stream.seek()`` with
        relative offsets, or with offsets based on string lengths, may
        lead to incorrect behavior.

    :ivar _block_reader: The function used to read
        a single block from the underlying file stream.
    :ivar _toknum: A list containing the token index of each block
        that has been processed.  In particular, ``_toknum[i]`` is the
        token index of the first token in block ``i``.  Together
        with ``_filepos``, this forms a partial mapping between token
        indices and file positions.
    :ivar _filepos: A list containing the file position of each block
        that has been processed.  In particular, ``_filepos[i]`` is the
        file position of the first character in block ``i``.  Together
        with ``_toknum``, this forms a partial mapping between token
        indices and file positions.
    :ivar _stream: The stream used to access the underlying corpus file.
    :ivar _len: The total number of tokens in the corpus, if known;
        or None, if the number of tokens is not yet known.
    :ivar _eofpos: The character position of the last character in the
        file.  This is calculated when the corpus view is initialized,
        and is used to decide when the end of file has been reached.
    :ivar _cache: A cache of the most recently read block.  It
        is encoded as a tuple (start_toknum, end_toknum, tokens), where
        start_toknum is the token index of the first token in the block;
        end_toknum is the token index of the first token not in the
        block; and tokens is a list of the tokens in the block.
    """
    def __init__(self, fileid, block_reader=None, startpos=0,
                 encoding=None, source=None):
        """
        Create a new corpus view, based on the file ``fileid``, and
        read with ``block_reader``.  See the class documentation
        for more information.

        :param fileid: The path to the file that is read by this
            corpus view.  ``fileid`` can either be a string or a
            ``PathPointer``.

        :param startpos: The file position at which the view will
            start reading.  This can be used to skip over preface
            sections.

        :param encoding: The unicode encoding that should be used to
            read the file's contents.  If no encoding is specified,
            then the file's contents will be read as a non-unicode
            string (i.e., a str).

        :param source: If specified, then use a ``SourcedStringStream``
            to annotate all strings read from the file with
            information about their start offset, end offset,
            and docid.  The value of ``source`` will be used as the docid.
        """
        if block_reader:
            self.read_block = block_reader

        # Initialize our toknum/filepos mapping.
        self._toknum = [0]
        self._filepos = [startpos]
        self._encoding = encoding
        self._source = source
        # We don't know our length (number of tokens) yet.
        self._len = None

        self._fileid = fileid
        self._stream = None

        self._current_toknum = None
        """This variable is set to the index of the next token that
           will be read, immediately before ``self.read_block()`` is
           called.  This is provided for the benefit of the block
           reader, which under rare circumstances may need to know
           the current token number."""

        self._current_blocknum = None
        """This variable is set to the index of the next block that
           will be read, immediately before ``self.read_block()`` is
           called.  This is provided for the benefit of the block
           reader, which under rare circumstances may need to know
           the current block number."""

        # Find the length of the file.
        try:
            if isinstance(self._fileid, PathPointer):
                self._eofpos = self._fileid.file_size()
            else:
                self._eofpos = os.stat(self._fileid).st_size
        except Exception, exc:
            raise ValueError('Unable to open or access %r -- %s' %
                             (fileid, exc))

        # Maintain a cache of the most recently read block, to
        # increase efficiency of random access.
        self._cache = (-1, -1, None)

    fileid = property(lambda self: self._fileid, doc="""
        The fileid of the file that is accessed by this view.

        :type: str or PathPointer""")

    def read_block(self, stream):
        """
        Read a block from the input stream.

        :return: a block of tokens from the input stream
        :rtype: list(any)
        :param stream: an input stream
        :type stream: stream
        """
        raise NotImplementedError('Abstract Method')

    def _open(self):
        """
        Open the file stream associated with this corpus view.  This
        will be performed if any value is read from the view while its
        file stream is closed.
        """
        if isinstance(self._fileid, PathPointer):
            self._stream = self._fileid.open(self._encoding)
        elif self._encoding:
            self._stream = SeekableUnicodeStreamReader(
                open(self._fileid, 'rb'), self._encoding)
        else:
            self._stream = open(self._fileid, 'rb')
        if self._source is not None:
            self._stream = SourcedStringStream(self._stream, self._source)

    def close(self):
        """
        Close the file stream associated with this corpus view.  This
        can be useful if you are worried about running out of file
        handles (although the stream should automatically be closed
        upon garbage collection of the corpus view).  If the corpus
        view is accessed after it is closed, it will be automatically
        re-opened.
        """
        if self._stream is not None:
            self._stream.close()
        self._stream = None

    def __len__(self):
        if self._len is None:
            # iterate_from() sets self._len when it reaches the end
            # of the file:
            for tok in self.iterate_from(self._toknum[-1]): pass
        return self._len

    def __getitem__(self, i):
        if isinstance(i, slice):
            start, stop = slice_bounds(self, i)
            # Check if it's in the cache.
            offset = self._cache[0]
            if offset <= start and stop <= self._cache[1]:
                return self._cache[2][start-offset:stop-offset]
            # Construct & return the result.
            return LazySubsequence(self, start, stop)
        else:
            # Handle negative indices
            if i < 0: i += len(self)
            if i < 0: raise IndexError('index out of range')
            # Check if it's in the cache.
            offset = self._cache[0]
            if offset <= i < self._cache[1]:
                return self._cache[2][i-offset]
            # Use iterate_from to extract it.
            try:
                return self.iterate_from(i).next()
            except StopIteration:
                raise IndexError('index out of range')

    # If we wanted to be thread-safe, then this method would need to
    # do some locking.
    def iterate_from(self, start_tok):
        # Start by feeding from the cache, if possible.
        if self._cache[0] <= start_tok < self._cache[1]:
            for tok in self._cache[2][start_tok-self._cache[0]:]:
                yield tok
                start_tok += 1

        # Decide where in the file we should start.  If `start` is in
        # our mapping, then we can jump straight to the correct block;
        # otherwise, start at the last block we've processed.
        if start_tok < self._toknum[-1]:
            block_index = bisect.bisect_right(self._toknum, start_tok)-1
            toknum = self._toknum[block_index]
            filepos = self._filepos[block_index]
        else:
            block_index = len(self._toknum)-1
            toknum = self._toknum[-1]
            filepos = self._filepos[-1]

        # Open the stream, if it's not open already.
        if self._stream is None:
            self._open()

        # Each iteration through this loop, we read a single block
        # from the stream.
        while filepos < self._eofpos:
            # Read the next block.
            self._stream.seek(filepos)
            self._current_toknum = toknum
            self._current_blocknum = block_index
            tokens = self.read_block(self._stream)
            assert isinstance(tokens, (tuple, list, AbstractLazySequence)), (
                'block reader %s() should return list or tuple.' %
                self.read_block.__name__)
            num_toks = len(tokens)
            new_filepos = self._stream.tell()
            assert new_filepos > filepos, (
                'block reader %s() should consume at least 1 byte (filepos=%d)' %
                (self.read_block.__name__, filepos))

            # Update our cache.
            self._cache = (toknum, toknum+num_toks, list(tokens))

            # Update our mapping.
            assert toknum <= self._toknum[-1]
            if num_toks > 0:
                block_index += 1
                if toknum == self._toknum[-1]:
                    assert new_filepos > self._filepos[-1] # monotonic!
                    self._filepos.append(new_filepos)
                    self._toknum.append(toknum+num_toks)
                else:
                    # Check for consistency:
                    assert new_filepos == self._filepos[block_index], (
                        'inconsistent block reader (num chars read)')
                    assert toknum+num_toks == self._toknum[block_index], (
                        'inconsistent block reader (num tokens returned)')

            # If we reached the end of the file, then update self._len
            if new_filepos == self._eofpos:
                self._len = toknum + num_toks
            # Generate the tokens in this block (but skip any tokens
            # before start_tok).  Note that between yields, our state
            # may be modified.
            for tok in tokens[max(0, start_tok-toknum):]:
                yield tok
            # If we're at the end of the file, then we're done.
            assert new_filepos <= self._eofpos
            if new_filepos == self._eofpos:
                break
            # Update our indices
            toknum += num_toks
            filepos = new_filepos

        # If we reach this point, then we should know our length.
        assert self._len is not None

    # Use concat for these, so we can use a ConcatenatedCorpusView
    # when possible.
    def __add__(self, other):
        return concat([self, other])
    def __radd__(self, other):
        return concat([other, self])
    def __mul__(self, count):
        return concat([self] * count)
    def __rmul__(self, count):
        return concat([self] * count)
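
# Illustrative sketch (not part of the original NLTK module): how a block
# reader plugs into StreamBackedCorpusView.  The path 'example.txt' is a
# hypothetical stand-in for a real corpus file; the function is defined
# here but never called.
def _demo_stream_backed_view(path='example.txt'):
    # A block reader consumes part of the stream and returns a list of
    # tokens -- here, one line's worth of whitespace-separated words.
    def line_block_reader(stream):
        return stream.readline().split()
    view = StreamBackedCorpusView(path, line_block_reader)
    print view[0]    # tokens are constructed lazily, on demand
    print len(view)  # forces a full pass over the file to count tokens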

class ConcatenatedCorpusView(AbstractLazySequence):
    """
    A 'view' of a corpus file that joins together one or more
    ``StreamBackedCorpusViews<StreamBackedCorpusView>``.  At most
    one file handle is left open at any time.
    """
    def __init__(self, corpus_views):
        self._pieces = corpus_views
        """A list of the corpus subviews that make up this
        concatenation."""

        self._offsets = [0]
        """A list of offsets, indicating the index at which each
        subview begins.  In particular::
            offsets[i] = sum([len(p) for p in pieces[:i]])"""

        self._open_piece = None
        """The most recently accessed corpus subview (or None).
        Before a new subview is accessed, this subview will be closed."""

    def __len__(self):
        if len(self._offsets) <= len(self._pieces):
            # Iterate to the end of the corpus.
            for tok in self.iterate_from(self._offsets[-1]): pass
        return self._offsets[-1]

    def close(self):
        for piece in self._pieces:
            piece.close()

    def iterate_from(self, start_tok):
        piecenum = bisect.bisect_right(self._offsets, start_tok)-1

        while piecenum < len(self._pieces):
            offset = self._offsets[piecenum]
            piece = self._pieces[piecenum]

            # If we've got another piece open, close it first.
            if self._open_piece is not piece:
                if self._open_piece is not None:
                    self._open_piece.close()
                self._open_piece = piece

            # Get everything we can from this piece.
            for tok in piece.iterate_from(max(0, start_tok-offset)):
                yield tok

            # Update the offset table.
            if piecenum+1 == len(self._offsets):
                self._offsets.append(self._offsets[-1] + len(piece))

            # Move on to the next piece.
            piecenum += 1

def concat(docs):
    """
    Concatenate together the contents of multiple documents from a
    single corpus, using an appropriate concatenation function.  This
    utility function is used by corpus readers when the user requests
    more than one document at a time.
    """
    if len(docs) == 1:
        return docs[0]
    if len(docs) == 0:
        raise ValueError('concat() expects at least one object!')

    types = set([d.__class__ for d in docs])

    # If they're all strings, use string concatenation.
    if types.issubset([str, unicode, basestring]):
        return reduce((lambda a,b:a+b), docs, '')

    # If they're all corpus views, then use ConcatenatedCorpusView.
    for typ in types:
        if not issubclass(typ, (StreamBackedCorpusView,
                                ConcatenatedCorpusView)):
            break
    else:
        return ConcatenatedCorpusView(docs)

    # If they're all lazy sequences, use a lazy concatenation
    for typ in types:
        if not issubclass(typ, AbstractLazySequence):
            break
    else:
        return LazyConcatenation(docs)

    # Otherwise, see what we can do:
    if len(types) == 1:
        typ = list(types)[0]

        if issubclass(typ, list):
            return reduce((lambda a,b:a+b), docs, [])

        if issubclass(typ, tuple):
            return reduce((lambda a,b:a+b), docs, ())

        if ElementTree.iselement(typ):
            xmltree = ElementTree.Element('documents')
            for doc in docs: xmltree.append(doc)
            return xmltree

    # No method found!
    raise ValueError("Don't know how to concatenate types: %r" % types)
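
# Illustrative sketch (not part of the original NLTK module): concat()
# picks its strategy from the element types -- plain lists, tuples and
# strings are joined directly, while corpus views would be wrapped in a
# ConcatenatedCorpusView.
def _demo_concat():
    print concat([['a', 'b'], ['c']])    # -> ['a', 'b', 'c']
    print concat([('a', 'b'), ('c',)])   # -> ('a', 'b', 'c')
    print concat(['ab', 'c'])            # -> 'abc'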

######################################################################
#{ Corpus View for Pickled Sequences
######################################################################

class PickleCorpusView(StreamBackedCorpusView):
    """
    A stream backed corpus view for corpus files that consist of
    sequences of serialized Python objects (serialized using
    ``pickle.dump``).  One use case for this class is to store the
    result of running feature detection on a corpus to disk.  This can
    be useful when performing feature detection is expensive (so we
    don't want to repeat it); but the corpus is too large to store in
    memory.  The following example illustrates this technique:

    .. doctest::
        :options: +SKIP

        >>> from nltk.corpus.reader.util import PickleCorpusView
        >>> from nltk.util import LazyMap
        >>> feature_corpus = LazyMap(detect_features, corpus)
        >>> PickleCorpusView.write(feature_corpus, some_fileid)
        >>> pcv = PickleCorpusView(some_fileid)
    """
    BLOCK_SIZE = 100
    PROTOCOL = -1

    def __init__(self, fileid, delete_on_gc=False):
        """
        Create a new corpus view that reads the pickle corpus
        ``fileid``.

        :param delete_on_gc: If true, then ``fileid`` will be deleted
            whenever this object gets garbage-collected.
        """
        self._delete_on_gc = delete_on_gc
        StreamBackedCorpusView.__init__(self, fileid)

    def read_block(self, stream):
        result = []
        for i in range(self.BLOCK_SIZE):
            try: result.append(pickle.load(stream))
            except EOFError: break
        return result

    def __del__(self):
        """
        If ``delete_on_gc`` was set to true when this
        ``PickleCorpusView`` was created, then delete the corpus view's
        fileid.  (This method is called whenever a
        ``PickleCorpusView`` is garbage-collected.)
        """
        if getattr(self, '_delete_on_gc'):
            if os.path.exists(self._fileid):
                try: os.remove(self._fileid)
                except (OSError, IOError): pass
        self.__dict__.clear() # make the garbage collector's job easier

    @classmethod
    def write(cls, sequence, output_file):
        if isinstance(output_file, basestring):
            output_file = open(output_file, 'wb')
        for item in sequence:
            pickle.dump(item, output_file, cls.PROTOCOL)

    @classmethod
    def cache_to_tempfile(cls, sequence, delete_on_gc=True):
        """
        Write the given sequence to a temporary file as a pickle
        corpus; and then return a ``PickleCorpusView`` view for that
        temporary corpus file.

        :param delete_on_gc: If true, then the temporary file will be
            deleted whenever this object gets garbage-collected.
        """
        try:
            fd, output_file_name = tempfile.mkstemp('.pcv', 'nltk-')
            output_file = os.fdopen(fd, 'wb')
            cls.write(sequence, output_file)
            output_file.close()
            return PickleCorpusView(output_file_name, delete_on_gc)
        except (OSError, IOError), e:
            raise ValueError('Error while creating temp file: %s' % e)
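
# Illustrative sketch (not part of the original NLTK module): round-tripping
# a sequence through PickleCorpusView via a temporary file, so the items
# never need to be held in memory all at once.
def _demo_pickle_corpus_view():
    features = [{'word': w} for w in ['a', 'b', 'c']]
    # cache_to_tempfile() pickles the items and returns a lazy view; the
    # temp file is removed once the view is garbage-collected.
    view = PickleCorpusView.cache_to_tempfile(features, delete_on_gc=True)
    print view[1]    # -> {'word': 'b'}, unpickled on demand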

######################################################################
#{ Block Readers
######################################################################

def read_whitespace_block(stream):
    toks = []
    for i in range(20): # Read 20 lines at a time.
        toks.extend(stream.readline().split())
    return toks

def read_wordpunct_block(stream):
    toks = []
    for i in range(20): # Read 20 lines at a time.
        toks.extend(wordpunct_tokenize(stream.readline()))
    return toks

def read_line_block(stream):
    toks = []
    for i in range(20):
        line = stream.readline()
        if not line: return toks
        toks.append(line.rstrip('\n'))
    return toks

def read_blankline_block(stream):
    s = ''
    while True:
        line = stream.readline()
        # End of file:
        if not line:
            if s: return [s]
            else: return []
        # Blank line:
        elif line and not line.strip():
            if s: return [s]
        # Other line:
        else:
            s += line
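
# Illustrative sketch (not part of the original NLTK module):
# read_blankline_block() treats blank lines as paragraph separators and
# returns at most one paragraph per call.
def _demo_read_blankline_block():
    from StringIO import StringIO
    stream = StringIO('first para line 1\nline 2\n\nsecond para\n')
    print read_blankline_block(stream)  # -> ['first para line 1\nline 2\n']
    print read_blankline_block(stream)  # -> ['second para\n']
    print read_blankline_block(stream)  # -> []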

def read_alignedsent_block(stream):
    s = ''
    while True:
        line = stream.readline()
        # End of file (checked before indexing into the line, since
        # ``line[0]`` would raise IndexError on an empty string):
        if not line:
            if s: return [s]
            else: return []
        # Separator or blank line:
        if line[0] == '=' or line[0] == '\n' or line[:2] == '\r\n':
            continue
        # Other line:
        else:
            s += line
            if re.match('^\d+-\d+', line) is not None:
                return [s]

def read_regexp_block(stream, start_re, end_re=None):
    """
    Read a sequence of tokens from a stream, where tokens begin with
    lines that match ``start_re``.  If ``end_re`` is specified, then
    tokens end with lines that match ``end_re``; otherwise, tokens end
    whenever the next line matching ``start_re`` or EOF is found.
    """
    # Scan until we find a line matching the start regexp.
    while True:
        line = stream.readline()
        if not line: return [] # end of file.
        if re.match(start_re, line): break

    # Scan until we find another line matching the regexp, or EOF.
    lines = [line]
    while True:
        oldpos = stream.tell()
        line = stream.readline()
        # End of file:
        if not line:
            return [''.join(lines)]
        # End of token:
        if end_re is not None and re.match(end_re, line):
            return [''.join(lines)]
        # Start of new token: backup to just before it starts, and
        # return the token we've already collected.
        if end_re is None and re.match(start_re, line):
            stream.seek(oldpos)
            return [''.join(lines)]
        # Anything else is part of the token.
        lines.append(line)
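
# Illustrative sketch (not part of the original NLTK module):
# read_regexp_block() with only a start pattern splits the stream at each
# line matching that pattern, rewinding so the next call starts cleanly.
def _demo_read_regexp_block():
    from StringIO import StringIO
    stream = StringIO('## sec 1\nbody A\n## sec 2\nbody B\n')
    print read_regexp_block(stream, start_re=r'##')  # -> ['## sec 1\nbody A\n']
    print read_regexp_block(stream, start_re=r'##')  # -> ['## sec 2\nbody B\n']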

def read_sexpr_block(stream, block_size=16384, comment_char=None):
    """
    Read a sequence of s-expressions from the stream, and leave the
    stream's file position at the end of the last complete s-expression
    read.  This function will always return at least one s-expression,
    unless there are no more s-expressions in the file.

    If the file ends in the middle of an s-expression, then that
    incomplete s-expression is returned when the end of the file is
    reached.

    :param block_size: The default block size for reading.  If an
        s-expression is longer than one block, then more than one
        block will be read.
    :param comment_char: A character that marks comments.  Any lines
        that begin with this character will be stripped out.
        (If spaces or tabs precede the comment character, then the
        line will not be stripped.)
    """
    start = stream.tell()
    block = stream.read(block_size)
    encoding = getattr(stream, 'encoding', None)
    assert encoding is not None or isinstance(block, str)
    if encoding not in (None, 'utf-8'):
        import warnings
        warnings.warn('Parsing may fail, depending on the properties '
                      'of the %s encoding!' % encoding)
        # (e.g., the utf-16 encoding does not work because it insists
        # on adding BOMs to the beginning of encoded strings.)

    if comment_char:
        COMMENT = re.compile('(?m)^%s.*$' % re.escape(comment_char))
    while True:
        try:
            # If we're stripping comments, then make sure our block ends
            # on a line boundary; and then replace any comments with
            # space characters.  (We can't just strip them out -- that
            # would make our offset wrong.)
            if comment_char:
                block += stream.readline()
                block = re.sub(COMMENT, _sub_space, block)
            # Read the block.
            tokens, offset = _parse_sexpr_block(block)
            # Skip whitespace
            offset = re.compile(r'\s*').search(block, offset).end()

            # Move to the end position.
            if encoding is None:
                stream.seek(start+offset)
            else:
                stream.seek(start+len(block[:offset].encode(encoding)))

            # Return the list of tokens we processed
            return tokens
        except ValueError, e:
            if e.args[0] == 'Block too small':
                next_block = stream.read(block_size)
                if next_block:
                    block += next_block
                    continue
                else:
                    # The file ended mid-sexpr -- return what we got.
                    return [block.strip()]
            else: raise

def _sub_space(m):
    """Helper function: given a regexp match, return a string of
    spaces that's the same length as the matched string."""
    return ' '*(m.end()-m.start())

def _parse_sexpr_block(block):
    tokens = []
    start = end = 0

    while end < len(block):
        m = re.compile(r'\S').search(block, end)
        if not m:
            return tokens, end

        start = m.start()

        # Case 1: sexpr is not parenthesized.
        if m.group() != '(':
            m2 = re.compile(r'[\s(]').search(block, start)
            if m2:
                end = m2.start()
            else:
                if tokens: return tokens, end
                raise ValueError('Block too small')
        # Case 2: parenthesized sexpr.
        else:
            nesting = 0
            for m in re.compile(r'[()]').finditer(block, start):
                if m.group()=='(': nesting += 1
                else: nesting -= 1
                if nesting == 0:
                    end = m.end()
                    break
            else:
                if tokens: return tokens, end
                raise ValueError('Block too small')

        tokens.append(block[start:end])

    return tokens, end
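
# Illustrative sketch (not part of the original NLTK module):
# read_sexpr_block() returns whole s-expressions, enlarging its read block
# until at least one complete expression has been seen.
def _demo_read_sexpr_block():
    from StringIO import StringIO
    stream = StringIO('(a (b c)) (d e) atom')
    print read_sexpr_block(stream, block_size=8)  # -> ['(a (b c))', '(d e)']
    print read_sexpr_block(stream, block_size=8)  # -> ['atom']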

######################################################################
#{ Finding Corpus Items
######################################################################

def find_corpus_fileids(root, regexp):
    if not isinstance(root, PathPointer):
        raise TypeError('find_corpus_fileids: expected a PathPointer')
    regexp += '$'

    # Find fileids in a zipfile: scan the zipfile's namelist.  Filter
    # out entries that end in '/' -- they're directories.
    if isinstance(root, ZipFilePathPointer):
        fileids = [name[len(root.entry):] for name in root.zipfile.namelist()
                   if not name.endswith('/')]
        items = [name for name in fileids if re.match(regexp, name)]
        return sorted(items)

    # Find fileids in a directory: use os.walk to search all (proper
    # or symlinked) subdirectories, and match paths against the regexp.
    elif isinstance(root, FileSystemPathPointer):
        items = []
        for dirname, subdirs, fileids in os.walk(root.path, followlinks=True):
            prefix = ''.join('%s/' % p for p in _path_from(root.path, dirname))
            items += [prefix+fileid for fileid in fileids
                      if re.match(regexp, prefix+fileid)]
            # Don't visit svn directories:
            if '.svn' in subdirs: subdirs.remove('.svn')
        return sorted(items)

    else:
        raise AssertionError("Don't know how to handle %r" % root)

def _path_from(parent, child):
    if os.path.split(parent)[1] == '':
        parent = os.path.split(parent)[0]
    path = []
    while parent != child:
        child, dirname = os.path.split(child)
        path.insert(0, dirname)
        assert os.path.split(child)[0] != child
    return path
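
# Illustrative sketch (not part of the original NLTK module):
# find_corpus_fileids() matches root-relative paths against a regexp.
# The directory '/tmp/mycorpus' is a hypothetical example; the function
# is defined here but never called.
def _demo_find_corpus_fileids():
    root = FileSystemPathPointer('/tmp/mycorpus')
    # Every '.txt' file anywhere under the root, as root-relative fileids:
    print find_corpus_fileids(root, r'.*\.txt')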

######################################################################
#{ Paragraph structure in Treebank files
######################################################################

def tagged_treebank_para_block_reader(stream):
    # Read the next paragraph.
    para = ''
    while True:
        line = stream.readline()
        # End of paragraph:
        if re.match('======+\s*$', line):
            if para.strip(): return [para]
        # End of file:
        elif line == '':
            if para.strip(): return [para]
            else: return []
        # Content line:
        else:
            para += line
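
# Illustrative sketch (not part of the original NLTK module):
# tagged_treebank_para_block_reader() uses runs of '=' characters as
# paragraph separators, as found in Treebank .pos files.
def _demo_tagged_treebank_para_block_reader():
    from StringIO import StringIO
    stream = StringIO('==========\nThe/DT dog/NN barked/VBD ./.\n'
                      '==========\nA/DT cat/NN slept/VBD ./.\n')
    print tagged_treebank_para_block_reader(stream)
    # -> ['The/DT dog/NN barked/VBD ./.\n']  (one paragraph per call)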