
/nltk/corpus/reader/util.py

https://github.com/BrucePHill/nltk
Possible License(s): Apache-2.0
# Natural Language Toolkit: Corpus Reader Utilities
#
# Copyright (C) 2001-2013 NLTK Project
# Author: Steven Bird <sb@ldc.upenn.edu>
#         Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT

import os
import bisect
import re
import tempfile
from functools import reduce

try:
    import cPickle as pickle
except ImportError:
    import pickle

# Use the c version of ElementTree, which is faster, if possible:
try: from xml.etree import cElementTree as ElementTree
except ImportError: from xml.etree import ElementTree

from nltk import compat
from nltk.tokenize import wordpunct_tokenize
from nltk.internals import slice_bounds
from nltk.data import PathPointer, FileSystemPathPointer, ZipFilePathPointer
from nltk.data import SeekableUnicodeStreamReader
from nltk.util import AbstractLazySequence, LazySubsequence, LazyConcatenation, py25

######################################################################
#{ Corpus View
######################################################################

class StreamBackedCorpusView(AbstractLazySequence):
    """
    A 'view' of a corpus file, which acts like a sequence of tokens:
    it can be accessed by index, iterated over, etc.  However, the
    tokens are only constructed as-needed -- the entire corpus is
    never stored in memory at once.

    The constructor to ``StreamBackedCorpusView`` takes two arguments:
    a corpus fileid (specified as a string or as a ``PathPointer``);
    and a block reader.  A "block reader" is a function that reads
    zero or more tokens from a stream, and returns them as a list.  A
    very simple example of a block reader is:

        >>> def simple_block_reader(stream):
        ...     return stream.readline().split()

    This simple block reader reads a single line at a time, and
    returns a single token (consisting of a string) for each
    whitespace-separated substring on the line.

    When deciding how to define the block reader for a given
    corpus, careful consideration should be given to the size of
    blocks handled by the block reader.  Smaller block sizes will
    increase the memory requirements of the corpus view's internal
    data structures (by 2 integers per block).  On the other hand,
    larger block sizes may decrease performance for random access to
    the corpus.  (But note that larger block sizes will *not*
    decrease performance for iteration.)

    Internally, ``CorpusView`` maintains a partial mapping from token
    index to file position, with one entry per block.  When a token
    with a given index *i* is requested, the ``CorpusView`` constructs
    it as follows:

    1. First, it searches the toknum/filepos mapping for the token
       index closest to (but less than or equal to) *i*.

    2. Then, starting at the file position corresponding to that
       index, it reads one block at a time using the block reader
       until it reaches the requested token.

    The toknum/filepos mapping is created lazily: it is initially
    empty, but every time a new block is read, the block's
    initial token is added to the mapping.  (Thus, the toknum/filepos
    map has one entry per block.)

    In order to increase efficiency for random access patterns that
    have high degrees of locality, the corpus view may cache one or
    more blocks.

    :note: Each ``CorpusView`` object internally maintains an open file
        object for its underlying corpus file.  This file should be
        automatically closed when the ``CorpusView`` is garbage collected,
        but if you wish to close it manually, use the ``close()``
        method.  If you access a ``CorpusView``'s items after it has been
        closed, the file object will be automatically re-opened.

    :warning: If the contents of the file are modified during the
        lifetime of the ``CorpusView``, then the ``CorpusView``'s behavior
        is undefined.

    :warning: If a unicode encoding is specified when constructing a
        ``CorpusView``, then the block reader may only call
        ``stream.seek()`` with offsets that have been returned by
        ``stream.tell()``; in particular, calling ``stream.seek()`` with
        relative offsets, or with offsets based on string lengths, may
        lead to incorrect behavior.

    :ivar _block_reader: The function used to read
        a single block from the underlying file stream.
    :ivar _toknum: A list containing the token index of each block
        that has been processed.  In particular, ``_toknum[i]`` is the
        token index of the first token in block ``i``.  Together
        with ``_filepos``, this forms a partial mapping between token
        indices and file positions.
    :ivar _filepos: A list containing the file position of each block
        that has been processed.  In particular, ``_filepos[i]`` is the
        file position of the first character in block ``i``.  Together
        with ``_toknum``, this forms a partial mapping between token
        indices and file positions.
    :ivar _stream: The stream used to access the underlying corpus file.
    :ivar _len: The total number of tokens in the corpus, if known;
        or None, if the number of tokens is not yet known.
    :ivar _eofpos: The character position of the last character in the
        file.  This is calculated when the corpus view is initialized,
        and is used to decide when the end of file has been reached.
    :ivar _cache: A cache of the most recently read block.  It
        is encoded as a tuple (start_toknum, end_toknum, tokens), where
        start_toknum is the token index of the first token in the block;
        end_toknum is the token index of the first token not in the
        block; and tokens is a list of the tokens in the block.
    """
    def __init__(self, fileid, block_reader=None, startpos=0,
                 encoding='utf8'):
        """
        Create a new corpus view, based on the file ``fileid``, and
        read with ``block_reader``.  See the class documentation
        for more information.

        :param fileid: The path to the file that is read by this
            corpus view.  ``fileid`` can either be a string or a
            ``PathPointer``.

        :param startpos: The file position at which the view will
            start reading.  This can be used to skip over preface
            sections.

        :param encoding: The unicode encoding that should be used to
            read the file's contents.  If no encoding is specified,
            then the file's contents will be read as a non-unicode
            string (i.e., a str).
        """
        if block_reader:
            self.read_block = block_reader
        # Initialize our toknum/filepos mapping.
        self._toknum = [0]
        self._filepos = [startpos]
        self._encoding = encoding
        # We don't know our length (number of tokens) yet.
        self._len = None

        self._fileid = fileid
        self._stream = None

        self._current_toknum = None
        """This variable is set to the index of the next token that
           will be read, immediately before ``self.read_block()`` is
           called.  This is provided for the benefit of the block
           reader, which under rare circumstances may need to know
           the current token number."""

        self._current_blocknum = None
        """This variable is set to the index of the next block that
           will be read, immediately before ``self.read_block()`` is
           called.  This is provided for the benefit of the block
           reader, which under rare circumstances may need to know
           the current block number."""

        # Find the length of the file.
        try:
            if isinstance(self._fileid, PathPointer):
                self._eofpos = self._fileid.file_size()
            else:
                self._eofpos = os.stat(self._fileid).st_size
        except Exception as exc:
            raise ValueError('Unable to open or access %r -- %s' %
                             (fileid, exc))

        # Maintain a cache of the most recently read block, to
        # increase efficiency of random access.
        self._cache = (-1, -1, None)

    fileid = property(lambda self: self._fileid, doc="""
        The fileid of the file that is accessed by this view.

        :type: str or PathPointer""")

    def read_block(self, stream):
        """
        Read a block from the input stream.

        :return: a block of tokens from the input stream
        :rtype: list(any)
        :param stream: an input stream
        :type stream: stream
        """
        raise NotImplementedError('Abstract Method')

    def _open(self):
        """
        Open the file stream associated with this corpus view.  This
        will be performed if any value is read from the view while its
        file stream is closed.
        """
        if isinstance(self._fileid, PathPointer):
            self._stream = self._fileid.open(self._encoding)
        elif self._encoding:
            self._stream = SeekableUnicodeStreamReader(
                open(self._fileid, 'rb'), self._encoding)
        else:
            self._stream = open(self._fileid, 'rb')

    def close(self):
        """
        Close the file stream associated with this corpus view.  This
        can be useful if you are worried about running out of file
        handles (although the stream should automatically be closed
        upon garbage collection of the corpus view).  If the corpus
        view is accessed after it is closed, it will be automatically
        re-opened.
        """
        if self._stream is not None:
            self._stream.close()
        self._stream = None

    def __len__(self):
        if self._len is None:
            # iterate_from() sets self._len when it reaches the end
            # of the file:
            for tok in self.iterate_from(self._toknum[-1]): pass
        return self._len

    def __getitem__(self, i):
        if isinstance(i, slice):
            start, stop = slice_bounds(self, i)
            # Check if it's in the cache.
            offset = self._cache[0]
            if offset <= start and stop <= self._cache[1]:
                return self._cache[2][start-offset:stop-offset]
            # Construct & return the result.
            return LazySubsequence(self, start, stop)
        else:
            # Handle negative indices
            if i < 0: i += len(self)
            if i < 0: raise IndexError('index out of range')
            # Check if it's in the cache.
            offset = self._cache[0]
            if offset <= i < self._cache[1]:
                return self._cache[2][i-offset]
            # Use iterate_from to extract it.
            try:
                return next(self.iterate_from(i))
            except StopIteration:
                raise IndexError('index out of range')

    # If we wanted to be thread-safe, then this method would need to
    # do some locking.
    def iterate_from(self, start_tok):
        # Start by feeding from the cache, if possible.
        if self._cache[0] <= start_tok < self._cache[1]:
            for tok in self._cache[2][start_tok-self._cache[0]:]:
                yield tok
                start_tok += 1

        # Decide where in the file we should start.  If `start` is in
        # our mapping, then we can jump straight to the correct block;
        # otherwise, start at the last block we've processed.
        if start_tok < self._toknum[-1]:
            block_index = bisect.bisect_right(self._toknum, start_tok)-1
            toknum = self._toknum[block_index]
            filepos = self._filepos[block_index]
        else:
            block_index = len(self._toknum)-1
            toknum = self._toknum[-1]
            filepos = self._filepos[-1]

        # Open the stream, if it's not open already.
        if self._stream is None:
            self._open()

        # Each iteration through this loop, we read a single block
        # from the stream.
        while filepos < self._eofpos:
            # Read the next block.
            self._stream.seek(filepos)
            self._current_toknum = toknum
            self._current_blocknum = block_index
            tokens = self.read_block(self._stream)
            assert isinstance(tokens, (tuple, list, AbstractLazySequence)), (
                'block reader %s() should return list or tuple.' %
                self.read_block.__name__)
            num_toks = len(tokens)
            new_filepos = self._stream.tell()
            assert new_filepos > filepos, (
                'block reader %s() should consume at least 1 byte (filepos=%d)' %
                (self.read_block.__name__, filepos))

            # Update our cache.
            self._cache = (toknum, toknum+num_toks, list(tokens))

            # Update our mapping.
            assert toknum <= self._toknum[-1]
            if num_toks > 0:
                block_index += 1
                if toknum == self._toknum[-1]:
                    assert new_filepos > self._filepos[-1] # monotonic!
                    self._filepos.append(new_filepos)
                    self._toknum.append(toknum+num_toks)
                else:
                    # Check for consistency:
                    assert new_filepos == self._filepos[block_index], (
                        'inconsistent block reader (num chars read)')
                    assert toknum+num_toks == self._toknum[block_index], (
                        'inconsistent block reader (num tokens returned)')

            # If we reached the end of the file, then update self._len
            if new_filepos == self._eofpos:
                self._len = toknum + num_toks
            # Generate the tokens in this block (but skip any tokens
            # before start_tok).  Note that between yields, our state
            # may be modified.
            for tok in tokens[max(0, start_tok-toknum):]:
                yield tok
            # If we're at the end of the file, then we're done.
            assert new_filepos <= self._eofpos
            if new_filepos == self._eofpos:
                break
            # Update our indices
            toknum += num_toks
            filepos = new_filepos

        # If we reach this point, then we should know our length.
        assert self._len is not None

    # Use concat for these, so we can use a ConcatenatedCorpusView
    # when possible.
    def __add__(self, other):
        return concat([self, other])
    def __radd__(self, other):
        return concat([other, self])
    def __mul__(self, count):
        return concat([self] * count)
    def __rmul__(self, count):
        return concat([self] * count)
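
# Illustrative usage sketch (a minimal example, not part of the original
# module).  It assumes a small plain-text file at the hypothetical path
# 'example_corpus.txt'; the block reader reads one line per block.
def _demo_stream_backed_corpus_view(path='example_corpus.txt'):
    def simple_block_reader(stream):
        # One block per line; each whitespace-separated string is a token.
        return stream.readline().split()
    view = StreamBackedCorpusView(path, simple_block_reader)
    print(view[:5])    # first five tokens, read lazily from disk
    print(len(view))   # total token count (forces one pass over the file)
    view.close()       # the stream is also closed on garbage collection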

class ConcatenatedCorpusView(AbstractLazySequence):
    """
    A 'view' of a corpus file that joins together one or more
    ``StreamBackedCorpusViews<StreamBackedCorpusView>``.  At most
    one file handle is left open at any time.
    """
    def __init__(self, corpus_views):
        self._pieces = corpus_views
        """A list of the corpus subviews that make up this
        concatenation."""

        self._offsets = [0]
        """A list of offsets, indicating the index at which each
        subview begins.  In particular::
            offsets[i] = sum([len(p) for p in pieces[:i]])"""

        self._open_piece = None
        """The most recently accessed corpus subview (or None).
        Before a new subview is accessed, this subview will be closed."""

    def __len__(self):
        if len(self._offsets) <= len(self._pieces):
            # Iterate to the end of the corpus.
            for tok in self.iterate_from(self._offsets[-1]): pass
        return self._offsets[-1]

    def close(self):
        for piece in self._pieces:
            piece.close()

    def iterate_from(self, start_tok):
        piecenum = bisect.bisect_right(self._offsets, start_tok)-1

        while piecenum < len(self._pieces):
            offset = self._offsets[piecenum]
            piece = self._pieces[piecenum]

            # If we've got another piece open, close it first.
            if self._open_piece is not piece:
                if self._open_piece is not None:
                    self._open_piece.close()
                self._open_piece = piece

            # Get everything we can from this piece.
            for tok in piece.iterate_from(max(0, start_tok-offset)):
                yield tok

            # Update the offset table.
            if piecenum+1 == len(self._offsets):
                self._offsets.append(self._offsets[-1] + len(piece))

            # Move on to the next piece.
            piecenum += 1

def concat(docs):
    """
    Concatenate together the contents of multiple documents from a
    single corpus, using an appropriate concatenation function.  This
    utility function is used by corpus readers when the user requests
    more than one document at a time.
    """
    if len(docs) == 1:
        return docs[0]
    if len(docs) == 0:
        raise ValueError('concat() expects at least one object!')

    types = set(d.__class__ for d in docs)

    # If they're all strings, use string concatenation.
    if all(isinstance(doc, compat.string_types) for doc in docs):
        return ''.join(docs)

    # If they're all corpus views, then use ConcatenatedCorpusView.
    for typ in types:
        if not issubclass(typ, (StreamBackedCorpusView,
                                ConcatenatedCorpusView)):
            break
    else:
        return ConcatenatedCorpusView(docs)

    # If they're all lazy sequences, use a lazy concatenation
    for typ in types:
        if not issubclass(typ, AbstractLazySequence):
            break
    else:
        return LazyConcatenation(docs)

    # Otherwise, see what we can do:
    if len(types) == 1:
        typ = list(types)[0]

        if issubclass(typ, list):
            return reduce((lambda a,b:a+b), docs, [])

        if issubclass(typ, tuple):
            return reduce((lambda a,b:a+b), docs, ())

        if ElementTree.iselement(typ):
            xmltree = ElementTree.Element('documents')
            for doc in docs: xmltree.append(doc)
            return xmltree

    # No method found!
    raise ValueError("Don't know how to concatenate types: %r" % types)
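
# Sketch: concat() applied to ordinary Python sequences (the corpus-view
# branches above behave analogously, but require real corpus files).
def _demo_concat():
    print(concat(['abc']))               # single document, returned unchanged
    print(concat([['a', 'b'], ['c']]))   # lists are joined into one list
    print(concat([('a',), ('b', 'c')]))  # tuples are joined into one tuple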

######################################################################
#{ Corpus View for Pickled Sequences
######################################################################

class PickleCorpusView(StreamBackedCorpusView):
    """
    A stream backed corpus view for corpus files that consist of
    sequences of serialized Python objects (serialized using
    ``pickle.dump``).  One use case for this class is to store the
    result of running feature detection on a corpus to disk.  This can
    be useful when performing feature detection is expensive (so we
    don't want to repeat it); but the corpus is too large to store in
    memory.  The following example illustrates this technique:

        >>> from nltk.corpus.reader.util import PickleCorpusView
        >>> from nltk.util import LazyMap
        >>> feature_corpus = LazyMap(detect_features, corpus) # doctest: +SKIP
        >>> PickleCorpusView.write(feature_corpus, some_fileid) # doctest: +SKIP
        >>> pcv = PickleCorpusView(some_fileid) # doctest: +SKIP
    """
    BLOCK_SIZE = 100
    PROTOCOL = -1

    def __init__(self, fileid, delete_on_gc=False):
        """
        Create a new corpus view that reads the pickle corpus
        ``fileid``.

        :param delete_on_gc: If true, then ``fileid`` will be deleted
            whenever this object gets garbage-collected.
        """
        self._delete_on_gc = delete_on_gc
        StreamBackedCorpusView.__init__(self, fileid)

    def read_block(self, stream):
        result = []
        for i in range(self.BLOCK_SIZE):
            try: result.append(pickle.load(stream))
            except EOFError: break
        return result

    def __del__(self):
        """
        If ``delete_on_gc`` was set to true when this
        ``PickleCorpusView`` was created, then delete the corpus view's
        fileid.  (This method is called whenever a
        ``PickledCorpusView`` is garbage-collected.)
        """
        if getattr(self, '_delete_on_gc'):
            if os.path.exists(self._fileid):
                try: os.remove(self._fileid)
                except (OSError, IOError): pass
        self.__dict__.clear() # make the garbage collector's job easier

    @classmethod
    def write(cls, sequence, output_file):
        if isinstance(output_file, compat.string_types):
            output_file = open(output_file, 'wb')
        for item in sequence:
            pickle.dump(item, output_file, cls.PROTOCOL)

    @classmethod
    def cache_to_tempfile(cls, sequence, delete_on_gc=True):
        """
        Write the given sequence to a temporary file as a pickle
        corpus; and then return a ``PickleCorpusView`` view for that
        temporary corpus file.

        :param delete_on_gc: If true, then the temporary file will be
            deleted whenever this object gets garbage-collected.
        """
        try:
            fd, output_file_name = tempfile.mkstemp('.pcv', 'nltk-')
            output_file = os.fdopen(fd, 'wb')
            cls.write(sequence, output_file)
            output_file.close()
            return PickleCorpusView(output_file_name, delete_on_gc)
        except (OSError, IOError) as e:
            raise ValueError('Error while creating temp file: %s' % e)

######################################################################
#{ Block Readers
######################################################################

def read_whitespace_block(stream):
    toks = []
    for i in range(20): # Read 20 lines at a time.
        toks.extend(stream.readline().split())
    return toks

def read_wordpunct_block(stream):
    toks = []
    for i in range(20): # Read 20 lines at a time.
        toks.extend(wordpunct_tokenize(stream.readline()))
    return toks

def read_line_block(stream):
    toks = []
    for i in range(20):
        line = stream.readline()
        if not line: return toks
        toks.append(line.rstrip('\n'))
    return toks
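
# Sketch: the simple block readers above, applied to an in-memory stream
# (any object with a readline() method works, e.g. io.StringIO).
def _demo_simple_block_readers():
    from io import StringIO
    text = 'The cat sat .\nIt purred .\n'
    print(read_whitespace_block(StringIO(text)))
    # -> ['The', 'cat', 'sat', '.', 'It', 'purred', '.']
    print(read_line_block(StringIO(text)))
    # -> ['The cat sat .', 'It purred .']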

def read_blankline_block(stream):
    s = ''
    while True:
        line = stream.readline()
        # End of file:
        if not line:
            if s: return [s]
            else: return []
        # Blank line:
        elif line and not line.strip():
            if s: return [s]
        # Other line:
        else:
            s += line
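
# Sketch: read_blankline_block groups lines into blank-line-separated
# paragraphs; each call returns the next paragraph as a one-element list.
def _demo_read_blankline_block():
    from io import StringIO
    stream = StringIO('First paragraph,\nstill first.\n\nSecond paragraph.\n')
    print(read_blankline_block(stream))  # -> ['First paragraph,\nstill first.\n']
    print(read_blankline_block(stream))  # -> ['Second paragraph.\n']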

def read_alignedsent_block(stream):
    s = ''
    while True:
        line = stream.readline()
        # End of file (checked before indexing into the line, so an empty
        # string at EOF does not raise an IndexError):
        if not line:
            if s: return [s]
            else: return []
        # Separator or blank line:
        if line[0] == '=' or line[0] == '\n' or line[:2] == '\r\n':
            continue
        # Other line:
        s += line
        if re.match('^\d+-\d+', line) is not None:
            return [s]

def read_regexp_block(stream, start_re, end_re=None):
    """
    Read a sequence of tokens from a stream, where tokens begin with
    lines that match ``start_re``.  If ``end_re`` is specified, then
    tokens end with lines that match ``end_re``; otherwise, tokens end
    whenever the next line matching ``start_re`` or EOF is found.
    """
    # Scan until we find a line matching the start regexp.
    while True:
        line = stream.readline()
        if not line: return [] # end of file.
        if re.match(start_re, line): break

    # Scan until we find another line matching the regexp, or EOF.
    lines = [line]
    while True:
        oldpos = stream.tell()
        line = stream.readline()
        # End of file:
        if not line:
            return [''.join(lines)]
        # End of token:
        if end_re is not None and re.match(end_re, line):
            return [''.join(lines)]
        # Start of new token: backup to just before it starts, and
        # return the token we've already collected.
        if end_re is None and re.match(start_re, line):
            stream.seek(oldpos)
            return [''.join(lines)]
        # Anything else is part of the token.
        lines.append(line)
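
# Sketch: read_regexp_block on an in-memory stream, where each token starts
# at a line beginning with 'Article'.  The stream is left positioned just
# before the next token, so repeated calls walk through the file.
def _demo_read_regexp_block():
    from io import StringIO
    stream = StringIO('Article 1\nbody line one\nArticle 2\nbody line two\n')
    print(read_regexp_block(stream, r'Article'))  # -> ['Article 1\nbody line one\n']
    print(read_regexp_block(stream, r'Article'))  # -> ['Article 2\nbody line two\n']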

def read_sexpr_block(stream, block_size=16384, comment_char=None):
    """
    Read a sequence of s-expressions from the stream, and leave the
    stream's file position at the end of the last complete s-expression
    read.  This function will always return at least one s-expression,
    unless there are no more s-expressions in the file.

    If the file ends in the middle of an s-expression, then that
    incomplete s-expression is returned when the end of the file is
    reached.

    :param block_size: The default block size for reading.  If an
        s-expression is longer than one block, then more than one
        block will be read.
    :param comment_char: A character that marks comments.  Any lines
        that begin with this character will be stripped out.
        (If spaces or tabs precede the comment character, then the
        line will not be stripped.)
    """
    start = stream.tell()
    block = stream.read(block_size)
    encoding = getattr(stream, 'encoding', None)
    assert encoding is not None or isinstance(block, compat.text_type)
    if encoding not in (None, 'utf-8'):
        import warnings
        warnings.warn('Parsing may fail, depending on the properties '
                      'of the %s encoding!' % encoding)
        # (e.g., the utf-16 encoding does not work because it insists
        # on adding BOMs to the beginning of encoded strings.)

    if comment_char:
        COMMENT = re.compile('(?m)^%s.*$' % re.escape(comment_char))
    while True:
        try:
            # If we're stripping comments, then make sure our block ends
            # on a line boundary; and then replace any comments with
            # space characters.  (We can't just strip them out -- that
            # would make our offset wrong.)
            if comment_char:
                block += stream.readline()
                block = re.sub(COMMENT, _sub_space, block)
            # Read the block.
            tokens, offset = _parse_sexpr_block(block)
            # Skip whitespace
            offset = re.compile(r'\s*').search(block, offset).end()

            # Move to the end position.
            if encoding is None:
                stream.seek(start+offset)
            else:
                stream.seek(start+len(block[:offset].encode(encoding)))

            # Return the list of tokens we processed
            return tokens
        except ValueError as e:
            if e.args[0] == 'Block too small':
                next_block = stream.read(block_size)
                if next_block:
                    block += next_block
                    continue
                else:
                    # The file ended mid-sexpr -- return what we got.
                    return [block.strip()]
            else: raise

def _sub_space(m):
    """Helper function: given a regexp match, return a string of
    spaces that's the same length as the matched string."""
    return ' '*(m.end()-m.start())

def _parse_sexpr_block(block):
    tokens = []
    start = end = 0

    while end < len(block):
        m = re.compile(r'\S').search(block, end)
        if not m:
            return tokens, end

        start = m.start()

        # Case 1: sexpr is not parenthesized.
        if m.group() != '(':
            m2 = re.compile(r'[\s(]').search(block, start)
            if m2:
                end = m2.start()
            else:
                if tokens: return tokens, end
                raise ValueError('Block too small')

        # Case 2: parenthesized sexpr.
        else:
            nesting = 0
            for m in re.compile(r'[()]').finditer(block, start):
                if m.group()=='(': nesting += 1
                else: nesting -= 1
                if nesting == 0:
                    end = m.end()
                    break
            else:
                if tokens: return tokens, end
                raise ValueError('Block too small')

        tokens.append(block[start:end])

    return tokens, end
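
# Sketch: read_sexpr_block on an in-memory stream holding two parenthesized
# s-expressions that fit comfortably inside a single block.
def _demo_read_sexpr_block():
    from io import StringIO
    stream = StringIO('(a (b c)) (d e)')
    print(read_sexpr_block(stream))  # -> ['(a (b c))', '(d e)']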

######################################################################
#{ Finding Corpus Items
######################################################################

def find_corpus_fileids(root, regexp):
    if not isinstance(root, PathPointer):
        raise TypeError('find_corpus_fileids: expected a PathPointer')
    regexp += '$'

    # Find fileids in a zipfile: scan the zipfile's namelist.  Filter
    # out entries that end in '/' -- they're directories.
    if isinstance(root, ZipFilePathPointer):
        fileids = [name[len(root.entry):] for name in root.zipfile.namelist()
                   if not name.endswith('/')]
        items = [name for name in fileids if re.match(regexp, name)]
        return sorted(items)

    # Find fileids in a directory: use os.walk to search all (proper
    # or symlinked) subdirectories, and match paths against the regexp.
    elif isinstance(root, FileSystemPathPointer):
        items = []
        # workaround for py25 which doesn't support followlinks
        kwargs = {}
        if not py25():
            kwargs = {'followlinks': True}
        for dirname, subdirs, fileids in os.walk(root.path, **kwargs):
            prefix = ''.join('%s/' % p for p in _path_from(root.path, dirname))
            items += [prefix+fileid for fileid in fileids
                      if re.match(regexp, prefix+fileid)]
            # Don't visit svn directories:
            if '.svn' in subdirs: subdirs.remove('.svn')
        return sorted(items)

    else:
        raise AssertionError("Don't know how to handle %r" % root)

def _path_from(parent, child):
    if os.path.split(parent)[1] == '':
        parent = os.path.split(parent)[0]
    path = []
    while parent != child:
        child, dirname = os.path.split(child)
        path.insert(0, dirname)
        assert os.path.split(child)[0] != child
    return path
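
# Sketch: find_corpus_fileids over a throwaway directory created on the fly,
# so the example is self-contained; real callers pass a corpus root instead.
def _demo_find_corpus_fileids():
    root_dir = tempfile.mkdtemp(prefix='nltk-demo-')
    for name in ('ca01', 'cb02'):
        open(os.path.join(root_dir, name), 'w').close()
    root = FileSystemPathPointer(root_dir)
    print(find_corpus_fileids(root, r'c[ab]\d\d'))  # -> ['ca01', 'cb02']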

######################################################################
#{ Paragraph structure in Treebank files
######################################################################

def tagged_treebank_para_block_reader(stream):
    # Read the next paragraph.
    para = ''
    while True:
        line = stream.readline()
        # End of paragraph:
        if re.match('======+\s*$', line):
            if para.strip(): return [para]
        # End of file:
        elif line == '':
            if para.strip(): return [para]
            else: return []
        # Content line:
        else:
            para += line
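
# Sketch: tagged_treebank_para_block_reader on an in-memory stream; lines of
# '=' characters delimit paragraphs in tagged Treebank files.
def _demo_tagged_treebank_para_block_reader():
    from io import StringIO
    stream = StringIO('==========\n'
                      'The/DT cat/NN sat/VBD ./.\n'
                      '==========\n'
                      'It/PRP purred/VBD ./.\n')
    print(tagged_treebank_para_block_reader(stream))  # first paragraph
    print(tagged_treebank_para_block_reader(stream))  # second paragraph
    print(tagged_treebank_para_block_reader(stream))  # [] at end of file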