Code Coverage for nltk.corpus.reader.util
import os, sys, bisect, re, tempfile
try: import cPickle as pickle
except ImportError: import pickle
from itertools import islice
from nltk.corpus.reader.api import CorpusReader
from nltk import tokenize
from nltk.etree import ElementTree
from nltk.internals import deprecated, slice_bounds
from nltk.util import AbstractLazySequence, LazySubsequence, LazyConcatenation
from nltk.data import PathPointer, FileSystemPathPointer, ZipFilePathPointer
from nltk.data import SeekableUnicodeStreamReader
class StreamBackedCorpusView(AbstractLazySequence):
"""
A 'view' of a corpus file, which acts like a sequence of tokens:
it can be accessed by index, iterated over, etc. However, the
tokens are only constructed as-needed -- the entire corpus is
never stored in memory at once.
The constructor to C{StreamBackedCorpusView} takes two arguments:
a corpus filename (specified as a string or as a L{PathPointer});
and a block reader. A X{block reader} is a function that reads
zero or more tokens from a stream, and returns them as a list. A
very simple example of a block reader is:
>>> def simple_block_reader(stream):
... return stream.readline().split()
This simple block reader reads a single line at a time, and
returns a single token (consisting of a string) for each
whitespace-separated substring on the line.
When deciding how to define the block reader for a given
corpus, careful consideration should be given to the size of
blocks handled by the block reader. Smaller block sizes will
increase the memory requirements of the corpus view's internal
data structures (by 2 integers per block). On the other hand,
larger block sizes may decrease performance for random access to
the corpus. (But note that larger block sizes will I{not}
decrease performance for iteration.)
Internally, C{CorpusView} maintains a partial mapping from token
index to file position, with one entry per block. When a token
with a given index M{i} is requested, the C{CorpusView} constructs
it as follows:
1. First, it searches the toknum/filepos mapping for the token
index closest to (but less than or equal to) M{i}.
2. Then, starting at the file position corresponding to that
index, it reads one block at a time using the block reader
until it reaches the requested token.
The toknum/filepos mapping is created lazily: it is initially
empty, but every time a new block is read, the block's
initial token is added to the mapping. (Thus, the toknum/filepos
map has one entry per block.)
In order to increase efficiency for random access patterns that
have high degrees of locality, the corpus view may cache one or
more blocks.
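    For example, a view over a plain-text file can be constructed and
    used as follows (a minimal sketch; the filename 'corpus.txt' is
    hypothetical):

        >>> view = StreamBackedCorpusView('corpus.txt', simple_block_reader)
        >>> third_token = view[3]     # random access: reads blocks up to token 3
        >>> first_ten = view[0:10]    # slices are returned as lazy subsequences
        >>> total = len(view)         # forces one full pass to count every token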
@note: Each C{CorpusView} object internally maintains an open file
object for its underlying corpus file. This file should be
automatically closed when the C{CorpusView} is garbage collected,
but if you wish to close it manually, use the L{close()}
method. If you access a C{CorpusView}'s items after it has been
closed, the file object will be automatically re-opened.
@warning: If the contents of the file are modified during the
lifetime of the C{CorpusView}, then the C{CorpusView}'s behavior
is undefined.
@warning: If a unicode encoding is specified when constructing a
C{CorpusView}, then the block reader may only call
C{stream.seek()} with offsets that have been returned by
C{stream.tell()}; in particular, calling C{stream.seek()} with
relative offsets, or with offsets based on string lengths, may
lead to incorrect behavior.
@ivar _block_reader: The function used to read
a single block from the underlying file stream.
@ivar _toknum: A list containing the token index of each block
that has been processed. In particular, C{_toknum[i]} is the
token index of the first token in block C{i}. Together
with L{_filepos}, this forms a partial mapping between token
indices and file positions.
@ivar _filepos: A list containing the file position of each block
        that has been processed. In particular, C{_filepos[i]} is the
file position of the first character in block C{i}. Together
with L{_toknum}, this forms a partial mapping between token
indices and file positions.
@ivar _stream: The stream used to access the underlying corpus file.
@ivar _len: The total number of tokens in the corpus, if known;
or C{None}, if the number of tokens is not yet known.
@ivar _eofpos: The character position of the last character in the
file. This is calculated when the corpus view is initialized,
and is used to decide when the end of file has been reached.
@ivar _cache: A cache of the most recently read block. It
is encoded as a tuple (start_toknum, end_toknum, tokens), where
start_toknum is the token index of the first token in the block;
end_toknum is the token index of the first token not in the
block; and tokens is a list of the tokens in the block.
"""
def __init__(self, filename, block_reader=None, startpos=0,
encoding=None):
"""
Create a new corpus view, based on the file C{filename}, and
read with C{block_reader}. See the class documentation
for more information.
@param filename: The path to the file that is read by this
corpus view. C{filename} can either be a string or a
L{PathPointer}.
@param startpos: The file position at which the view will
start reading. This can be used to skip over preface
sections.
@param encoding: The unicode encoding that should be used to
read the file's contents. If no encoding is specified,
then the file's contents will be read as a non-unicode
string (i.e., a C{str}).
"""
if block_reader:
self.read_block = block_reader
self._toknum = [0]
self._filepos = [startpos]
self._encoding = encoding
self._len = None
self._filename = filename
self._stream = None
self._current_toknum = None
"""This variable is set to the index of the next token that
will be read, immediately before L{self.read_block()} is
called. This is provided for the benefit of the block
reader, which under rare circumstances may need to know
the current token number."""
self._current_blocknum = None
"""This variable is set to the index of the next block that
will be read, immediately before L{self.read_block()} is
called. This is provided for the benefit of the block
reader, which under rare circumstances may need to know
the current block number."""
try:
if isinstance(self._filename, PathPointer):
self._eofpos = self._filename.file_size()
else:
self._eofpos = os.stat(self._filename).st_size
except Exception, exc:
raise ValueError('Unable to open or access %r -- %s' %
(filename, exc))
self._cache = (-1, -1, None)
filename = property(lambda self: self._filename, doc="""
The filename of the file that is accessed by this view.
@type: C{str} or L{PathPointer}""")
def read_block(self, stream):
"""
Read a block from the input stream.
@return: a block of tokens from the input stream
@rtype: list of any
@param stream: an input stream
@type stream: stream
"""
raise NotImplementedError('Abstract Method')
def _open(self):
"""
Open the file stream associated with this corpus view. This
        will automatically be called if any value is read from the view
while its file stream is closed.
"""
if isinstance(self._filename, PathPointer):
self._stream = self._filename.open(self._encoding)
elif self._encoding:
self._stream = SeekableUnicodeStreamReader(
open(self._filename, 'rb'), self._encoding)
else:
self._stream = open(self._filename, 'rb')
def close(self):
"""
Close the file stream associated with this corpus view. This
can be useful if you are worried about running out of file
handles (although the stream should automatically be closed
upon garbage collection of the corpus view). If the corpus
view is accessed after it is closed, it will be automatically
re-opened.
"""
if self._stream is not None:
self._stream.close()
self._stream = None
def __len__(self):
if self._len is None:
for tok in self.iterate_from(self._toknum[-1]): pass
return self._len
def __getitem__(self, i):
if isinstance(i, slice):
start, stop = slice_bounds(self, i)
offset = self._cache[0]
if offset <= start and stop <= self._cache[1]:
return self._cache[2][start-offset:stop-offset]
return LazySubsequence(self, start, stop)
else:
if i < 0: i += len(self)
if i < 0: raise IndexError('index out of range')
offset = self._cache[0]
if offset <= i < self._cache[1]:
return self._cache[2][i-offset]
try:
return self.iterate_from(i).next()
except StopIteration:
raise IndexError('index out of range')
def iterate_from(self, start_tok):
if start_tok < self._toknum[-1]:
block_index = bisect.bisect_right(self._toknum, start_tok)-1
toknum = self._toknum[block_index]
filepos = self._filepos[block_index]
else:
block_index = len(self._toknum)-1
toknum = self._toknum[-1]
filepos = self._filepos[-1]
if self._stream is None:
self._open()
while filepos < self._eofpos:
self._stream.seek(filepos)
self._current_toknum = toknum
self._current_blocknum = block_index
tokens = self.read_block(self._stream)
assert isinstance(tokens, (tuple, list)), (
'block reader %s() should return list or tuple.' %
self.read_block.__name__)
num_toks = len(tokens)
new_filepos = self._stream.tell()
assert new_filepos > filepos, (
                'block reader %s() should consume at least 1 byte (filepos=%d)' %
                (self.read_block.__name__, filepos))
self._cache = (toknum, toknum+num_toks, list(tokens))
assert toknum <= self._toknum[-1]
if num_toks > 0:
block_index += 1
if toknum == self._toknum[-1]:
assert new_filepos > self._filepos[-1]
self._filepos.append(new_filepos)
self._toknum.append(toknum+num_toks)
else:
assert new_filepos == self._filepos[block_index], (
'inconsistent block reader (num chars read)')
assert toknum+num_toks == self._toknum[block_index], (
'inconsistent block reader (num tokens returned)')
for tok in tokens[max(0, start_tok-toknum):]:
yield tok
assert new_filepos <= self._eofpos
if new_filepos == self._eofpos:
self._len = toknum + num_toks
break
toknum += num_toks
filepos = new_filepos
assert self._len is not None
def __add__(self, other):
return concat([self, other])
def __radd__(self, other):
return concat([other, self])
def __mul__(self, count):
return concat([self] * count)
def __rmul__(self, count):
return concat([self] * count)
class ConcatenatedCorpusView(AbstractLazySequence):
"""
A 'view' of a corpus file that joins together one or more
L{StreamBackedCorpusViews<StreamBackedCorpusView>}. At most
one file handle is left open at any time.
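    A minimal sketch (the filenames here are hypothetical):

        >>> v1 = StreamBackedCorpusView('part1.txt', read_line_block)
        >>> v2 = StreamBackedCorpusView('part2.txt', read_line_block)
        >>> both = ConcatenatedCorpusView([v1, v2])
        >>> first = both[0]    # the first line of part1.txt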
"""
def __init__(self, corpus_views):
self._pieces = corpus_views
"""A list of the corpus subviews that make up this
concatenation."""
self._offsets = [0]
"""A list of offsets, indicating the index at which each
subview begins. In particular::
offsets[i] = sum([len(p) for p in pieces[:i]])"""
self._open_piece = None
"""The most recently accessed corpus subview (or C{None}).
Before a new subview is accessed, this subview will be closed."""
def __len__(self):
if len(self._offsets) <= len(self._pieces):
for tok in self.iterate_from(self._offsets[-1]): pass
return self._offsets[-1]
def close(self):
for piece in self._pieces:
piece.close()
def iterate_from(self, start_tok):
piecenum = bisect.bisect_right(self._offsets, start_tok)-1
while piecenum < len(self._pieces):
offset = self._offsets[piecenum]
piece = self._pieces[piecenum]
if self._open_piece is not piece:
if self._open_piece is not None:
self._open_piece.close()
self._open_piece = piece
for tok in piece.iterate_from(max(0, start_tok-offset)):
yield tok
if piecenum+1 == len(self._offsets):
self._offsets.append(self._offsets[-1] + len(piece))
piecenum += 1
def concat(docs):
"""
Concatenate together the contents of multiple documents from a
single corpus, using an appropriate concatenation function. This
utility function is used by corpus readers when the user requests
more than one document at a time.
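    A minimal sketch of the dispatch behaviour (the literals are
    illustrative only):

        >>> concat(['ab', 'cd'])
        'abcd'
        >>> concat([[1, 2], [3]])
        [1, 2, 3]
        >>> concat([(1,), (2, 3)])
        (1, 2, 3)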
"""
if len(docs) == 1:
return docs[0]
if len(docs) == 0:
raise ValueError('concat() expects at least one object!')
types = set([d.__class__ for d in docs])
if types.issubset([str, unicode, basestring]):
return reduce((lambda a,b:a+b), docs, '')
for typ in types:
if not issubclass(typ, (StreamBackedCorpusView,
ConcatenatedCorpusView)):
break
else:
return ConcatenatedCorpusView(docs)
for typ in types:
if not issubclass(typ, AbstractLazySequence):
break
else:
return LazyConcatenation(docs)
if len(types) == 1:
typ = list(types)[0]
if issubclass(typ, list):
return reduce((lambda a,b:a+b), docs, [])
if issubclass(typ, tuple):
return reduce((lambda a,b:a+b), docs, ())
if ElementTree.iselement(typ):
xmltree = ElementTree.Element('documents')
for doc in docs: xmltree.append(doc)
return xmltree
raise ValueError("Don't know how to concatenate types: %r" % types)
class PickleCorpusView(StreamBackedCorpusView):
"""
A stream backed corpus view for corpus files that consist of
sequences of serialized Python objects (serialized using
C{pickle.dump}). One use case for this class is to store the
result of running feature detection on a corpus to disk. This can
be useful when performing feature detection is expensive (so we
don't want to repeat it); but the corpus is too large to store in
memory. The following example illustrates this technique:
>>> feature_corpus = LazyMap(detect_features, corpus)
>>> PickleCorpusView.write(feature_corpus, some_filename)
    >>> pcv = PickleCorpusView(some_filename)
"""
BLOCK_SIZE = 100
PROTOCOL = -1
def __init__(self, filename, delete_on_gc=False):
"""
Create a new corpus view that reads the pickle corpus
C{filename}.
@param delete_on_gc: If true, then C{filename} will be deleted
whenever this object gets garbage-collected.
"""
self._delete_on_gc = delete_on_gc
StreamBackedCorpusView.__init__(self, filename)
def read_block(self, stream):
result = []
for i in range(self.BLOCK_SIZE):
try: result.append(pickle.load(stream))
except EOFError: break
return result
def __del__(self):
"""
If C{delete_on_gc} was set to true when this
C{PickleCorpusView} was created, then delete the corpus view's
filename. (This method is called whenever a
        C{PickleCorpusView} is garbage-collected.)
"""
        if getattr(self, '_delete_on_gc', False):
if os.path.exists(self._filename):
try: os.remove(self._filename)
except (OSError, IOError): pass
self.__dict__.clear()
@classmethod
def write(cls, sequence, output_file):
if isinstance(output_file, basestring):
output_file = open(output_file, 'wb')
for item in sequence:
pickle.dump(item, output_file, cls.PROTOCOL)
@classmethod
def cache_to_tempfile(cls, sequence, delete_on_gc=True):
"""
Write the given sequence to a temporary file as a pickle
corpus; and then return a C{PickleCorpusView} view for that
temporary corpus file.
@param delete_on_gc: If true, then the temporary file will be
deleted whenever this object gets garbage-collected.
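        A minimal usage sketch (the sequence here is purely
        illustrative):

            >>> view = PickleCorpusView.cache_to_tempfile(xrange(1000))
            >>> view[500]
            500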
"""
try:
fd, output_file_name = tempfile.mkstemp('.pcv', 'nltk-')
output_file = os.fdopen(fd, 'wb')
cls.write(sequence, output_file)
output_file.close()
return PickleCorpusView(output_file_name, delete_on_gc)
except (OSError, IOError), e:
raise ValueError('Error while creating temp file: %s' % e)
def read_whitespace_block(stream):
toks = []
for i in range(20):
toks.extend(stream.readline().split())
return toks
def read_wordpunct_block(stream):
toks = []
for i in range(20):
        toks.extend(tokenize.wordpunct_tokenize(stream.readline()))
return toks
def read_line_block(stream):
toks = []
for i in range(20):
line = stream.readline()
if not line: return toks
toks.append(line.replace('\n', ''))
return toks
def read_blankline_block(stream):
s = ''
while True:
line = stream.readline()
if not line:
if s: return [s]
else: return []
elif line and not line.strip():
if s: return [s]
else:
s += line
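# A minimal sketch of how the blank-line block reader behaves (the
# sample text is illustrative only):
#
#     >>> from StringIO import StringIO
#     >>> stream = StringIO('line one\nline two\n\nline three\n')
#     >>> read_blankline_block(stream)
#     ['line one\nline two\n']
#     >>> read_blankline_block(stream)
#     ['line three\n']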
def read_regexp_block(stream, start_re, end_re=None):
"""
Read a sequence of tokens from a stream, where tokens begin with
lines that match C{start_re}. If C{end_re} is specified, then
tokens end with lines that match C{end_re}; otherwise, tokens end
whenever the next line matching C{start_re} or EOF is found.
"""
while True:
line = stream.readline()
if not line: return []
if re.match(start_re, line): break
lines = [line]
while True:
oldpos = stream.tell()
line = stream.readline()
if not line:
return [''.join(lines)]
if end_re is not None and re.match(end_re, line):
return [''.join(lines)]
if end_re is None and re.match(start_re, line):
stream.seek(oldpos)
return [''.join(lines)]
lines.append(line)
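# A minimal sketch of how the regexp block reader behaves (the sample
# text is illustrative only); each call returns one block, starting at
# a line that matches start_re:
#
#     >>> from StringIO import StringIO
#     >>> stream = StringIO('preface\nA one\nmore\nA two\n')
#     >>> read_regexp_block(stream, start_re='A')
#     ['A one\nmore\n']
#     >>> read_regexp_block(stream, start_re='A')
#     ['A two\n']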
def read_sexpr_block(stream, block_size=16384, comment_char=None):
"""
Read a sequence of s-expressions from the stream, and leave the
    stream's file position at the end of the last complete s-expression
read. This function will always return at least one s-expression,
unless there are no more s-expressions in the file.
    If the file ends in the middle of an s-expression, then that
incomplete s-expression is returned when the end of the file is
reached.
@param block_size: The default block size for reading. If an
s-expression is longer than one block, then more than one
block will be read.
@param comment_char: A character that marks comments. Any lines
that begin with this character will be stripped out.
        (If spaces or tabs precede the comment character, then the
line will not be stripped.)
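    A minimal sketch of the behaviour (using StringIO; the sample text
    is illustrative only):

        >>> from StringIO import StringIO
        >>> stream = StringIO('(a b) (c d')
        >>> read_sexpr_block(stream)
        ['(a b)']
        >>> read_sexpr_block(stream)
        ['(c d']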
"""
start = stream.tell()
block = stream.read(block_size)
encoding = getattr(stream, 'encoding', None)
assert encoding is not None or isinstance(block, str)
if encoding not in (None, 'utf-8'):
import warnings
warnings.warn('Parsing may fail, depending on the properties '
'of the %s encoding!' % encoding)
if comment_char:
COMMENT = re.compile('(?m)^%s.*$' % re.escape(comment_char))
while True:
try:
if comment_char:
block += stream.readline()
block = re.sub(COMMENT, _sub_space, block)
tokens, offset = _parse_sexpr_block(block)
offset = re.compile(r'\s*').search(block, offset).end()
if encoding is None:
stream.seek(start+offset)
else:
stream.seek(start+len(block[:offset].encode(encoding)))
return tokens
except ValueError, e:
if e.args[0] == 'Block too small':
next_block = stream.read(block_size)
if next_block:
block += next_block
continue
else:
return [block.strip()]
else: raise
def _sub_space(m):
"""Helper function: given a regexp match, return a string of
spaces that's the same length as the matched string."""
return ' '*(m.end()-m.start())
def _parse_sexpr_block(block):
tokens = []
start = end = 0
while end < len(block):
m = re.compile(r'\S').search(block, end)
if not m:
return tokens, end
start = m.start()
if m.group() != '(':
m2 = re.compile(r'[\s(]').search(block, start)
if m2:
end = m2.start()
else:
if tokens: return tokens, end
raise ValueError('Block too small')
else:
nesting = 0
for m in re.compile(r'[()]').finditer(block, start):
if m.group()=='(': nesting += 1
else: nesting -= 1
if nesting == 0:
end = m.end()
break
else:
if tokens: return tokens, end
raise ValueError('Block too small')
tokens.append(block[start:end])
return tokens, end
class SyntaxCorpusReader(CorpusReader):
"""
An abstract base class for reading corpora consisting of
syntactically parsed text. Subclasses should define:
- L{__init__}, which specifies the location of the corpus
and a method for detecting the sentence blocks in corpus files.
- L{_read_block}, which reads a block from the input stream.
      - L{_word}, which takes a block and returns a list of lists of words.
      - L{_tag}, which takes a block and returns a list of lists of tagged
        words.
- L{_parse}, which takes a block and returns a list of parsed
sentences.
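    A minimal sketch of such a subclass (loosely modelled on a
    bracketed-parse corpus; C{bracket_parse} is assumed to come from
    C{nltk.tree} and is not part of this module):

        >>> from nltk.tree import bracket_parse
        >>> class SimpleParsedCorpusReader(SyntaxCorpusReader):
        ...     def _read_block(self, stream):
        ...         return read_line_block(stream)   # one parse per line
        ...     def _parse(self, s):
        ...         return bracket_parse(s)
        ...     def _word(self, s):
        ...         return bracket_parse(s).leaves()
        ...     def _tag(self, s, simplify_tags=False):
        ...         return bracket_parse(s).pos()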
"""
def _parse(self, s):
raise AssertionError('Abstract method')
def _word(self, s):
raise AssertionError('Abstract method')
def _tag(self, s):
raise AssertionError('Abstract method')
def _read_block(self, stream):
raise AssertionError('Abstract method')
def raw(self, files=None):
if files is None: files = self._files
elif isinstance(files, basestring): files = [files]
return concat([self.open(f).read() for f in files])
def parsed_sents(self, files=None):
reader = self._read_parsed_sent_block
return concat([StreamBackedCorpusView(filename, reader, encoding=enc)
for filename, enc in self.abspaths(files, True)])
def tagged_sents(self, files=None, simplify_tags=False):
def reader(stream):
return self._read_tagged_sent_block(stream, simplify_tags)
return concat([StreamBackedCorpusView(filename, reader, encoding=enc)
for filename, enc in self.abspaths(files, True)])
def sents(self, files=None):
reader = self._read_sent_block
return concat([StreamBackedCorpusView(filename, reader, encoding=enc)
for filename, enc in self.abspaths(files, True)])
def tagged_words(self, files=None, simplify_tags=False):
def reader(stream):
return self._read_tagged_word_block(stream, simplify_tags)
return concat([StreamBackedCorpusView(filename, reader, encoding=enc)
for filename, enc in self.abspaths(files, True)])
def words(self, files=None):
return concat([StreamBackedCorpusView(filename,
self._read_word_block,
encoding=enc)
for filename, enc in self.abspaths(files, True)])
def _read_word_block(self, stream):
return sum(self._read_sent_block(stream), [])
def _read_tagged_word_block(self, stream, simplify_tags=False):
return sum(self._read_tagged_sent_block(stream, simplify_tags), [])
def _read_sent_block(self, stream):
return filter(None, [self._word(t) for t in self._read_block(stream)])
def _read_tagged_sent_block(self, stream, simplify_tags=False):
return filter(None, [self._tag(t, simplify_tags)
for t in self._read_block(stream)])
def _read_parsed_sent_block(self, stream):
return filter(None, [self._parse(t) for t in self._read_block(stream)])
@deprecated("Use .raw() or .sents() or .tagged_sents() or "
".parsed_sents() instead.")
def read(self, items=None, format='parsed'):
if format == 'parsed': return self.parsed_sents(items)
if format == 'raw': return self.raw(items)
if format == 'tokenized': return self.sents(items)
if format == 'tagged': return self.tagged_sents(items)
raise ValueError('bad format %r' % format)
@deprecated("Use .parsed_sents() instead.")
def parsed(self, items=None):
return self.parsed_sents(items)
@deprecated("Use .sents() instead.")
def tokenized(self, items=None):
return self.sents(items)
@deprecated("Use .tagged_sents() instead.")
def tagged(self, items=None):
return self.tagged_sents(items)
def find_corpus_files(root, regexp):
if not isinstance(root, PathPointer):
raise TypeError('find_corpus_files: expected a PathPointer')
regexp += '$'
if isinstance(root, ZipFilePathPointer):
files = [name[len(root.entry):] for name in root.zipfile.namelist()
if not name.endswith('/')]
items = [name for name in files if re.match(regexp, name)]
return tuple(sorted(items))
elif isinstance(root, FileSystemPathPointer):
items = []
for dirname, subdirs, filenames in os.walk(root.path):
prefix = ''.join('%s/' % p for p in _path_from(root.path, dirname))
items += [prefix+filename for filename in filenames
if re.match(regexp, prefix+filename)]
if '.svn' in subdirs: subdirs.remove('.svn')
return tuple(sorted(items))
else:
raise AssertionError("Don't know how to handle %r" % root)
def _path_from(parent, child):
if os.path.split(parent)[1] == '':
parent = os.path.split(parent)[0]
path = []
while parent != child:
child, dirname = os.path.split(child)
path.insert(0, dirname)
assert os.path.split(child)[0] != child
return path
def tagged_treebank_para_block_reader(stream):
para = ''
while True:
line = stream.readline()
if re.match('======+\s*$', line):
if para.strip(): return [para]
elif line == '':
if para.strip(): return [para]
else: return []
else:
para += line