"""tokenizer.py

Functions:
tokenize       Reads free text from a file handle and returns a list of tokens.
tokenize_str   Same as tokenize, but takes a string instead of a handle.
tokenize_file  Same as tokenize, but takes a filename.

"""
import string
import mx.TextTools as TT

import memoize

def _get_tag_table():
    # Character classes for the tag table.  TT.set(s, 0) builds the
    # complement set, so unprintable_set matches every character that is
    # NOT in string.printable.
    unprintable_set = TT.set(string.printable, 0)
    whitespace_no_newline_set = TT.set(string.whitespace.replace("\n", ""))
    punctuation_set = TT.set(string.punctuation)
    # Each entry is (tagobj, command, argument, jump-on-fail, jump-on-match).
    # On a match, the negative offset jumps back to the EOF check at the
    # top of the table; on a failure, control falls through (+1) to the
    # next entry.  Only the last entry can abort the whole match.
    tag_table = (
        (None, TT.EOF, TT.Here, 1, TT.MatchOk),
        ('TEXT', TT.AllInSet, TT.alphanumeric_set, 1, -1),
        ('WHITESPACE', TT.AllInSet, whitespace_no_newline_set, 1, -2),
        ('NEWLINE', TT.Is, "\n", 1, -3),
        ('PUNCTUATION', TT.IsInSet, punctuation_set, 1, -4),
        ('UNPRINTABLE', TT.IsInSet, unprintable_set, TT.MatchFail, -5),
        )
    return tag_table
# Cache the table so it is only built on the first call.
_get_tag_table = memoize.memoize(_get_tag_table)
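
# A quick illustration of what the table produces (a sketch; per the
# mx.TextTools documentation, each taglist entry has the form
# (tagobj, left, right, subtags)):
#
#     success, taglist, nextindex = TT.tag("ab c", _get_tag_table())
#     # success   == 1
#     # taglist   == [('TEXT', 0, 2, None), ('WHITESPACE', 2, 3, None),
#     #               ('TEXT', 3, 4, None)]
#     # nextindex == 4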

def tokenize(handle):
    """tokenize(handle) -> list of tokens"""
    if not hasattr(handle, 'read'):
        raise TypeError("I expected a handle and got a %s" % type(handle))
    return tokenize_str(handle.read())
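
# tokenize() accepts anything with a read() method, not just real files.
# For example (a sketch using the standard StringIO module, which is not
# otherwise used by this module):
#
#     from StringIO import StringIO
#     tokenize(StringIO("pH 7.0\n"))
#     # -> ['pH', ' ', '7', '.', '0', '\n']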

def tokenize_str(text):
    """tokenize_str(text) -> list of tokens"""
    # This implementation uses TextTools directly instead of going
    # through Martel.  It's about twice as fast.
    # The parameter is named 'text' rather than 'string' so that it does
    # not shadow the 'string' module imported above.
    text = str(text)
    table = _get_tag_table()
    success, taglist, nextindex = TT.tag(text, table)
    if not success:
        raise SyntaxError("Error parsing at %d" % nextindex)
    # Each taglist entry is (tagobj, start, end, subtags); only the start
    # and end offsets are needed to slice out the token.
    tokens = []
    for name, start, end, subtags in taglist:
        tokens.append(text[start:end])
    return tokens
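
# For reference, a rough pure-Python equivalent of tokenize_str (a sketch;
# nothing in this module uses it, and it is likely slower than the tag
# table).  It assumes TT.alphanumeric_set covers the same characters as
# [a-zA-Z0-9]; the final single-character branch plays the role of the
# UNPRINTABLE entry, catching anything the earlier branches miss.
import re

_token_re = re.compile(r"[a-zA-Z0-9]+"   # TEXT
                       r"|[^\S\n]+"      # WHITESPACE (any but newline)
                       r"|\n"            # NEWLINE
                       r"|[%s]"          # PUNCTUATION (one char at a time)
                       r"|[^\n]"         # UNPRINTABLE (anything left over)
                       % re.escape(string.punctuation))

def _tokenize_str_slow(text):
    tokens = []
    index = 0
    while index < len(text):
        match = _token_re.match(text, index)
        if match is None:  # kept for parity with the success check above
            raise SyntaxError("Error parsing at %d" % index)
        tokens.append(match.group())
        index = match.end()
    return tokens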

def tokenize_file(filename):
    """tokenize_file(filename) -> list of tokens"""
    # Close the handle when done rather than leaking it.
    with open(filename) as handle:
        return tokenize(handle)

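if __name__ == "__main__":
    # Smoke test: alphanumeric runs, whitespace, punctuation, and newlines
    # should each come back as separate tokens.
    print(tokenize_str("Hello, world!\n"))
    # expected: ['Hello', ',', ' ', 'world', '!', '\n']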