-#-----------------------------------------------------------------------------
+# -----------------------------------------------------------------------------
# ply: lex.py
#
-# Author: David M. Beazley (beazley@cs.uchicago.edu)
-# Department of Computer Science
-# University of Chicago
-# Chicago, IL 60637
-#
-# Copyright (C) 2001, David M. Beazley
+# Author: David M. Beazley (dave@dabeaz.com)
#
-# $Header: /home/davidb/src/cvs/python-rwhoisd/rwhoisd/lex.py,v 1.1 2003/04/22 02:19:07 davidb Exp $
+# Copyright (C) 2001-2008, David M. Beazley
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
-#
+#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
-#
+#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-# See the file COPYING for a complete copy of the LGPL.
-#
-#
-# This module automatically constructs a lexical analysis module from regular
-# expression rules defined in a user-defined module. The idea is essentially the same
-# as that used in John Aycock's Spark framework, but the implementation works
-# at the module level rather than requiring the use of classes.
-#
-# This module tries to provide an interface that is closely modeled after
-# the traditional lex interface in Unix. It also differs from Spark
-# in that:
-#
-# - It provides more extensive error checking and reporting if
-# the user supplies a set of regular expressions that can't
-# be compiled or if there is any other kind of a problem in
-# the specification.
-#
-# - The interface is geared towards LALR(1) and LR(1) parser
-# generators. That is tokens are generated one at a time
-# rather than being generated in advanced all in one step.
-#
-# There are a few limitations of this module
-#
-# - The module interface makes it somewhat awkward to support more
-# than one lexer at a time. Although somewhat inelegant from a
-# design perspective, this is rarely a practical concern for
-# most compiler projects.
-#
-# - The lexer requires that the entire input text be read into
-# a string before scanning. I suppose that most machines have
-# enough memory to make this a minor issues, but it makes
-# the lexer somewhat difficult to use in interactive sessions
-# or with streaming data.
#
-#-----------------------------------------------------------------------------
-
-r"""
-lex.py
-
-This module builds lex-like scanners based on regular expression rules.
-To use the module, simply write a collection of regular expression rules
-and actions like this:
-
-# lexer.py
-import lex
-
-# Define a list of valid tokens
-tokens = (
- 'IDENTIFIER', 'NUMBER', 'PLUS', 'MINUS'
- )
-
-# Define tokens as functions
-def t_IDENTIFIER(t):
- r' ([a-zA-Z_](\w|_)* '
- return t
-
-def t_NUMBER(t):
- r' \d+ '
- return t
-
-# Some simple tokens with no actions
-t_PLUS = r'\+'
-t_MINUS = r'-'
-
-# Initialize the lexer
-lex.lex()
-
-The tokens list is required and contains a complete list of all valid
-token types that the lexer is allowed to produce. Token types are
-restricted to be valid identifiers. This means that 'MINUS' is a valid
-token type whereas '-' is not.
-
-Rules are defined by writing a function with a name of the form
-t_rulename. Each rule must accept a single argument which is
-a token object generated by the lexer. This token has the following
-attributes:
-
- t.type = type string of the token. This is initially set to the
- name of the rule without the leading t_
- t.value = The value of the lexeme.
- t.lineno = The value of the line number where the token was encountered
-
-For example, the t_NUMBER() rule above might be called with the following:
-
- t.type = 'NUMBER'
- t.value = '42'
- t.lineno = 3
-
-Each rule returns the token object it would like to supply to the
-parser. In most cases, the token t is returned with few, if any
-modifications. To discard a token for things like whitespace or
-comments, simply return nothing. For instance:
-
-def t_whitespace(t):
- r' \s+ '
- pass
-
-For faster lexing, you can also define this in terms of the ignore set like this:
-
-t_ignore = ' \t'
-
-The characters in this string are ignored by the lexer. Use of this feature can speed
-up parsing significantly since scanning will immediately proceed to the next token.
-
-lex requires that the token returned by each rule has an attribute
-t.type. Other than this, rules are free to return any kind of token
-object that they wish and may construct a new type of token object
-from the attributes of t (provided the new object has the required
-type attribute).
-
-If illegal characters are encountered, the scanner executes the
-function t_error(t) where t is a token representing the rest of the
-string that hasn't been matched. If this function isn't defined, a
-LexError exception is raised. The .text attribute of this exception
-object contains the part of the string that wasn't matched.
-
-The t.skip(n) method can be used to skip ahead n characters in the
-input stream. This is usually only used in the error handling rule.
-For instance, the following rule would print an error message and
-continue:
-
-def t_error(t):
- print "Illegal character in input %s" % t.value[0]
- t.skip(1)
-
-Of course, a nice scanner might wish to skip more than one character
-if the input looks very corrupted.
-
-The lex module defines a t.lineno attribute on each token that can be used
-to track the current line number in the input. The value of this
-variable is not modified by lex so it is up to your lexer module
-to correctly update its value depending on the lexical properties
-of the input language. To do this, you might write rules such as
-the following:
-
-def t_newline(t):
- r' \n+ '
- t.lineno += t.value.count("\n")
-
-To initialize your lexer so that it can be used, simply call the lex.lex()
-function in your rule file. If there are any errors in your
-specification, warning messages or an exception will be generated to
-alert you to the problem.
-
-(dave: this needs to be rewritten)
-To use the newly constructed lexer from another module, simply do
-this:
-
- import lex
- import lexer
- plex.input("position = initial + rate*60")
+# See the file COPYING for a complete copy of the LGPL.
+# -----------------------------------------------------------------------------
- while 1:
- token = plex.token() # Get a token
- if not token: break # No more tokens
- ... do whatever ...
+__version__ = "2.5"
+__tabversion__ = "2.4" # Version of table file used
-Assuming that the module 'lexer' has initialized plex as shown
-above, parsing modules can safely import 'plex' without having
-to import the rule file or any additional imformation about the
-scanner you have defined.
-"""
+import re, sys, types, copy, os
-# -----------------------------------------------------------------------------
+# This regular expression is used to match valid token names
+_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
+# _INSTANCETYPE sets the valid set of instance types recognized
+# by PLY when lexers are defined by a class. In order to maintain
+# backwards compatibility with Python-2.0, we have to check for
+# the existence of ObjectType.
-__version__ = "1.3"
+try:
+ _INSTANCETYPE = (types.InstanceType, types.ObjectType)
+except AttributeError:
+ _INSTANCETYPE = types.InstanceType
+ class object: pass # Note: needed if no new-style classes present
-import re, types, sys, copy
+# Exception thrown when invalid token encountered and no default error
+# handler is defined.
-# Exception thrown when invalid token encountered and no default
class LexError(Exception):
def __init__(self,message,s):
self.args = (message,)
self.text = s
-# Token class
-class LexToken:
+# An object used to issue one-time warning messages for various features
+
+class LexWarning(object):
+ def __init__(self):
+ self.warned = 0
+ def __call__(self,msg):
+ if not self.warned:
+ sys.stderr.write("ply.lex: Warning: " + msg+"\n")
+ self.warned = 1
+
+_SkipWarning = LexWarning() # Warning for use of t.skip() on tokens
+
+# Token class. This class is used to represent the tokens produced.
+class LexToken(object):
def __str__(self):
- return "LexToken(%s,%r,%d)" % (self.type,self.value,self.lineno)
+ return "LexToken(%s,%r,%d,%d)" % (self.type,self.value,self.lineno,self.lexpos)
def __repr__(self):
return str(self)
def skip(self,n):
- try:
- self._skipn += n
- except AttributeError:
- self._skipn = n
+ self.lexer.skip(n)
+ _SkipWarning("Calling t.skip() on a token is deprecated. Please use t.lexer.skip()")
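# -----------------------------------------------------------------------------
# Example (a minimal sketch, not taken from any particular rule file): with
# t.skip() deprecated as noted above, an error rule reports the offending
# character and advances the scan through the lexer object instead:
#
#     def t_error(t):
#         print "Illegal character '%s'" % t.value[0]
#         t.lexer.skip(1)
# -----------------------------------------------------------------------------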
# -----------------------------------------------------------------------------
# Lexer class
#
+# This class encapsulates all of the methods and data associated with a lexer.
+#
# input() - Store a new string in the lexer
# token() - Get the next token
# -----------------------------------------------------------------------------
class Lexer:
def __init__(self):
- self.lexre = None # Master regular expression
- self.lexdata = None # Actual input data (as a string)
- self.lexpos = 0 # Current position in input text
- self.lexlen = 0 # Length of the input text
- self.lexindexfunc = [ ] # Reverse mapping of groups to functions and types
- self.lexerrorf = None # Error rule (if any)
- self.lextokens = None # List of valid tokens
- self.lexignore = None # Ignored characters
- self.lineno = 1 # Current line number
- self.debug = 0 # Debugging mode
- self.optimize = 0 # Optimized mode
- self.token = self.errtoken
-
- def __copy__(self):
- c = Lexer()
- c.lexre = self.lexre
- c.lexdata = self.lexdata
- c.lexpos = self.lexpos
- c.lexlen = self.lexlen
- c.lenindexfunc = self.lexindexfunc
- c.lexerrorf = self.lexerrorf
- c.lextokens = self.lextokens
- c.lexignore = self.lexignore
- c.lineno = self.lineno
- c.optimize = self.optimize
- c.token = c.realtoken
+ self.lexre = None # Master regular expression. This is a list of
+ # tuples (re,findex) where re is a compiled
+ # regular expression and findex is a list
+ # mapping regex group numbers to rules
+ self.lexretext = None # Current regular expression strings
+ self.lexstatere = {} # Dictionary mapping lexer states to master regexs
+ self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
+ self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names
+ self.lexstate = "INITIAL" # Current lexer state
+ self.lexstatestack = [] # Stack of lexer states
+ self.lexstateinfo = None # State information
+ self.lexstateignore = {} # Dictionary of ignored characters for each state
+ self.lexstateerrorf = {} # Dictionary of error functions for each state
+ self.lexreflags = 0 # Optional re compile flags
+ self.lexdata = None # Actual input data (as a string)
+ self.lexpos = 0 # Current position in input text
+ self.lexlen = 0 # Length of the input text
+ self.lexerrorf = None # Error rule (if any)
+ self.lextokens = None # List of valid tokens
+ self.lexignore = "" # Ignored characters
+ self.lexliterals = "" # Literal characters that can be passed through
+ self.lexmodule = None # Module
+ self.lineno = 1 # Current line number
+ self.lexdebug = 0 # Debugging mode
+ self.lexoptimize = 0 # Optimized mode
+
+ def clone(self,object=None):
+ c = copy.copy(self)
+
+ # If the object parameter has been supplied, it means we are attaching the
+ # lexer to a new object. In this case, we have to rebind all methods in
+ # the lexstatere and lexstateerrorf tables.
+
+ if object:
+ newtab = { }
+ for key, ritem in self.lexstatere.items():
+ newre = []
+ for cre, findex in ritem:
+ newfindex = []
+ for f in findex:
+ if not f or not f[0]:
+ newfindex.append(f)
+ continue
+ newfindex.append((getattr(object,f[0].__name__),f[1]))
+ newre.append((cre,newfindex))
+ newtab[key] = newre
+ c.lexstatere = newtab
+ c.lexstateerrorf = { }
+ for key, ef in self.lexstateerrorf.items():
+ c.lexstateerrorf[key] = getattr(object,ef.__name__)
+ c.lexmodule = object
+ return c
+
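    # ------------------------------------------------------------
    # Example (sketch): clone() yields an independent lexer that
    # shares the compiled rules but keeps its own input, position,
    # and state.  Passing an instance rebinds bound rule methods to
    # that instance, as described above.  Hypothetical usage:
    #
    #     newlexer = lexer.clone()
    #     newlexer.input(other_data)     # 'other_data' assumed
    # ------------------------------------------------------------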
+ # ------------------------------------------------------------
+ # writetab() - Write lexer information to a table file
+ # ------------------------------------------------------------
+ def writetab(self,tabfile,outputdir=""):
+ if isinstance(tabfile,types.ModuleType):
+ return
+ basetabfilename = tabfile.split(".")[-1]
+ filename = os.path.join(outputdir,basetabfilename)+".py"
+ tf = open(filename,"w")
+ tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
+ tf.write("_lextokens = %s\n" % repr(self.lextokens))
+ tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
+ tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
+ tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))
+
+ tabre = { }
+ # Collect all functions in the initial state
+ initial = self.lexstatere["INITIAL"]
+ initialfuncs = []
+ for part in initial:
+ for f in part[1]:
+ if f and f[0]:
+ initialfuncs.append(f)
+
+ for key, lre in self.lexstatere.items():
+ titem = []
+ for i in range(len(lre)):
+ titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1],self.lexstaterenames[key][i])))
+ tabre[key] = titem
+
+ tf.write("_lexstatere = %s\n" % repr(tabre))
+ tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))
+
+ taberr = { }
+ for key, ef in self.lexstateerrorf.items():
+ if ef:
+ taberr[key] = ef.__name__
+ else:
+ taberr[key] = None
+ tf.write("_lexstateerrorf = %s\n" % repr(taberr))
+ tf.close()
+
+ # ------------------------------------------------------------
+ # readtab() - Read lexer information from a tab file
+ # ------------------------------------------------------------
+ def readtab(self,tabfile,fdict):
+ if isinstance(tabfile,types.ModuleType):
+ lextab = tabfile
+ else:
+ exec "import %s as lextab" % tabfile
+ self.lextokens = lextab._lextokens
+ self.lexreflags = lextab._lexreflags
+ self.lexliterals = lextab._lexliterals
+ self.lexstateinfo = lextab._lexstateinfo
+ self.lexstateignore = lextab._lexstateignore
+ self.lexstatere = { }
+ self.lexstateretext = { }
+ for key,lre in lextab._lexstatere.items():
+ titem = []
+ txtitem = []
+ for i in range(len(lre)):
+ titem.append((re.compile(lre[i][0],lextab._lexreflags),_names_to_funcs(lre[i][1],fdict)))
+ txtitem.append(lre[i][0])
+ self.lexstatere[key] = titem
+ self.lexstateretext[key] = txtitem
+ self.lexstateerrorf = { }
+ for key,ef in lextab._lexstateerrorf.items():
+ self.lexstateerrorf[key] = fdict[ef]
+ self.begin('INITIAL')
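    # ------------------------------------------------------------
    # Example (sketch): writetab()/readtab() back lex()'s optimized
    # mode.  With a hypothetical table name "mylextab", the first
    # optimized run writes mylextab.py and later runs import it
    # instead of re-analyzing the rule definitions:
    #
    #     lexer = lex.lex(optimize=1, lextab="mylextab")
    # ------------------------------------------------------------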
# ------------------------------------------------------------
# input() - Push a new string into the lexer
# ------------------------------------------------------------
def input(self,s):
- if not isinstance(s,types.StringType):
+ # Pull off the first character to see if s looks like a string
+ c = s[:1]
+ if not (isinstance(c,types.StringType) or isinstance(c,types.UnicodeType)):
raise ValueError, "Expected a string"
self.lexdata = s
self.lexpos = 0
self.lexlen = len(s)
- self.token = self.realtoken
-
- # Change the token routine to point to realtoken()
- global token
- if token == self.errtoken:
- token = self.token
# ------------------------------------------------------------
- # errtoken() - Return error if token is called with no data
+ # begin() - Changes the lexing state
+ # ------------------------------------------------------------
+ def begin(self,state):
+ if not self.lexstatere.has_key(state):
+ raise ValueError, "Undefined state"
+ self.lexre = self.lexstatere[state]
+ self.lexretext = self.lexstateretext[state]
+ self.lexignore = self.lexstateignore.get(state,"")
+ self.lexerrorf = self.lexstateerrorf.get(state,None)
+ self.lexstate = state
+
+ # ------------------------------------------------------------
+ # push_state() - Changes the lexing state and saves old on stack
+ # ------------------------------------------------------------
+ def push_state(self,state):
+ self.lexstatestack.append(self.lexstate)
+ self.begin(state)
+
+ # ------------------------------------------------------------
+ # pop_state() - Restores the previous state
+ # ------------------------------------------------------------
+ def pop_state(self):
+ self.begin(self.lexstatestack.pop())
+
+ # ------------------------------------------------------------
+ # current_state() - Returns the current lexing state
+ # ------------------------------------------------------------
+ def current_state(self):
+ return self.lexstate
+
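    # ------------------------------------------------------------
    # Example (sketch): rules switch states through these methods
    # via t.lexer.  This assumes the rule module declares a state
    # named 'ccode' and lists LBRACE/RBRACE among its tokens:
    #
    #     def t_LBRACE(t):
    #         r'\{'
    #         t.lexer.push_state('ccode')   # enter 'ccode', remember old state
    #         return t
    #
    #     def t_ccode_RBRACE(t):
    #         r'\}'
    #         t.lexer.pop_state()           # restore the previous state
    #         return t
    # ------------------------------------------------------------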
+ # ------------------------------------------------------------
+ # skip() - Skip ahead n characters
# ------------------------------------------------------------
- def errtoken(self):
- raise RuntimeError, "No input string given with input()"
-
+ def skip(self,n):
+ self.lexpos += n
+
# ------------------------------------------------------------
# token() - Return the next token from the Lexer
#
# as possible. Don't make changes unless you really know what
# you are doing
# ------------------------------------------------------------
- def realtoken(self):
+ def token(self):
# Make local copies of frequently referenced attributes
lexpos = self.lexpos
lexlen = self.lexlen
lexignore = self.lexignore
lexdata = self.lexdata
-
+
while lexpos < lexlen:
# This code provides some short-circuit code for whitespace, tabs, and other ignored characters
if lexdata[lexpos] in lexignore:
continue
# Look for a regular expression match
- m = self.lexre.match(lexdata,lexpos)
- if m:
- i = m.lastindex
- lexpos = m.end()
+ for lexre,lexindexfunc in self.lexre:
+ m = lexre.match(lexdata,lexpos)
+ if not m: continue
+
+ # Create a token for return
tok = LexToken()
tok.value = m.group()
tok.lineno = self.lineno
- tok.lexer = self
- func,tok.type = self.lexindexfunc[i]
+ tok.lexpos = lexpos
+
+ i = m.lastindex
+ func,tok.type = lexindexfunc[i]
+
if not func:
- self.lexpos = lexpos
- return tok
-
+ # If no token type was set, it's an ignored token
+ if tok.type:
+ self.lexpos = m.end()
+ return tok
+ else:
+ lexpos = m.end()
+ break
+
+ lexpos = m.end()
+
+                # If func is not callable, it means it's an ignored token
+ if not callable(func):
+ break
+
# If token is processed by a function, call it
+
+ tok.lexer = self # Set additional attributes useful in token rules
+ self.lexmatch = m
self.lexpos = lexpos
+
newtok = func(tok)
- self.lineno = tok.lineno # Update line number
-
+
# Every function must return a token, if nothing, we just move to next token
- if not newtok: continue
-
+ if not newtok:
+ lexpos = self.lexpos # This is here in case user has updated lexpos.
+ lexignore = self.lexignore # This is here in case there was a state change
+ break
+
# Verify type of the token. If not in the token map, raise an error
- if not self.optimize:
+ if not self.lexoptimize:
if not self.lextokens.has_key(newtok.type):
raise LexError, ("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
func.func_code.co_filename, func.func_code.co_firstlineno,
func.__name__, newtok.type),lexdata[lexpos:])
return newtok
+ else:
+ # No match, see if in literals
+ if lexdata[lexpos] in self.lexliterals:
+ tok = LexToken()
+ tok.value = lexdata[lexpos]
+ tok.lineno = self.lineno
+ tok.type = tok.value
+ tok.lexpos = lexpos
+ self.lexpos = lexpos + 1
+ return tok
- # No match. Call t_error() if defined.
- if self.lexerrorf:
- tok = LexToken()
- tok.value = self.lexdata[lexpos:]
- tok.lineno = self.lineno
- tok.type = "error"
- tok.lexer = self
- oldpos = lexpos
- newtok = self.lexerrorf(tok)
- lexpos += getattr(tok,"_skipn",0)
- if oldpos == lexpos:
- # Error method didn't change text position at all. This is an error.
+ # No match. Call t_error() if defined.
+ if self.lexerrorf:
+ tok = LexToken()
+ tok.value = self.lexdata[lexpos:]
+ tok.lineno = self.lineno
+ tok.type = "error"
+ tok.lexer = self
+ tok.lexpos = lexpos
self.lexpos = lexpos
- raise LexError, ("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
- if not newtok: continue
- self.lexpos = lexpos
- return newtok
+ newtok = self.lexerrorf(tok)
+ if lexpos == self.lexpos:
+ # Error method didn't change text position at all. This is an error.
+ raise LexError, ("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
+ lexpos = self.lexpos
+ if not newtok: continue
+ return newtok
- self.lexpos = lexpos
- raise LexError, ("No match found", lexdata[lexpos:])
+ self.lexpos = lexpos
+ raise LexError, ("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
- # No more input data
self.lexpos = lexpos + 1
+ if self.lexdata is None:
+ raise RuntimeError, "No input string given with input()"
return None
-
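# -----------------------------------------------------------------------------
# Example (sketch): the usual driver loop over input() and token().  Each call
# returns a LexToken carrying .type, .value, .lineno and .lexpos, and None once
# the input is exhausted.  The input string here is arbitrary and assumes a
# rule module with matching token definitions:
#
#     lexer = lex.lex()
#     lexer.input("x = 3 + 4")
#     while 1:
#         tok = lexer.token()
#         if not tok: break
#         print tok.type, tok.value, tok.lineno, tok.lexpos
# -----------------------------------------------------------------------------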
# -----------------------------------------------------------------------------
-# validate_file()
+# _validate_file()
#
# This checks to see if there are duplicated t_rulename() functions or strings
# in the parser input file. This is done using a simple regular expression
-# match on each line in the filename.
+# match on each line in the given file. If the file can't be located or opened,
+# a true result is returned by default.
# -----------------------------------------------------------------------------
-def validate_file(filename):
+def _validate_file(filename):
import os.path
base,ext = os.path.splitext(filename)
if ext != '.py': return 1 # No idea what the file is. Return OK
lines = f.readlines()
f.close()
except IOError:
- return 1 # Oh well
+ return 1 # Couldn't find the file. Don't worry about it
fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
+
counthash = { }
linen = 1
noerror = 1
if not prev:
counthash[name] = linen
else:
- print "%s:%d: Rule %s redefined. Previously defined on line %d" % (filename,linen,name,prev)
+ print >>sys.stderr, "%s:%d: Rule %s redefined. Previously defined on line %d" % (filename,linen,name,prev)
noerror = 0
linen += 1
return noerror
# -----------------------------------------------------------------------------
-# _read_lextab(module)
+# _funcs_to_names()
#
-# Reads lexer table from a lextab file instead of using introspection.
+# Given a list of regular expression functions, this converts it to a list
+# suitable for output to a table file
# -----------------------------------------------------------------------------
-def _read_lextab(lexer, fdict, module):
- exec "import %s as lextab" % module
- lexer.lexre = re.compile(lextab._lexre, re.VERBOSE)
- lexer.lexindexfunc = lextab._lextab
- for i in range(len(lextab._lextab)):
- t = lexer.lexindexfunc[i]
- if t:
- if t[0]:
- lexer.lexindexfunc[i] = (fdict[t[0]],t[1])
- lexer.lextokens = lextab._lextokens
- lexer.lexignore = lextab._lexignore
- if lextab._lexerrorf:
- lexer.lexerrorf = fdict[lextab._lexerrorf]
+def _funcs_to_names(funclist,namelist):
+ result = []
+ for f,name in zip(funclist,namelist):
+ if f and f[0]:
+ result.append((name, f[1]))
+ else:
+ result.append(f)
+ return result
+
+# -----------------------------------------------------------------------------
+# _names_to_funcs()
+#
+# Given a list of regular expression function names, this converts it back to
+# functions.
+# -----------------------------------------------------------------------------
+
+def _names_to_funcs(namelist,fdict):
+ result = []
+ for n in namelist:
+ if n and n[0]:
+ result.append((fdict[n[0]],n[1]))
+ else:
+ result.append(n)
+ return result
+
+# -----------------------------------------------------------------------------
+# _form_master_re()
+#
+# This function takes a list of all of the regex components and attempts to
+# form the master regular expression. Given limitations in the Python re
+# module, it may be necessary to break the master regex into separate expressions.
+# -----------------------------------------------------------------------------
+
+def _form_master_re(relist,reflags,ldict,toknames):
+ if not relist: return []
+ regex = "|".join(relist)
+ try:
+ lexre = re.compile(regex,re.VERBOSE | reflags)
+
+ # Build the index to function map for the matching engine
+ lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
+ lexindexnames = lexindexfunc[:]
+
+ for f,i in lexre.groupindex.items():
+ handle = ldict.get(f,None)
+ if type(handle) in (types.FunctionType, types.MethodType):
+ lexindexfunc[i] = (handle,toknames[f])
+ lexindexnames[i] = f
+ elif handle is not None:
+ lexindexnames[i] = f
+ if f.find("ignore_") > 0:
+ lexindexfunc[i] = (None,None)
+ else:
+ lexindexfunc[i] = (None, toknames[f])
+ return [(lexre,lexindexfunc)],[regex],[lexindexnames]
+ except Exception,e:
+ m = int(len(relist)/2)
+ if m == 0: m = 1
+ llist, lre, lnames = _form_master_re(relist[:m],reflags,ldict,toknames)
+ rlist, rre, rnames = _form_master_re(relist[m:],reflags,ldict,toknames)
+ return llist+rlist, lre+rre, lnames+rnames
+
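# -----------------------------------------------------------------------------
# Example (sketch): for two rules the master regex is simply the alternation
# of their named groups,
#
#     relist = [r'(?P<t_NUMBER>\d+)', r'(?P<t_PLUS>\+)']
#     "|".join(relist)   # -> '(?P<t_NUMBER>\d+)|(?P<t_PLUS>\+)'
#
# If the combined pattern fails to compile (for instance, when it exceeds the
# re module's limit on named groups), the list is split in half and each half
# is compiled separately; token() then tries each compiled regex in turn.
# -----------------------------------------------------------------------------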
+# -----------------------------------------------------------------------------
+# def _statetoken(s,names)
+#
+# Given a declaration name s of the form "t_" and a dictionary whose keys are
+# state names, this function returns a tuple (states,tokenname) where states
+# is a tuple of state names and tokenname is the name of the token. For example,
+# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
+# -----------------------------------------------------------------------------
+
+def _statetoken(s,names):
+ nonstate = 1
+ parts = s.split("_")
+ for i in range(1,len(parts)):
+ if not names.has_key(parts[i]) and parts[i] != 'ANY': break
+ if i > 1:
+ states = tuple(parts[1:i])
+ else:
+ states = ('INITIAL',)
+
+ if 'ANY' in states:
+ states = tuple(names.keys())
+
+ tokenname = "_".join(parts[i:])
+ return (states,tokenname)
+
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
-def lex(module=None,debug=0,optimize=0,lextab="lextab"):
+def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir=""):
+ global lexer
ldict = None
- regex = ""
+ stateinfo = { 'INITIAL' : 'inclusive'}
error = 0
files = { }
- lexer = Lexer()
- lexer.debug = debug
- lexer.optimize = optimize
+ lexobj = Lexer()
+ lexobj.lexdebug = debug
+ lexobj.lexoptimize = optimize
global token,input
-
+
+ if nowarn: warn = 0
+ else: warn = 1
+
+ if object: module = object
+
if module:
- if not isinstance(module, types.ModuleType):
- raise ValueError,"Expected a module"
-
- ldict = module.__dict__
-
+ # User supplied a module object.
+ if isinstance(module, types.ModuleType):
+ ldict = module.__dict__
+ elif isinstance(module, _INSTANCETYPE):
+ _items = [(k,getattr(module,k)) for k in dir(module)]
+ ldict = { }
+ for (i,v) in _items:
+ ldict[i] = v
+ else:
+ raise ValueError,"Expected a module or instance"
+ lexobj.lexmodule = module
+
else:
# No module given. We might be able to get information from the caller.
try:
except RuntimeError:
e,b,t = sys.exc_info()
f = t.tb_frame
- f = f.f_back # Walk out to our calling function
- ldict = f.f_globals # Grab its globals dictionary
+ f = f.f_back # Walk out to our calling function
+ if f.f_globals is f.f_locals: # Collect global and local variations from caller
+ ldict = f.f_globals
+ else:
+ ldict = f.f_globals.copy()
+ ldict.update(f.f_locals)
if optimize and lextab:
try:
- _read_lextab(lexer,ldict, lextab)
- if not lexer.lexignore: lexer.lexignore = ""
- token = lexer.token
- input = lexer.input
- return lexer
-
+ lexobj.readtab(lextab,ldict)
+ token = lexobj.token
+ input = lexobj.input
+ lexer = lexobj
+ return lexobj
+
except ImportError:
pass
-
- # Get the tokens map
+
+ # Get the tokens, states, and literals variables (if any)
+
tokens = ldict.get("tokens",None)
+ states = ldict.get("states",None)
+ literals = ldict.get("literals","")
+
if not tokens:
raise SyntaxError,"lex: module does not define 'tokens'"
+
if not (isinstance(tokens,types.ListType) or isinstance(tokens,types.TupleType)):
raise SyntaxError,"lex: tokens must be a list or tuple."
# Build a dictionary of valid token names
- lexer.lextokens = { }
+ lexobj.lextokens = { }
if not optimize:
-
- # Utility function for verifying tokens
- def is_identifier(s):
- for c in s:
- if not (c.isalnum() or c == '_'): return 0
- return 1
-
for n in tokens:
- if not is_identifier(n):
- print "lex: Bad token name '%s'" % n
+ if not _is_identifier.match(n):
+ print >>sys.stderr, "lex: Bad token name '%s'" % n
error = 1
- if lexer.lextokens.has_key(n):
- print "lex: Warning. Token '%s' multiply defined." % n
- lexer.lextokens[n] = None
+ if warn and lexobj.lextokens.has_key(n):
+ print >>sys.stderr, "lex: Warning. Token '%s' multiply defined." % n
+ lexobj.lextokens[n] = None
else:
- for n in tokens: lexer.lextokens[n] = None
-
+ for n in tokens: lexobj.lextokens[n] = None
if debug:
- print "lex: tokens = '%s'" % lexer.lextokens.keys()
+ print "lex: tokens = '%s'" % lexobj.lextokens.keys()
+
+ try:
+ for c in literals:
+ if not (isinstance(c,types.StringType) or isinstance(c,types.UnicodeType)) or len(c) > 1:
+ print >>sys.stderr, "lex: Invalid literal %s. Must be a single character" % repr(c)
+ error = 1
+ continue
- # Get a list of symbols with the t_ prefix
- tsymbols = [f for f in ldict.keys() if f[:2] == 't_']
+ except TypeError:
+ print >>sys.stderr, "lex: Invalid literals specification. literals must be a sequence of characters."
+ error = 1
+
+ lexobj.lexliterals = literals
+
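    # Example (sketch): 'literals' may be declared as a string or sequence of
    # single characters; each one is returned as a token whose type is the
    # character itself, e.g.
    #
    #     literals = "+-*/()"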
+ # Build statemap
+ if states:
+ if not (isinstance(states,types.TupleType) or isinstance(states,types.ListType)):
+ print >>sys.stderr, "lex: states must be defined as a tuple or list."
+ error = 1
+ else:
+ for s in states:
+ if not isinstance(s,types.TupleType) or len(s) != 2:
+ print >>sys.stderr, "lex: invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')" % repr(s)
+ error = 1
+ continue
+ name, statetype = s
+ if not isinstance(name,types.StringType):
+ print >>sys.stderr, "lex: state name %s must be a string" % repr(name)
+ error = 1
+ continue
+ if not (statetype == 'inclusive' or statetype == 'exclusive'):
+ print >>sys.stderr, "lex: state type for state %s must be 'inclusive' or 'exclusive'" % name
+ error = 1
+ continue
+ if stateinfo.has_key(name):
+ print >>sys.stderr, "lex: state '%s' already defined." % name
+ error = 1
+ continue
+ stateinfo[name] = statetype
+
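    # Example (sketch): a 'states' declaration matching the checks above; each
    # entry is a (statename, 'inclusive'|'exclusive') pair, e.g.
    #
    #     states = (
    #         ('ccode',   'exclusive'),
    #         ('comment', 'inclusive'),
    #     )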
+ # Get a list of symbols with the t_ or s_ prefix
+ tsymbols = [f for f in ldict.keys() if f[:2] == 't_' ]
# Now build up a list of functions and a list of strings
- fsymbols = [ ]
- ssymbols = [ ]
+
+ funcsym = { } # Symbols defined as functions
+ strsym = { } # Symbols defined as strings
+ toknames = { } # Mapping of symbols to token names
+
+ for s in stateinfo.keys():
+ funcsym[s] = []
+ strsym[s] = []
+
+ ignore = { } # Ignore strings by state
+ errorf = { } # Error functions by state
+
+ if len(tsymbols) == 0:
+ raise SyntaxError,"lex: no rules of the form t_rulename are defined."
+
for f in tsymbols:
- if isinstance(ldict[f],types.FunctionType):
- fsymbols.append(ldict[f])
- elif isinstance(ldict[f],types.StringType):
- ssymbols.append((f,ldict[f]))
+ t = ldict[f]
+ states, tokname = _statetoken(f,stateinfo)
+ toknames[f] = tokname
+
+ if callable(t):
+ for s in states: funcsym[s].append((f,t))
+ elif (isinstance(t, types.StringType) or isinstance(t,types.UnicodeType)):
+ for s in states: strsym[s].append((f,t))
else:
- print "lex: %s not defined as a function or string" % f
+ print >>sys.stderr, "lex: %s not defined as a function or string" % f
error = 1
-
+
# Sort the functions by line number
- fsymbols.sort(lambda x,y: cmp(x.func_code.co_firstlineno,y.func_code.co_firstlineno))
+ for f in funcsym.values():
+ f.sort(lambda x,y: cmp(x[1].func_code.co_firstlineno,y[1].func_code.co_firstlineno))
# Sort the strings by regular expression length
- ssymbols.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
-
- # Check for non-empty symbols
- if len(fsymbols) == 0 and len(ssymbols) == 0:
- raise SyntaxError,"lex: no rules of the form t_rulename are defined."
+ for s in strsym.values():
+ s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
- # Add all of the rules defined with actions first
- for f in fsymbols:
-
- line = f.func_code.co_firstlineno
- file = f.func_code.co_filename
- files[file] = None
+ regexs = { }
- if not optimize:
- if f.func_code.co_argcount > 1:
- print "%s:%d: Rule '%s' has too many arguments." % (file,line,f.__name__)
- error = 1
- continue
+ # Build the master regular expressions
+ for state in stateinfo.keys():
+ regex_list = []
- if f.func_code.co_argcount < 1:
- print "%s:%d: Rule '%s' requires an argument." % (file,line,f.__name__)
- error = 1
- continue
+ # Add rules defined by functions first
+ for fname, f in funcsym[state]:
+ line = f.func_code.co_firstlineno
+ file = f.func_code.co_filename
+ files[file] = None
+ tokname = toknames[fname]
- if f.__name__ == 't_ignore':
- print "%s:%d: Rule '%s' must be defined as a string." % (file,line,f.__name__)
- error = 1
+ ismethod = isinstance(f, types.MethodType)
+
+ if not optimize:
+ nargs = f.func_code.co_argcount
+ if ismethod:
+ reqargs = 2
+ else:
+ reqargs = 1
+ if nargs > reqargs:
+ print >>sys.stderr, "%s:%d: Rule '%s' has too many arguments." % (file,line,f.__name__)
+ error = 1
+ continue
+
+ if nargs < reqargs:
+ print >>sys.stderr, "%s:%d: Rule '%s' requires an argument." % (file,line,f.__name__)
+ error = 1
+ continue
+
+ if tokname == 'ignore':
+ print >>sys.stderr, "%s:%d: Rule '%s' must be defined as a string." % (file,line,f.__name__)
+ error = 1
+ continue
+
+ if tokname == 'error':
+ errorf[state] = f
continue
-
- if f.__name__ == 't_error':
- lexer.lexerrorf = f
- continue
- if f.__doc__:
+ if f.__doc__:
+ if not optimize:
+ try:
+ c = re.compile("(?P<%s>%s)" % (fname,f.__doc__), re.VERBOSE | reflags)
+ if c.match(""):
+ print >>sys.stderr, "%s:%d: Regular expression for rule '%s' matches empty string." % (file,line,f.__name__)
+ error = 1
+ continue
+ except re.error,e:
+ print >>sys.stderr, "%s:%d: Invalid regular expression for rule '%s'. %s" % (file,line,f.__name__,e)
+ if '#' in f.__doc__:
+ print >>sys.stderr, "%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'." % (file,line, f.__name__)
+ error = 1
+ continue
+
+ if debug:
+ print "lex: Adding rule %s -> '%s' (state '%s')" % (f.__name__,f.__doc__, state)
+
+ # Okay. The regular expression seemed okay. Let's append it to the master regular
+ # expression we're building
+
+ regex_list.append("(?P<%s>%s)" % (fname,f.__doc__))
+ else:
+ print >>sys.stderr, "%s:%d: No regular expression defined for rule '%s'" % (file,line,f.__name__)
+
+ # Now add all of the simple rules
+ for name,r in strsym[state]:
+ tokname = toknames[name]
+
+ if tokname == 'ignore':
+ if "\\" in r:
+ print >>sys.stderr, "lex: Warning. %s contains a literal backslash '\\'" % name
+ ignore[state] = r
+ continue
+
if not optimize:
+ if tokname == 'error':
+ raise SyntaxError,"lex: Rule '%s' must be defined as a function" % name
+ error = 1
+ continue
+
+ if not lexobj.lextokens.has_key(tokname) and tokname.find("ignore_") < 0:
+ print >>sys.stderr, "lex: Rule '%s' defined for an unspecified token %s." % (name,tokname)
+ error = 1
+ continue
try:
- c = re.compile(f.__doc__, re.VERBOSE)
+ c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | reflags)
+ if (c.match("")):
+ print >>sys.stderr, "lex: Regular expression for rule '%s' matches empty string." % name
+ error = 1
+ continue
except re.error,e:
- print "%s:%d: Invalid regular expression for rule '%s'. %s" % (file,line,f.__name__,e)
+ print >>sys.stderr, "lex: Invalid regular expression for rule '%s'. %s" % (name,e)
+ if '#' in r:
+ print >>sys.stderr, "lex: Make sure '#' in rule '%s' is escaped with '\\#'." % name
+
error = 1
continue
-
if debug:
- print "lex: Adding rule %s -> '%s'" % (f.__name__,f.__doc__)
+ print "lex: Adding rule %s -> '%s' (state '%s')" % (name,r,state)
- # Okay. The regular expression seemed okay. Let's append it to the master regular
- # expression we're building
-
- if (regex): regex += "|"
- regex += "(?P<%s>%s)" % (f.__name__,f.__doc__)
- else:
- print "%s:%d: No regular expression defined for rule '%s'" % (file,line,f.__name__)
+ regex_list.append("(?P<%s>%s)" % (name,r))
- # Now add all of the simple rules
- for name,r in ssymbols:
+ if not regex_list:
+ print >>sys.stderr, "lex: No rules defined for state '%s'" % state
+ error = 1
+
+ regexs[state] = regex_list
- if name == 't_ignore':
- lexer.lexignore = r
- continue
-
- if not optimize:
- if name == 't_error':
- raise SyntaxError,"lex: Rule 't_error' must be defined as a function"
- error = 1
- continue
-
- if not lexer.lextokens.has_key(name[2:]):
- print "lex: Rule '%s' defined for an unspecified token %s." % (name,name[2:])
- error = 1
- continue
- try:
- c = re.compile(r,re.VERBOSE)
- except re.error,e:
- print "lex: Invalid regular expression for rule '%s'. %s" % (name,e)
- error = 1
- continue
- if debug:
- print "lex: Adding rule %s -> '%s'" % (name,r)
-
- if regex: regex += "|"
- regex += "(?P<%s>%s)" % (name,r)
if not optimize:
for f in files.keys():
- if not validate_file(f):
+ if not _validate_file(f):
error = 1
- try:
- if debug:
- print "lex: regex = '%s'" % regex
- lexer.lexre = re.compile(regex, re.VERBOSE)
- # Build the index to function map for the matching engine
- lexer.lexindexfunc = [ None ] * (max(lexer.lexre.groupindex.values())+1)
- for f,i in lexer.lexre.groupindex.items():
- handle = ldict[f]
- if isinstance(handle,types.FunctionType):
- lexer.lexindexfunc[i] = (handle,handle.__name__[2:])
- else:
- # If rule was specified as a string, we build an anonymous
- # callback function to carry out the action
- lexer.lexindexfunc[i] = (None,f[2:])
-
- # If a lextab was specified, we create a file containing the precomputed
- # regular expression and index table
-
- if lextab and optimize:
- lt = open(lextab+".py","w")
- lt.write("# %s.py. This file automatically created by PLY. Don't edit.\n" % lextab)
- lt.write("_lexre = %s\n" % repr(regex))
- lt.write("_lextab = [\n");
- for i in range(0,len(lexer.lexindexfunc)):
- t = lexer.lexindexfunc[i]
- if t:
- if t[0]:
- lt.write(" ('%s',%s),\n"% (t[0].__name__, repr(t[1])))
- else:
- lt.write(" (None,%s),\n" % repr(t[1]))
- else:
- lt.write(" None,\n")
-
- lt.write("]\n");
- lt.write("_lextokens = %s\n" % repr(lexer.lextokens))
- lt.write("_lexignore = %s\n" % repr(lexer.lexignore))
- if (lexer.lexerrorf):
- lt.write("_lexerrorf = %s\n" % repr(lexer.lexerrorf.__name__))
- else:
- lt.write("_lexerrorf = None\n")
- lt.close()
-
- except re.error,e:
- print "lex: Fatal error. Unable to compile regular expression rules. %s" % e
- error = 1
if error:
raise SyntaxError,"lex: Unable to build lexer."
- if not lexer.lexerrorf:
- print "lex: Warning. no t_error rule is defined."
- if not lexer.lexignore: lexer.lexignore = ""
-
+ # From this point forward, we're reasonably confident that we can build the lexer.
+ # No more errors will be generated, but there might be some warning messages.
+
+ # Build the master regular expressions
+
+ for state in regexs.keys():
+ lexre, re_text, re_names = _form_master_re(regexs[state],reflags,ldict,toknames)
+ lexobj.lexstatere[state] = lexre
+ lexobj.lexstateretext[state] = re_text
+ lexobj.lexstaterenames[state] = re_names
+ if debug:
+ for i in range(len(re_text)):
+ print "lex: state '%s'. regex[%d] = '%s'" % (state, i, re_text[i])
+
+ # For inclusive states, we need to add the INITIAL state
+ for state,type in stateinfo.items():
+ if state != "INITIAL" and type == 'inclusive':
+ lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
+ lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
+ lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
+
+ lexobj.lexstateinfo = stateinfo
+ lexobj.lexre = lexobj.lexstatere["INITIAL"]
+ lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
+
+ # Set up ignore variables
+ lexobj.lexstateignore = ignore
+ lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")
+
+ # Set up error functions
+ lexobj.lexstateerrorf = errorf
+ lexobj.lexerrorf = errorf.get("INITIAL",None)
+ if warn and not lexobj.lexerrorf:
+ print >>sys.stderr, "lex: Warning. no t_error rule is defined."
+
+ # Check state information for ignore and error rules
+ for s,stype in stateinfo.items():
+ if stype == 'exclusive':
+ if warn and not errorf.has_key(s):
+ print >>sys.stderr, "lex: Warning. no error rule is defined for exclusive state '%s'" % s
+ if warn and not ignore.has_key(s) and lexobj.lexignore:
+ print >>sys.stderr, "lex: Warning. no ignore rule is defined for exclusive state '%s'" % s
+ elif stype == 'inclusive':
+ if not errorf.has_key(s):
+ errorf[s] = errorf.get("INITIAL",None)
+ if not ignore.has_key(s):
+ ignore[s] = ignore.get("INITIAL","")
+
+
# Create global versions of the token() and input() functions
- token = lexer.token
- input = lexer.input
-
- return lexer
+ token = lexobj.token
+ input = lexobj.input
+ lexer = lexobj
+
+ # If in optimize mode, we write the lextab
+ if lextab and optimize:
+ lexobj.writetab(lextab,outputdir)
+
+ return lexobj
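# -----------------------------------------------------------------------------
# Example (a minimal sketch of a rule module consumed by lex()).  The defining
# scope must provide 'tokens'; rules are functions or strings whose names start
# with 't_'; 'literals', 't_ignore' and 't_error' are optional.  The token
# names and patterns below are illustrative only:
#
#     import lex
#
#     tokens   = ('NUMBER', 'PLUS')
#     literals = "()"
#
#     t_PLUS   = r'\+'
#
#     def t_NUMBER(t):
#         r'\d+'
#         t.value = int(t.value)
#         return t
#
#     def t_newline(t):
#         r'\n+'
#         t.lexer.lineno += len(t.value)
#
#     t_ignore = ' \t'
#
#     def t_error(t):
#         print "Illegal character '%s'" % t.value[0]
#         t.lexer.skip(1)
#
#     lexer = lex.lex()
# -----------------------------------------------------------------------------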
# -----------------------------------------------------------------------------
-# run()
+# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
_token = lexer.token
else:
_token = token
-
+
while 1:
tok = _token()
if not tok: break
- print "(%s,'%s',%d)" % (tok.type, tok.value, tok.lineno)
-
-
+ print "(%s,%r,%d,%d)" % (tok.type, tok.value, tok.lineno,tok.lexpos)
+
+
+# -----------------------------------------------------------------------------
+# @TOKEN(regex)
+#
+# This decorator function can be used to set the regular expression of a rule
+# function when its docstring needs to be built in an alternative way (for
+# example, when the pattern is computed at run time)
+# -----------------------------------------------------------------------------
+
+def TOKEN(r):
+ def set_doc(f):
+ if callable(r):
+ f.__doc__ = r.__doc__
+ else:
+ f.__doc__ = r
+ return f
+ return set_doc
+# Alternative spelling of the TOKEN decorator
+Token = TOKEN
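# -----------------------------------------------------------------------------
# Example (sketch): @TOKEN attaches a pattern that is assembled at run time,
# where writing it as a literal docstring would be awkward.  Assumes 'ID' is
# listed in the rule module's tokens:
#
#     identifier = r'[a-zA-Z_][a-zA-Z0-9_]*'
#
#     @TOKEN(identifier)
#     def t_ID(t):
#         return t
# -----------------------------------------------------------------------------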