"""parser.py --- A small scheme parser.
Danny Yoo (dyoo@hkn.eecs.berkeley.edu)
This parser might be useful if we wanted to grab information from the
user using Schemeish lists. It might also be nice if one is planning
to write a Scheme interpreter in Python. *cough*
The main functions to use in this module are
tokenize(): makes a list of tokens out of a string
parse(s): parse string s, and return a either an atomic value
(symbol or number) or a pair.
If anything bad happens during the parsing, we'll raise a ParserError.
Some special notes: this module should behave well even under unusual
input: there should be no possibility of hitting the recursion limit,
since we are using trampolined style.
"""
__license__ = "MIT License"
import re
from symbol import Symbol
import pair
import pogo
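## A quick usage sketch of the two main entry points (a doctest-style
## illustration; pair equality behaves as in the unit tests below):
##
##     >>> parse("42")
##     42
##     >>> parse("(1 2)") == pair.list(1, 2)
##     True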
"""The End Of File token is a sentinal that terminates a list of tokens."""
EOF_TOKEN = (None, None)
"""Here are the patterns we pay attention when breaking down a string
into tokens."""
PATTERNS = [ ('whitespace', re.compile(r'(\s+)')),
('comment', re.compile(r'(;[^\n]*)')),
('(', re.compile(r'(\()')),
(')', re.compile(r'(\))')),
('number', re.compile(r'''( [+\-]? ## optional sign,
                                      (?:          ## followed by some
                                                   ## digits
\d+\.\d+
| \d+\.
| \.\d+
| \d+
)
)
''',
re.VERBOSE)),
('symbol',
re.compile(r'''([a-zA-Z\+\=\?\!\@\#\$\%\^\&\*\-\/\.\>\<]
[\w\+\=\?\!\@\#\$\%\^\&\*\-\/\.\>\<]*)''',
re.VERBOSE)),
('string', re.compile(r'''
"
(([^\"] | \\")*)
"
''',
re.VERBOSE)),
('\'', re.compile(r'(\')')),
('`', re.compile(r'(`)')),
## fixme: add UNQUOTE-SPLICING form as well
(',', re.compile(r'(,)')),
]
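## Pattern order matters: the 'symbol' pattern would happily match
## "-42" too, but 'number' is tried first, so "-42" lexes as a number.
## A quick check (a sketch):
##
##     >>> dict(PATTERNS)['number'].match("-42").group(1)
##     '-42'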
def tokenize(s):
"""Given a string 's', return a list of its tokens. A token can
be one of the following types listed in the PATTERNS above, and
each token is a 2-tuple: (type, content)."""
tokens = []
while 1:
should_continue = 0
for tokenType, regex in PATTERNS:
match_obj = regex.match(s)
if match_obj:
should_continue = 1
tokens.append( (tokenType, match_obj.group(1)) )
s = s[match_obj.span()[1] :]
        if should_continue == 0:
            ## No pattern matched: stop.  (Any unrecognized remainder
            ## of 's' is silently dropped here.)
            break
tokens.append(EOF_TOKEN)
return filter(lambda x: x[0] not in ('whitespace', 'comment'),
tokens)
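## For example (a sketch): whitespace and comments are matched but then
## filtered out, and the EOF sentinel always comes last:
##
##     >>> tokenize("1 ; a comment")
##     [('number', '1'), (None, None)]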
"""With the lexer done, now let's direct our attention to the parser."""
######################################################################
class ParserError(Exception):
"""Our personalized exception class."""
pass
def peek(tokens):
"""Take a quick glance at the first token in our tokens list."""
if len(tokens) == 0:
raise ParserError, "While peeking: ran out of tokens."
return tokens[0]
def eat(tokens, tokenType):
"""Digest the first token in our tokens list, making sure that we're
biting on the right tokenType of thing."""
if len(tokens) == 0:
raise ParserError, "While trying to eat %s: ran out of tokens." % \
(repr(tokenType),)
if tokens[0][0] != tokenType:
raise ParserError, "While trying to eat %s: got token %s instead." % \
(repr(tokenType), repr(tokens[0]))
return tokens.pop(0)
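## A small illustration of peek()/eat() on a token list (a sketch):
##
##     >>> toks = [('number', '42'), EOF_TOKEN]
##     >>> peek(toks)           ## non-destructive look at the head
##     ('number', '42')
##     >>> eat(toks, 'number')  ## pops the head, checking its type
##     ('number', '42')
##     >>> toks
##     [(None, None)]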
######################################################################
def identity_cont(val):
    """The identity continuation: just lands the final value."""
    return pogo.land(val)
"""Below is our parser for symbol-lists. We'll use a recursive
descent parsing technique, since Scheme's syntax is so well suited for
it. Actually, it's a bit more complicated than this.
There is one major problem with a naive rec-descent approach: with
unusual input, we can run into Python's recursion stack limit. To get
around this, we first wrote the parser normally, and then attacked the
recursion using CPS and trampolining style. The result is that the
parser works, even with evil input, but it's a bit harder to read.
"""
def parseSingleExpression(tokens, cont=identity_cont):
"""Returns a single Expression, given a sequence of tokens.
Raises a ParserException if our tokens haven't been exhausted."""
def c(expression):
eat(tokens, None)
return pogo.bounce(cont, expression)
return parseExpression(tokens, c)
def parseExpression(tokens, cont=identity_cont):
"""Returns an Expression, given a sequence of tokens.
An expression is made up of one of the following things:
o A quoted expression
o An atom (like a number or symbol or string)
o A list.
    This procedure handles all three of these alternatives."""
look_ahead_type = peek(tokens)[0]
def make_c(quoteType):
def c(expr):
return pogo.bounce(cont, pair.list(Symbol(quoteType), expr))
return c
if look_ahead_type == '\'':
eat(tokens, '\'')
return parseExpression(tokens, make_c('quote'))
    elif look_ahead_type == '`':
eat(tokens, '`')
return parseExpression(tokens, make_c('quasiquote'))
elif look_ahead_type == ',':
eat(tokens, ',')
return parseExpression(tokens, make_c('unquote'))
elif look_ahead_type == '(':
return parseList(tokens, cont)
elif look_ahead_type in ('number', 'symbol', 'string'):
return parseAtom(tokens, cont)
else:
raise ParserError, "While parsing Expression: no alternatives."
def parseAtom(tokens, cont):
"""Returns an Atom, given a sequence of tokens.
An atom is either a number, a symbol, or a string."""
if peek(tokens)[0] == 'number':
return pogo.bounce(cont, toNumber(eat(tokens, 'number')[1]))
elif peek(tokens)[0] == 'symbol':
return pogo.bounce(cont, Symbol(eat(tokens, 'symbol')[1]))
elif peek(tokens)[0] == 'string':
return pogo.bounce(cont, eat(tokens, 'string')[1])
else:
raise ParserError, "While parsing Atom: no alternatives."
def toNumber(s):
"""Tries to convert string 's' into a number."""
try:
return int(s)
except ValueError:
return float(s)
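## For example (a sketch):
##
##     >>> toNumber("42")
##     42
##     >>> toNumber("3.")   ## int() fails here, so we fall back to float()
##     3.0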
def parseList(tokens, cont):
"""Parses a parenthesized list expression."""
eat(tokens, "(")
def c_expressionsEaten(val):
eat(tokens, ")")
return pogo.bounce(cont, val)
return pogo.bounce(parseExpressionStar, tokens, c_expressionsEaten)
def parseExpressionStar(tokens, cont):
"""Tries to eat as many expressions as it can see."""
START_EXPR_SET = ('\'', '`', ',', '(', 'number', 'symbol', 'string')
if peek(tokens) == ('symbol', '.'):
## Handle dotted pairs
eat(tokens, "symbol")
return pogo.bounce(parseExpression, tokens, cont)
elif peek(tokens)[0] in START_EXPR_SET:
def c_first_eaten(firstVal):
def c_rest_eaten(restVal):
return pogo.bounce(cont, pair.cons(firstVal, restVal))
return pogo.bounce(parseExpressionStar, tokens, c_rest_eaten)
return pogo.bounce(parseExpression, tokens, c_first_eaten)
else:
return pogo.bounce(cont, pair.NIL)
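## The dotted-pair branch makes the expression after a '.' become the
## cdr directly, instead of being consed onto a NIL-terminated list.
## For instance (a sketch, using pair equality as in the tests below):
##
##     >>> parse("(1 . 2)") == pair.cons(1, 2)
##     True
##     >>> parse("(1 2)") == pair.cons(1, pair.cons(2, pair.NIL))
##     True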
def parse(s):
"""Parse a single string. This is just a convenience function."""
return pogo.pogo(parseSingleExpression(tokenize(s),
identity_cont))
######################################################################
try:
import unittest
class ParserTests(unittest.TestCase):
def testAtomParsing(self):
self.assertEquals(42, parse("42"))
self.assertEquals("particle-man", parse('"particle-man"'))
def testBrokenNegativeExampleFromBaypiggies(self):
            ## embarrassing case that didn't work during the Baypiggies
            ## meeting on September 9, 2004.  Doh!
self.assertEquals(-42, parse("-42"))
def testLists(self):
self.assertEquals(pair.list(1, 2), parse("(1 2)"))
self.assertEquals(pair.list(1, 2, pair.list(3), 4),
parse("(1 2 (3) 4)"))
self.assertEquals(pair.list(pair.list(pair.list())),
parse("((()))"))
def testQuotation(self):
self.assertEquals(pair.list(Symbol("quote"),
Symbol("atom-man")),
parse("'atom-man"))
self.assertEquals(pair.list(Symbol("quasiquote"),
Symbol("istanbul")),
parse("`istanbul"))
self.assertEquals(pair.list(Symbol("unquote"),
Symbol("constantinople")),
parse(",constantinople"))
def testDottedPair(self):
cons = pair.cons ## shortcut
self.assertEquals(cons(Symbol("alpha"), Symbol("beta")),
parse("(alpha . beta)"))
self.assertEquals(cons(Symbol("a"),
cons(Symbol("b"),
cons(cons(Symbol("c"), Symbol("d")),
pair.NIL))),
parse("(a b (c . d))"))
def testQuotedList(self):
self.assertEquals(pair.list(Symbol("quote"),
pair.list(Symbol("foo"),
Symbol("bar"))),
parse("'(foo bar)"))
def testEmptyList(self):
self.assertEquals(pair.NIL, parse("()"))
def testStressWithSuperNesting(self):
            ## An evil test to see whether deep nesting blows Python's
            ## recursion limit.
N = 1000
bigNestedList = pair.list()
for ignored in xrange(N-1):
bigNestedList = pair.list(bigNestedList)
try:
self.assertEquals(bigNestedList,
parse( "(" * N + ")" * N))
except RuntimeError, e:
self.fail(e)
if __name__ == '__main__':
unittest.main()
except ImportError:
pass