diff -Nru /tmp/iaU3nvy5bu/yappy-1.7/debian/changelog /tmp/mJzC9yN4dz/yappy-1.8/debian/changelog
--- /tmp/iaU3nvy5bu/yappy-1.7/debian/changelog	2007-08-24 18:11:11.000000000 +0200
+++ /tmp/mJzC9yN4dz/yappy-1.8/debian/changelog	2007-08-24 18:11:11.000000000 +0200
@@ -1,3 +1,22 @@
+yappy (1.8-1ubuntu1) gutsy; urgency=low
+
+  * Merge from Debian unstable (LP: #134552). Remaining Ubuntu changes:
+    - Remove pycompat file
+    - Add pyversions file
+    - Remove dh_python from debian/rules
+    - debian/control:
+      + Remove Architecture: any from source package
+      + Bump python-support version to >= 0.5.3
+      + Update maintainer per spec
+
+ -- Cesare Tirabassi  Fri, 24 Aug 2007 18:04:03 +0200
+
+yappy (1.8-1) unstable; urgency=low
+
+  * New upstream release (a new iterative lexer)
+
+ -- Rogerio Reis  Mon, 30 Jul 2007 23:07:10 +0100
+
 yappy (1.7-1ubuntu1) gutsy; urgency=low
 
   * Remove pycompat file
diff -Nru /tmp/iaU3nvy5bu/yappy-1.7/demo.py /tmp/mJzC9yN4dz/yappy-1.8/demo.py
--- /tmp/iaU3nvy5bu/yappy-1.7/demo.py	2006-07-19 11:35:55.000000000 +0200
+++ /tmp/mJzC9yN4dz/yappy-1.8/demo.py	2007-07-31 00:06:08.000000000 +0200
@@ -11,7 +11,7 @@
 #from yappy.parser import *
 import sys, string
-from parser import *
+from yappy.parser import *
 
 ############## Demos #####################
 
 class SimpleExp(Yappy):
@@ -182,7 +182,7 @@
         sinal = "[+-]"
         integer ="\d"
         tokenize=[("(%s)+"%integer,lambda x: ("id",int(x))),
-                  ("%s"%sinal,lambda x: ("add_op",self.make_op(x)),("add_op",100,'left')),
+                  (sinal,lambda x: ("add_op",self.make_op(x)),("add_op",100,'left')),
                   ("[*/]",lambda x: ("mul_op",self.make_op(x)),("mul_op",200,'left')),
                   ("\(|\)",lambda x: (x,x))
                   ]
@@ -228,8 +228,9 @@
     """A parser for transforming a list atrib=value into a python dictionary """
     def __init__(self,no_table=0, table='Listavg.tab'):
         grammar = """
-        E -> ( ) {{self.EmptyDict}} | ( AVL ) {{self.ParSemRule}} ;
-        AVL -> AV , AVL {{DefaultSemRule}} | AV {{EmptySemRule}} ;
+        E -> ( ) {{self.EmptyDict}};
+        E -> ( AVL ) {{self.ParSemRule}} ;
+        AVL -> AV , AVL | AV {{EmptySemRule}} ;
         AV -> tok = tok {{ self.AddItem }};
         """
 
@@ -272,7 +273,7 @@
         grammar = """
         E -> ( ) {{self.EmptyDict}};
        E -> ( AVL ) {{self.ParSemRule}} ;
-        AVL -> AV , AVL {{DefaultSemRule}} | AV {{EmptySemRule}} ;
+        AVL -> AV , AVL | AV {{EmptySemRule}} ;
         AV -> tok = tok {{ self.AddItem }};
         """
         tokenize = [
@@ -318,7 +319,7 @@
         rules are dummy..."""
         grammar = grules([("r -> r | r",self.OrSemRule),
                           ("r -> r . r",self.ConcatSemRule),
-                          ("r -> r *",self.StarSemRule),
+                          ("r -> r *",self.StarSemRule, (300,'left')),
                           ("r -> ( r )",self.ParSemRule),
                           ("r -> id",self.BaseSemRule),
                           ])
@@ -326,7 +327,7 @@
                    ("[A-Za-z0-9]",lambda x: ("id",x)),
                    ("[+|]",lambda x: ("|",x),("|",100,'left')),
                    ("[\.]",lambda x: (".",""),(".",200,'left')),
-                   ("[*]",lambda x: (x,x),("*",300,'left')),
+                   ("[*]",lambda x: (x,x), ("*",300,'left')),
                    ("\(|\)",lambda x: (x,x))
                    ]
         Yappy.__init__(self,tokenize,grammar,table,no_table,tabletype)
@@ -349,6 +350,10 @@
     def test(self):
         st = ["(a+b)*.a.a.b*",
               "a+a.b+a.b.(a+a)*",
+              "a+a.b+a.(a+a)**",
+              "a+a.b.c",
+              "a+a.b.(c+b)",
+              "a+a.b.(c+b)*",
               "a+a.b*.(a+b)"]
         for i in st:
             print "Input: %s" %i
@@ -367,7 +372,7 @@
         tokenize =[
                    ("[A-Za-z0-9]",lambda x: ("id",x)),
                    ("[+|]",lambda x: ("+",x),("+",100,'left')),
-                   ("[*]",lambda x: (x,x),("*",300,'left')),
+                   ("[*]",lambda x: (x,x)),
                    ("\(|\)",lambda x: (x,x))
                    ]
         Yappy.__init__(self,tokenize,grammar,table,no_table,tabletype,noconflicts,expect)
@@ -379,6 +384,10 @@
         st = ["(a+b)*aab*",
               "(a+ab)*a*",
               "(a+a)a+ab",
+              "a+ab+(a(a+a)*)*",
+              "a+ab+a(a+a)**",
+              "(a+a)**ab(a+b)**",
+              "aa+bb**",
               "(a+ab)(a+ab)(ac+a)",
               "a+abc+ad",
               "abc+b+ad",
@@ -392,7 +401,9 @@
               "a+c+ab(a+b)",
               "a+c+(a+b)ab",
               "a+b*",
-              "aa+b*"
+              "aa+b*",
+              "aab*ab+a*+aa",
+              "aab*ab**+(a+aa)**"
               ]
         for i in st:
             print "Input: %s" %i
@@ -412,7 +423,7 @@
                    ("@empty_set",lambda x: ("id",x)),
                    ("[A-Za-z0-9]",lambda x: ("id",x)),
                    ("[+|]",lambda x: ("+",x),("+",100,'left')),
-                   ("[*]",lambda x: (x,x),("*",300,'left')),
+                   ("[*]",lambda x: (x,x)),
                    ("\(|\)",lambda x: (x,x))
                    ]
         Yappy.__init__(self,tokenize,grammar,table,no_table)
diff -Nru /tmp/iaU3nvy5bu/yappy-1.7/parser.py /tmp/mJzC9yN4dz/yappy-1.8/parser.py
--- /tmp/iaU3nvy5bu/yappy-1.7/parser.py	2006-07-19 11:35:55.000000000 +0200
+++ /tmp/mJzC9yN4dz/yappy-1.8/parser.py	2007-07-31 00:06:08.000000000 +0200
@@ -10,7 +10,7 @@
 It currently builds C{SLR}, C{LR(1)} and C{LALR(1)} parsing tables.
 Copyright (C) 2000-2003 Rogério Reis & Nelma Moreira {rvr,nam}@ncc.up.pt
 
-Version: $Id: parser.py,v 1.17 2006-07-19 08:21:27 nam Exp $
+Version: $Id: parser.py,v 1.18 2006-07-19 09:52:06 rvr Exp $
 
 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
@@ -92,20 +92,18 @@
             rex = r[0]
             funct = r[1]
         except IndexError:
-            raise LexicalError(rnumber)
+            raise LexicalError(rnumber,r)
         try:
             rec = re.compile(rex)
         except TypeError:
             raise LexicalRulesErrorRE(rex,rnumber)
         try:
-            op,prec,assoc = r[2]
-            if not self.__dict__.has_key("operators"):
+          op,prec,assoc = r[2]
+          if not self.__dict__.has_key("operators"):
                 self.operators = {}
-            if not self.operators.has_key(op):
+          if not self.operators.has_key(op):
                 self.operators[op] = (prec,assoc)
         except IndexError:
             pass
-#            print "Error in rule %s\n", rnumber
-#            raise LexicalError(rnumber,r)
         self.rules.append((rec,funct))
         rnumber = rnumber + 1
@@ -117,62 +115,56 @@
 
         @return: a list of tokens (pairs C{(TOKEN , SPEcial_VALUE )}),
         for recognized elements and C{("@UNK", string )} for the others"""
-        st = string
+        st = [string]
         for r in self.rules:
             st = self.scanOneRule(r,st)
-        st = self.scanUnknown(st)
-        return self.rebuild(st)
+        return self.scanUnknown(st)
 
     def scanOneRule(self,rule,st):
         """Scans space C{st} according only one rule
 
         @param rule: one rule C{(re,fun,op)}
-        @param st: can be a string or a more complex structure product of previous
-        scans steps ( an tuple C{(match, left, right)})
+        @param st: is a list of strings and already matched structures
         """
-        #re, fun = rule
         re = rule[0]
         fun = rule[1]
-        if isinstance(st, StringType):
-            if st == "": return st
-            m = re.search(st)
-            if not m: return st
+        st1 = []
+        for s in st:
+            if not isinstance(s, StringType):
+                st1.append(s)
             else:
-                if m.start() == 0: left = ""
-                else: left = st[0:m.start()]
-                if m.end() == len(st): right = ""
-                else: right = st[m.end():]
-                if fun == "":
-                    return ("",left, self.scanOneRule(rule,right))
-                return (apply(fun,[st[m.start():m.end()]]),left,
-                        self.scanOneRule(rule,right))
-        else: # OK so this is already a tuple (match,left,right)
-            (match, left, right) = st
-            return (match, self.scanOneRule(rule,left),
-                    self.scanOneRule(rule,right))
+                s1 = s
+                while True:
+                    m = re.search(s1)
+                    if not m:
+                        st1.append(s1)
+                        break
+                    else:
+                        if m.start() != 0:
+                            st1.append(s1[0:m.start()])
+                        if fun == "":
+                            st1.append(("",s1[m.start():m.end()]))
+                        else:
+                            st1.append(apply(fun,[s1[m.start():m.end()]]))
+                        if m.end() == len(s1):
+                            break
+                        else:
+                            s1 = s1[m.end():]
+        return st1
 
     def scanUnknown(self,st):
         """Scans the resulting structure making Unknown strings
 
         Unknown parts will be of the form ("@UNK", string ) """
-        f = lambda x: ("@UNK",x)
-        return self.scanOneRule((re.compile(".*"),f),st)
-
-    def rebuild(self,st):
-        """Re-assembles the structure resulting from scanning as a list.
-
-        @return: a list of tokens (pairs of token-value)."""
-        if isinstance(st, StringType): # then st is ""
-            assert st == "", "A non-empty string appears as a branch!"
-            return []
-        else:
-            (s, left, right) = st
-            if s != "":
-                return self.rebuild(left) + [s] + self.rebuild(right)
+        st1 = []
+        for s in st:
+            if isinstance(s, StringType):
+                st1.append(("@UNK",s))
             else:
-                return self.rebuild(left) + self.rebuild(right)
-
+                st1.append(s)
+        return st1
+
     def readscan(self):
         """Scans a string read from stdin """
         st = raw_input()
@@ -190,9 +182,11 @@
 class LexicalError(YappyError):
     """Class for all Yappy Lexical analyser exceptions"""
     def __init__(self,r,rule):
-        self.message = 'Error in rule number %s'%(r)
-        print rule
+        self.message = 'Error in rule number %s: %s'%(r,rule)
+
+    def __str__(self):
+        return "%s" % (self.message)
 
 class LexicalRulesErrorRE(YappyError):
     """An error occured parsing the RE part of a lexical rule"""
     def __init__(self,re,no=0):
@@ -200,6 +194,9 @@
         self.rule = no
         self.re = re
 
+    def __str__(self):
+        return "%s" % (self.message)
+
 class GrammarError(YappyError):
     """Class for input grammar errors """
     def __init__(self,rule):
@@ -239,7 +236,8 @@
     def __init__(self,s,a):
         self.item = s
         self.symbol = a
-        self.message = 'Error in LR: (%s,%s) not found' %(s,a)
+        self.message = 'Error in LR: (%s,%s) not found' %(self.item,self.symbol)
+
     def __str__(self):
         return "%s" % (self.message)
 
@@ -715,7 +713,7 @@
                 if len(self.gr.rules[i]) == 4:
                     self.precedence[i] = self.gr.rules[i][3]
                 else:
-                    self.precedence[i] = 0
+                    self.precedence[i] = None
                 if self.operators:
                     self.gr.rules[i][1].reverse()
                     for s in self.gr.rules[i][1]:
@@ -726,7 +724,7 @@
 
         if _DEBUG:
             print "Precedence %s" %self.precedence
-
+
     def add_action(self,i,a,action,j):
         """Set C{(action,j)} for state C{i} and symbol C{a} or
         raise conflict error. Conficts are resolved using the following
@@ -764,17 +762,17 @@
 
         """
         try:
-            if self.operators and self.operators.has_key(a):
+            if self.operators and self.operators.has_key(a) and self.precedence.has_key(r) and self.precedence[r]:
                 prec_op, assoc_op = self.operators[a]
-            else:
-                prec_op, assoc_op = (10000,'DUMMY')
-            if self.precedence.has_key(r):
                 if (self.precedence[r][0] > prec_op) or (self.precedence[r][0] == prec_op and self.precedence[r][1] =='left'):
                     self.ACTION[(i,a)] = ('reduce',r)
                     if _DEBUG: print "solved reduce %s" %r
                 else:
                     self.ACTION[(i,a)] = ('shift',s)
                     if _DEBUG: print "solved shift %s" %s
+            else:
+                self.ACTION[(i,a)] = ('shift',s)
+                if _DEBUG: print "solved shift %s" %s
         except (AttributeError, TypeError, KeyError,NameError):
             if self.Log.noconflicts:
                 # choose to shift
@@ -1716,7 +1714,7 @@
 
     @ivar lex: a Lexer object
     """
-    def __init__(self,tokenize,grammar, table='YappyTab', no_table=1,
+    def __init__(self,tokenize,grammar, table='YappyTab',no_table=1,
                  tabletype=LALRtable,noconflicts=1,expect=0,**args):
         """
         @param tokenize: same as for L{Lexer}
@@ -1803,10 +1801,7 @@
 
         """
         if callable(fun):
-            try:
-                return apply(fun,[sargs, context])
-            except:
-                raise SemanticError,'Wrong application: %s %s' %(fun,sargs)
+            return apply(fun,[sargs, context])
         elif type(fun) is StringType:
             a = expandSemRule("sargs[",fun)
             l = context.get('locals',{})
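
The headline change in 1.8 (the changelog's "new iterative lexer") is the parser.py rewrite above: in 1.7, Lexer.scan recursively split the input into nested (match, left, right) tuples and flattened them afterwards with rebuild; in 1.8, scanOneRule walks a flat work list that mixes unscanned strings with already-built tokens, so rebuild disappears and long inputs no longer risk Python's recursion limit. Below is a minimal standalone sketch of that list-based scheme in modern Python; the names (scan_one_rule, scan) and the simplified (regex, fun) rule format are illustrative, not yappy's actual API.

import re

def scan_one_rule(regex, fun, segments):
    """Apply one lexical rule to a work list mixing unscanned strings
    and already-built tokens (yappy 1.8's scanOneRule, simplified)."""
    out = []
    for seg in segments:
        if not isinstance(seg, str):      # already a token: pass it through
            out.append(seg)
            continue
        rest = seg
        while True:
            m = regex.search(rest)
            if not m:                     # no match left: keep remainder as string
                out.append(rest)
                break
            if m.start() != 0:            # unmatched prefix stays a string
                out.append(rest[:m.start()])
            out.append(fun(m.group(0)))   # matched text becomes a token
            if m.end() == len(rest):
                break
            rest = rest[m.end():]         # keep scanning the tail iteratively
    return out

def scan(rules, text):
    """Run every rule over the work list, then tag leftover strings @UNK."""
    segments = [text]
    for regex, fun in rules:
        segments = scan_one_rule(regex, fun, segments)
    return [("@UNK", s) if isinstance(s, str) else s for s in segments]

if __name__ == "__main__":
    rules = [
        (re.compile(r"\d+"),  lambda x: ("id", int(x))),
        (re.compile(r"[+-]"), lambda x: ("add_op", x)),
        (re.compile(r"[*/]"), lambda x: ("mul_op", x)),
    ]
    print(scan(rules, "12+34*5 ?"))
    # [('id', 12), ('add_op', '+'), ('id', 34), ('mul_op', '*'),
    #  ('id', 5), ('@UNK', ' ?')]

Each pass is a single left-to-right sweep that appends to a list, which also explains the simpler scanUnknown in 1.8: leftover strings are tagged in place rather than recovered from a tree.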