fixing metrics & extractors; adding more grammars to the SHARED part
git-svn-id: https://slps.svn.sourceforge.net/svnroot/slps@946 ab42f6e0-554d-0410-b580-99e487e6eeb2
grammarware committed Feb 8, 2011
1 parent cd470d9 commit c69cab5
Showing 3 changed files with 64 additions and 15 deletions.
23 changes: 23 additions & 0 deletions shared/bgf/Makefile
@@ -4,12 +4,33 @@ cdir = ../../topics/convergence

all:
make clean
make xpath
make fl
make dsl
make c
make cpp
make csharp
make java

xpath:
cd ${gdir}/xpath && make test
cp ${gdir}/xpath/xpath.bgf xpath.1.ext.bgf
cp ${gdir}/xpath/xpath.fixed.bgf xpath.1.rec.bgf

dsl:
cp ${gdir}/bnf/bnf-bgf.bgf bnf.bgf.bgf
cd ${gdir}/dsl && make build
cp ${gdir}/dsl/bgf/bgf.bgf bgf.xsd.ext.bgf
cp ${gdir}/dsl/btf/btf.bgf btf.xsd.ext.bgf
cp ${gdir}/dsl/lcf/lcf.bgf lcf.xsd.ext.bgf
cp ${gdir}/dsl/ldf/ldf.raw.bgf ldf.xsd.ext.bgf
cp ${gdir}/dsl/ldf/ldf.final.bgf ldf.xsd.rec.bgf
cp ${gdir}/dsl/xbgf/xbgf.bgf xbgf.xsd.ext.bgf
cp ${gdir}/dsl/xldf/xldf.bgf xldf.xsd.ext.bgf
cd ../../topics/extraction/rascal && make
cp ../../topics/extraction/rascal/rascal.bgf rascal.ext.bgf
cp ../../topics/extraction/rascal/rascal.true.bgf rascal.rec.bgf

fl:
cd ${cdir}/fl && make test
cp ${cdir}/fl/snapshot/antlr.bgf fl.antlr.ext.bgf
@@ -20,6 +41,7 @@ fl:
cp ${cdir}/fl/snapshot/om.bgf fl.java.ext.bgf
cp ${cdir}/fl/snapshot/sdf.bgf fl.sdf.ext.bgf
cp ${cdir}/fl/snapshot/txl.bgf fl.txl.ext.bgf
cp ${cdir}/fl/snapshot/xsd.bgf fl.xsd.ext.bgf
${tdir}/subgrammar fl.antlr.ext.bgf program fl.antlr.rec.bgf
${tdir}/subgrammar fl.dcg.ext.bgf program fl.dcg.rec.bgf
${tdir}/subgrammar fl.ecore.ext.bgf Program fl.ecore.rec.bgf
@@ -28,6 +50,7 @@ fl:
${tdir}/subgrammar fl.java.ext.bgf Function fl.java.rec.bgf
${tdir}/subgrammar fl.sdf.ext.bgf Program fl.sdf.rec.bgf
${tdir}/subgrammar fl.txl.ext.bgf program fl.txl.rec.bgf
${tdir}/subgrammar fl.xsd.ext.bgf Program fl.xsd.rec.bgf

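The fl target ends with a series of ${tdir}/subgrammar calls that take an extracted grammar (*.ext.bgf), a root nonterminal, and an output file (*.rec.bgf). The assumed behaviour is that only the productions reachable from that root survive. A minimal sketch of that idea over a simplified dict-based grammar (names hypothetical, not the actual BGF tooling):

def subgrammar(g, root):
    # keep only productions reachable from `root` (assumed behaviour of the tool)
    reachable, todo = set(), [root]
    while todo:
        nt = todo.pop()
        if nt in reachable or nt not in g:
            continue
        reachable.add(nt)
        for rhs in g[nt]:
            todo.extend(s for s in rhs if s in g)
    return dict((nt, g[nt]) for nt in g if nt in reachable)

g = {'program':  [['function', 'program'], ['function']],
     'function': [['name', 'expr']],
     'name':     [['"id"']],
     'expr':     [['name'], ['"0"']],
     'junk':     [['"unused"']]}          # not reachable from 'program'
print(sorted(subgrammar(g, 'program')))   # ['expr', 'function', 'name', 'program']
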
csharp:
cd ${gdir}/csharp && make build
34 changes: 25 additions & 9 deletions shared/python/metrics.py
@@ -60,12 +60,21 @@ def lab(g):
def USED(g):
return len(used(g))
def used(g):
# globally used
nts = []
for p in g.prods:
for n in p.expr.wrapped.getXml().findall('.//nonterminal'):
if n.text not in nts:
nts.append(n.text)
return nts
def nrused(g):
# non-recursively used
nts = []
for p in g.prods:
for n in p.expr.wrapped.getXml().findall('.//nonterminal'):
if (n.text not in nts) and (n.text != p.nt):
nts.append(n.text)
return nts

# DEFD - number of nonterminal symbols defined by the grammar
def DEFD(g):
@@ -82,7 +91,7 @@ def TOP(g):
return len(top(g))
def top(g):
tops = []
usednts = used(g)
usednts = nrused(g)
for nt in defd(g):
if nt not in usednts:
tops.append(nt)
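
The point of switching top() from used() to nrused() is that a nonterminal defined only via direct recursion should still count as a top (root) nonterminal. A self-contained illustration over a simplified dict representation (the real code walks the BGF object model instead):

g = {
    'list': [['item', 'list'], ['item']],   # the only reference to 'list' is itself
    'item': [['"x"']],
}

def used(g):      # globally used: any right-hand-side occurrence counts
    return set(s for rhss in g.values() for rhs in rhss for s in rhs if s in g)

def nrused(g):    # non-recursively used: self-references are skipped
    return set(s for nt, rhss in g.items() for rhs in rhss for s in rhs
               if s in g and s != nt)

def top(g, use):
    return [nt for nt in sorted(g) if nt not in use(g)]

print(top(g, used))     # []        -- 'list' is hidden by its own recursion
print(top(g, nrused))   # ['list']  -- correctly reported as a root nonterminal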
@@ -243,10 +252,12 @@ def TIMPI(g):
return impurityOfCallGraph(getCallGraph(g))

def impurityOfCallGraph(cg):
#print cg
n = len(cg)
e = sum(map(len,cg.values()))
#print n,e
if n<2:
return 100
return 0
else:
# Power and Malloy made two mistakes:
# (1) the number of edges in a complete directed graph is n(n-1), not n(n-1)/2, as in a complete undirected graph!
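The truncated comment refers to Power and Malloy's tree impurity metric, which for an undirected graph is TIMP = 2(e - n + 1) / ((n - 1)(n - 2)) * 100%. Applying correction (1) — a complete directed graph has n(n - 1) edges while a spanning tree still has n - 1 — gives one plausible adjusted form; since correction (2) is cut off in this hunk, the following is an assumption rather than the formula actually used below:

def tree_impurity(n, e):
    # n call-graph nodes, e directed edges; result is a percentage.
    # Extra edges beyond a spanning tree (e - (n-1)) relative to the maximum
    # possible extra edges in a complete directed graph: n*(n-1) - (n-1) = (n-1)**2.
    # Assumed variant, see the note above.
    if n < 2:
        return 0
    return 100.0 * (e - n + 1) / ((n - 1) ** 2)

print(tree_impurity(4, 5))   # 2 extra edges out of 9 possible -> ~22.2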
@@ -329,16 +340,14 @@ def opr(node):
return 1+opr(node.expr)
elif node.__class__.__name__ == 'Expression':
return opr(node.wrapped)
elif node.__class__.__name__ == 'Marked':
return 1+opr(node.data)
elif node.__class__.__name__ in ('Plus','Star','Optional'):
elif node.__class__.__name__ in ('Plus','Star','Optional','Marked'):
return 1+opr(node.data)
elif node.__class__.__name__ in ('Terminal','Nonterminal','Value'):
return 0
elif node.__class__.__name__ in ('Epsilon','Any','Empty'):
return 1
elif node.__class__.__name__ in ('Choice','Sequence'):
return sum(map(opr,node.data))
return sum(map(opr,node.data))+len(node.data)-1
else:
print 'How to deal with',node.__class__.__name__,'?'
return 0
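
With this change an n-ary Choice or Sequence now contributes its own n - 1 infix operators on top of whatever its children contribute. As a worked example, assuming the expression a b? (c | d | e) is represented as one Sequence node with three children: the Optional contributes 1, the ternary Choice contributes 3 - 1 = 2, and the outer Sequence adds another 3 - 1 = 2, so opr = 1 + 2 + 2 = 5 operator occurrences (the previous version counted only 1).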
@@ -377,23 +386,30 @@ def union(a,b):
c.append(x)
return c

def setminus(a,b):
c = a[:]
for x in b:
if x in c:
c.remove(x)
return c
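
The new setminus complements the existing union as an order-preserving set operation on plain lists (the metrics code keeps nonterminal and operator names in lists rather than sets). A quick usage sketch, assuming union appends each element of b that is not already in a:

print(union(['a', 'b'], ['b', 'c']))          # ['a', 'b', 'c']
print(setminus(['a', 'b', 'c'], ['b', 'd']))  # ['a', 'c'] -- items absent from a are ignored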

def allOperators(node):
if node.__class__.__name__ == 'Grammar':
return reduce(union,map(allOperators,node.prods),[])
elif node.__class__.__name__ == 'Production':
return allOperators(node.expr)
elif node.__class__.__name__ == 'Selectable':
return union(allOperators(node.expr),node.__class__.__name__)
return union(allOperators(node.expr),[node.__class__.__name__])
elif node.__class__.__name__ == 'Expression':
return allOperators(node.wrapped)
elif node.__class__.__name__ in ('Plus','Star','Optional','Marked'):
return union(allOperators(node.data),node.__class__.__name__)
return union(allOperators(node.data),[node.__class__.__name__])
elif node.__class__.__name__ in ('Terminal','Nonterminal','Value'):
return []
elif node.__class__.__name__ in ('Epsilon','Any','Empty'):
return [node.__class__.__name__]
elif node.__class__.__name__ in ('Choice','Sequence'):
return reduce(union,map(allOperators,node.data),[])
return reduce(union,map(allOperators,node.data),[node.__class__.__name__])
else:
print 'How to deal with',node.__class__.__name__,'?'
return 0
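
Both fixes to allOperators matter for correctness: union expects two lists, so the operator name must be wrapped as [node.__class__.__name__] (the old code iterated over the characters of the class-name string), and Choice/Sequence nodes now seed their reduce with their own name so they appear in the result at all. For instance, assuming a production whose expression is a Sequence of an Optional and a binary Choice, allOperators now yields ['Sequence', 'Optional', 'Choice'], whereas before the Sequence and Choice were dropped.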
22 changes: 16 additions & 6 deletions topics/extraction/bnf2bgf/pdf2lll.py
@@ -34,6 +34,7 @@ def assignNewCurrent(c):
if d not in keys:
keys.append(d)
current = d
#print 'NEW CURRENT:',current

def readBannedLinesList(f):
lst = open(f,'r')
@@ -75,7 +76,8 @@ def readTerminalsList(f):

oneof = False

def processline(line,current,oneof):
def processline(line,oneof):
global current
rline = line.strip()
if rline == '':
return current,oneof
@@ -111,6 +113,7 @@ def processline(line,current,oneof):
# grammar[current].append(t)
else:
grammar[current].append(' '.join(processLineTokens(rline)))
#print 'KEYS=',grammar.keys()
return current,oneof
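
Dropping the current parameter in favour of global current avoids a shadowing bug: a parameter is bound once at call time, so updates that assignNewCurrent makes to the module-level variable were invisible inside processline. A minimal, self-contained illustration of the pitfall (names hypothetical):

state = 'a'

def bump():
    global state
    state = 'b'

def with_param(s):
    bump()
    return s          # still 'a': the parameter was bound before bump() ran

def with_global():
    bump()
    global state
    return state      # 'b': reads the module-level name that bump() rebound

print(with_param(state))   # a
print(with_global())       # b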

def performReplacements(line):
@@ -156,13 +159,15 @@ def dumpUnbannedLines(f):
lines.append((cx,line))
print len(lines),'out of',cx,'will be used.'
pdf.close()
#DEBUG
#for l in lines:
# print l[0],':',l[1],
#sys.exit(1)
return lines

def joinLineContinuations(lines,lead):
print 'Searching for line continuations...'



#print 'Searching for line continuations...'
# ??? TODO ???
plines = []
print 'Searching for line continuations...'
for line in lines:
@@ -183,12 +188,16 @@ def readGrammar(lines):
#oneof = False
for line in lines:
#debug()
current,oneof = processline(line,current,oneof)
current,oneof = processline(line,oneof)

def writeGrammar(f):
lll = open(f,'w')
# we could've taken grammar.keys() here, but we want to preserve the original order of grammar productions
#print grammar.keys()
for t in keys:
if t not in grammar.keys():
print 'ERROR:',t,'expected to be in the grammar, but is not there!'
continue
lll.write(t+':\n')
lll.write('\t'+grammar[t][0]+'\n')
for x in grammar[t][1:]:
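
Iterating over keys rather than grammar.keys() matters because a Python 2 dict does not preserve insertion order, so the dump could reorder productions; the new guard also turns a nonterminal that was registered but never defined into a readable diagnostic instead of a KeyError. A tiny model of the same pattern (hypothetical data):

keys = ['program', 'function', 'expr']     # encounter order in the source PDF
grammar = {'program': ['function+'], 'expr': ['name', '"0"']}
for t in keys:
    if t not in grammar:
        print('ERROR: ' + t + ' expected to be in the grammar, but is not there!')
        continue
    print(t + ':')
    for alt in grammar[t]:
        print('\t' + alt)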
@@ -198,6 +207,7 @@ def writeGrammar(f):

def massageGrammarRule(context,nt):
global nt2t
#DEBUG
for i in range(0,len(context[nt])):
tokens = context[nt][i].split()
# special case: a postfix metasymbol (e.g., *) occurs in the beginning of the line
