Compile the ModelicaDiff lexer with OMC
Added the relevant enumeration pattern matching patches to the
bootstrapping sources.
sjoelund authored and OpenModelica-Hudson committed Jun 30, 2015
1 parent adab1ee · commit 4f4dbe2
Showing 11 changed files with 328 additions and 15 deletions.
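The "enumeration pattern matching" the commit message refers to is MetaModelica's ability to match directly on enumeration literals inside a match expression, which the new lexer code uses throughout (case TokenId.IDENT, case TokenId.BLOCK_COMMENT, and so on). A minimal sketch of the construct, with a Color enumeration and colorName function invented purely for illustration (they are not part of this commit):

// Illustration only: Color and colorName are hypothetical.
type Color = enumeration(RED, GREEN, BLUE);

function colorName
  input Color c;
  output String name;
algorithm
  // The bootstrapping patches allow match cases to pattern-match on
  // enumeration literals like this, exactly as modelicaDiffTokenEq
  // does on TokenId in the diff below.
  name := match c
    case Color.RED then "red";
    case Color.GREEN then "green";
    case Color.BLUE then "blue";
  end match;
end colorName;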
162 changes: 157 additions & 5 deletions Compiler/Lexers/LexerModelicaDiff.mo
@@ -15,10 +15,6 @@ encapsulated package LexerModelicaDiff "Automatically generated lexer based on f

import System;

import arrayGet = MetaModelica.Dangerous.arrayGetNoBoundsChecking; // Bounds checked with debug=true
import stringGet = MetaModelica.Dangerous.stringGetNoBoundsChecking;
import MetaModelica.Dangerous.listReverseInPlace;

constant Boolean debug = false;

replaceable package LexTable
@@ -52,7 +48,7 @@ algorithm
end scanString;



import DiffAlgorithm;
function action
input Integer act;
input Integer startSt;
@@ -699,6 +695,8 @@ protected
list<String> chars;
array<Integer> states;
String s1,s2;
import MetaModelica.Dangerous.listReverseInPlace;
import stringGet = MetaModelica.Dangerous.stringGetNoBoundsChecking;
algorithm
// load arrays

@@ -875,6 +873,8 @@ protected
array<Integer> mm_accept,mm_ec,mm_meta,mm_base,mm_def,mm_nxt,mm_chk,mm_acclist;
Integer lp,lp1,stCmp;
Boolean st;
import arrayGet = MetaModelica.Dangerous.arrayGetNoBoundsChecking; // Bounds checked with debug=true
import stringGet = MetaModelica.Dangerous.stringGetNoBoundsChecking;
algorithm
mm_currSt := currSt;
mm_pos := pos;
@@ -1338,6 +1338,158 @@ package LexTable
end LexTable;


public

function modelicaDiffTokenEq
import LexerModelicaDiff.{Token,TokenId,tokenContent};
input Token ta,tb;
output Boolean b;
protected
LexerModelicaDiff.TokenId ida,idb;
algorithm
LexerModelicaDiff.TOKEN(id=ida) := ta;
LexerModelicaDiff.TOKEN(id=idb) := tb;
if ida <> idb then
b := false;
return;
end if;
b := match ida
case TokenId.IDENT then tokenContent(ta)==tokenContent(tb);
case TokenId.UNSIGNED_INTEGER then tokenContent(ta)==tokenContent(tb);
case TokenId.UNSIGNED_REAL
then stringReal(tokenContent(ta))==stringReal(tokenContent(tb));
case TokenId.BLOCK_COMMENT
then valueEq(blockCommentCanonical(ta),blockCommentCanonical(tb));
case TokenId.LINE_COMMENT then tokenContent(ta)==tokenContent(tb);
case TokenId.STRING then tokenContent(ta)==tokenContent(tb);
case TokenId.WHITESPACE then true; // tokenContent(ta)==tokenContent(tb);
else true;
end match;
end modelicaDiffTokenEq;

function filterModelicaDiff
import LexerModelicaDiff.{Token,TokenId,tokenContent,TOKEN};
import DiffAlgorithm.Diff;
input list<tuple<Diff, list<Token>>> diffs;
output list<tuple<Diff, list<Token>>> odiffs;
protected
list<String> addedLineComments, removedLineComments;
list<list<String>> addedBlockComments, removedBlockComments;
list<tuple<Diff, Token>> simpleDiff;
algorithm
// No changes are easy
_ := match diffs
case {(Diff.Equal,_)}
algorithm
odiffs := diffs;
return;
then ();
else ();
end match;

odiffs := listReverse(match e
local
list<Token> ts;
case (Diff.Delete,ts as {TOKEN(id=TokenId.WHITESPACE)}) then (Diff.Equal,ts);
else e;
end match

for e guard(
match e
// Single addition of whitespace, not followed by another addition
// is suspected garbage added by OMC.
case (Diff.Add,{TOKEN(id=TokenId.WHITESPACE)}) then false;
case (_,{}) then false;
else true;
end match
) in diffs);

// Convert from multiple additions per item to one per item
// Costs more memory, but is easier to transform
simpleDiff := listAppend(
match e
local
list<Token> ts;
case (Diff.Add,ts) then list((Diff.Add,t) for t in ts);
case (Diff.Equal,ts) then list((Diff.Equal,t) for t in ts);
case (Diff.Delete,ts) then list((Diff.Delete,t) for t in ts);
end match
for e in odiffs);

addedLineComments := list(tokenContent(tuple22(e)) for e guard Diff.Add==tuple21(e) and isLineComment(tuple22(e)) in simpleDiff);
removedLineComments := list(tokenContent(tuple22(e)) for e guard Diff.Delete==tuple21(e) and isLineComment(tuple22(e)) in simpleDiff);

addedBlockComments := list(blockCommentCanonical(tuple22(e)) for e guard Diff.Add==tuple21(e) and isBlockComment(tuple22(e)) in simpleDiff);
removedBlockComments := list(blockCommentCanonical(tuple22(e)) for e guard Diff.Delete==tuple21(e) and isBlockComment(tuple22(e)) in simpleDiff);

simpleDiff := list(
match e
local
Token t;
case (Diff.Delete,t as TOKEN(id=TokenId.LINE_COMMENT)) then (if listMember(tokenContent(t), addedLineComments) then (Diff.Equal,t) else e);
case (Diff.Delete,t as TOKEN(id=TokenId.BLOCK_COMMENT)) then (if listMember(blockCommentCanonical(t), addedBlockComments) then (Diff.Equal,t) else e);
else e;
end match
for e guard(
match e
local
Token t;
case (Diff.Add,t as TOKEN(id=TokenId.LINE_COMMENT)) then not listMember(tokenContent(t), removedLineComments);
case (Diff.Add,t as TOKEN(id=TokenId.BLOCK_COMMENT)) then not listMember(blockCommentCanonical(t), removedBlockComments);
else true;
end match
) in simpleDiff);

odiffs := list(
match e
local
Diff d;
Token t;
case (d,t) then (d,{t});
end match
for e in simpleDiff);
end filterModelicaDiff;

function isBlockComment
import LexerModelicaDiff.{Token,TokenId,TOKEN};
input Token t;
output Boolean b;
algorithm
b := match t case TOKEN(id=TokenId.BLOCK_COMMENT) then true; else false; end match;
end isBlockComment;

function isLineComment
import LexerModelicaDiff.{Token,TokenId,TOKEN};
input Token t;
output Boolean b;
algorithm
b := match t case TOKEN(id=TokenId.LINE_COMMENT) then true; else false; end match;
end isLineComment;

function tuple21<A,B>
input tuple<A,B> t;
output A a;
algorithm
(a,_) := t;
end tuple21;

function tuple22<A,B>
input tuple<A,B> t;
output B b;
algorithm
(_,b) := t;
end tuple22;

function blockCommentCanonical
import LexerModelicaDiff.{Token,tokenContent};
input Token t;
output list<String> lines;
algorithm
// Canonical representation trims whitespace from each line
lines := list(System.trim(s) for s in System.strtok(tokenContent(t),"\n"));
end blockCommentCanonical;

annotation(__OpenModelica_Interface="backend");


end LexerModelicaDiff;
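A rough sketch of how the new helpers might be combined with DiffAlgorithm when diffing two Modelica sources. The scanString entry point is visible in a hunk above; the exact DiffAlgorithm.diff signature and the printToken to-string callback are assumptions here and should be checked against the actual DiffAlgorithm and lexer sources:

// Sketch under stated assumptions; not part of this commit.
function diffModelicaStrings
  input String before, after;
  output list<tuple<DiffAlgorithm.Diff, list<LexerModelicaDiff.Token>>> diffs;
protected
  list<LexerModelicaDiff.Token> tokensBefore, tokensAfter;
algorithm
  tokensBefore := LexerModelicaDiff.scanString(before);  // assumed signature
  tokensAfter  := LexerModelicaDiff.scanString(after);   // assumed signature
  // Token-level diff using the new equality predicate; printToken is an assumed callback.
  diffs := DiffAlgorithm.diff(tokensBefore, tokensAfter,
             LexerModelicaDiff.modelicaDiffTokenEq, LexerModelicaDiff.printToken);
  // Drop whitespace-only noise and treat moved comments as equal.
  diffs := LexerModelicaDiff.filterModelicaDiff(diffs);
end diffModelicaStrings;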
2 changes: 1 addition & 1 deletion Compiler/Lexers/Makefile.in
@@ -9,7 +9,7 @@ LexerModelica.mo: lexerModelicaDiff.l OMCC/OMCC
mv OMCC/LexerModelicaDiff.mo "$@"
OMCC/OMCC:
test -e OMCC/.git || git clone https://github.com/OpenModelica/OpenModelica-OMCC OMCC
cd OMCC && git fetch && git pull --ff
cd OMCC && git fetch && git pull --ff-only
$(MAKE) -C OMCC omcc OMC="@OMBUILDDIR@/bin/omc"

Makefile: Makefile.in
154 changes: 153 additions & 1 deletion Compiler/Lexers/lexerModelicaDiff.l
@@ -1,5 +1,5 @@
%{

import DiffAlgorithm;
%}

%x c_comment
@@ -173,3 +173,155 @@ initialalgorithm "initial"{whitespace}"algorithm"
}

%%
public

function modelicaDiffTokenEq
import LexerModelicaDiff.{Token,TokenId,tokenContent};
input Token ta,tb;
output Boolean b;
protected
LexerModelicaDiff.TokenId ida,idb;
algorithm
LexerModelicaDiff.TOKEN(id=ida) := ta;
LexerModelicaDiff.TOKEN(id=idb) := tb;
if ida <> idb then
b := false;
return;
end if;
b := match ida
case TokenId.IDENT then tokenContent(ta)==tokenContent(tb);
case TokenId.UNSIGNED_INTEGER then tokenContent(ta)==tokenContent(tb);
case TokenId.UNSIGNED_REAL
then stringReal(tokenContent(ta))==stringReal(tokenContent(tb));
case TokenId.BLOCK_COMMENT
then valueEq(blockCommentCanonical(ta),blockCommentCanonical(tb));
case TokenId.LINE_COMMENT then tokenContent(ta)==tokenContent(tb);
case TokenId.STRING then tokenContent(ta)==tokenContent(tb);
case TokenId.WHITESPACE then true; // tokenContent(ta)==tokenContent(tb);
else true;
end match;
end modelicaDiffTokenEq;

function filterModelicaDiff
import LexerModelicaDiff.{Token,TokenId,tokenContent,TOKEN};
import DiffAlgorithm.Diff;
input list<tuple<Diff, list<Token>>> diffs;
output list<tuple<Diff, list<Token>>> odiffs;
protected
list<String> addedLineComments, removedLineComments;
list<list<String>> addedBlockComments, removedBlockComments;
list<tuple<Diff, Token>> simpleDiff;
algorithm
// No changes are easy
_ := match diffs
case {(Diff.Equal,_)}
algorithm
odiffs := diffs;
return;
then ();
else ();
end match;

odiffs := listReverse(match e
local
list<Token> ts;
case (Diff.Delete,ts as {TOKEN(id=TokenId.WHITESPACE)}) then (Diff.Equal,ts);
else e;
end match

for e guard(
match e
// Single addition of whitespace, not followed by another addition
// is suspected garbage added by OMC.
case (Diff.Add,{TOKEN(id=TokenId.WHITESPACE)}) then false;
case (_,{}) then false;
else true;
end match
) in diffs);

// Convert from multiple additions per item to one per item
// Costs more memory, but is easier to transform
simpleDiff := listAppend(
match e
local
list<Token> ts;
case (Diff.Add,ts) then list((Diff.Add,t) for t in ts);
case (Diff.Equal,ts) then list((Diff.Equal,t) for t in ts);
case (Diff.Delete,ts) then list((Diff.Delete,t) for t in ts);
end match
for e in odiffs);

addedLineComments := list(tokenContent(tuple22(e)) for e guard Diff.Add==tuple21(e) and isLineComment(tuple22(e)) in simpleDiff);
removedLineComments := list(tokenContent(tuple22(e)) for e guard Diff.Delete==tuple21(e) and isLineComment(tuple22(e)) in simpleDiff);

addedBlockComments := list(blockCommentCanonical(tuple22(e)) for e guard Diff.Add==tuple21(e) and isBlockComment(tuple22(e)) in simpleDiff);
removedBlockComments := list(blockCommentCanonical(tuple22(e)) for e guard Diff.Delete==tuple21(e) and isBlockComment(tuple22(e)) in simpleDiff);

simpleDiff := list(
match e
local
Token t;
case (Diff.Delete,t as TOKEN(id=TokenId.LINE_COMMENT)) then (if listMember(tokenContent(t), addedLineComments) then (Diff.Equal,t) else e);
case (Diff.Delete,t as TOKEN(id=TokenId.BLOCK_COMMENT)) then (if listMember(blockCommentCanonical(t), addedBlockComments) then (Diff.Equal,t) else e);
else e;
end match
for e guard(
match e
local
Token t;
case (Diff.Add,t as TOKEN(id=TokenId.LINE_COMMENT)) then not listMember(tokenContent(t), removedLineComments);
case (Diff.Add,t as TOKEN(id=TokenId.BLOCK_COMMENT)) then not listMember(blockCommentCanonical(t), removedBlockComments);
else true;
end match
) in simpleDiff);

odiffs := list(
match e
local
Diff d;
Token t;
case (d,t) then (d,{t});
end match
for e in simpleDiff);
end filterModelicaDiff;

function isBlockComment
import LexerModelicaDiff.{Token,TokenId,TOKEN};
input Token t;
output Boolean b;
algorithm
b := match t case TOKEN(id=TokenId.BLOCK_COMMENT) then true; else false; end match;
end isBlockComment;

function isLineComment
import LexerModelicaDiff.{Token,TokenId,TOKEN};
input Token t;
output Boolean b;
algorithm
b := match t case TOKEN(id=TokenId.LINE_COMMENT) then true; else false; end match;
end isLineComment;

function tuple21<A,B>
input tuple<A,B> t;
output A a;
algorithm
(a,_) := t;
end tuple21;

function tuple22<A,B>
input tuple<A,B> t;
output B b;
algorithm
(_,b) := t;
end tuple22;

function blockCommentCanonical
import LexerModelicaDiff.{Token,tokenContent};
input Token t;
output list<String> lines;
algorithm
// Canonical representation trims whitespace from each line
lines := list(System.trim(s) for s in System.strtok(tokenContent(t),"\n"));
end blockCommentCanonical;

annotation(__OpenModelica_Interface="backend");
