diff --git a/src/ebmc/ranking-function-synthesis/nuterm-wrapper.sh b/src/ebmc/ranking-function-synthesis/nuterm-wrapper.sh new file mode 100755 index 000000000..fb3a053e5 --- /dev/null +++ b/src/ebmc/ranking-function-synthesis/nuterm-wrapper.sh @@ -0,0 +1,37 @@ +#!/bin/sh + +BASE=$(dirname "$0") + +if ! command -v ebmc >/dev/null 2>&1 ; then + echo "ebmc binary not found in PATH" + exit 1 +fi + +cleanup() +{ + rm -rf "$vcd_dir" +} +vcd_dir=$(mktemp -d vcd.XXXXX) +trap cleanup EXIT + +# generate random traces for the given (System)Verilog files +ebmc "$@" --random-traces --random-seed 0 \ + --trace-steps 100 --number-of-traces 20 \ + --vcd "$vcd_dir"/trace.vcd + +# pyvcd does not support indexed names as arise from generate statements +perl -p -i -e 's/\[(\d+)\]/__$1__/g' "$vcd_dir"/trace.vcd* + +## If we were to use --neural-engine: ## python3 $BASE/nuterm/nuterm.py \ ## --strategy anticorr_sumofrelu2 \ ## $BASE/../../bla.vcd.* | \ ## tail -n 1 | cut -f2 -d: | \ ## sed 's/main\.//g' | sed 's/^/Candidate: /' ## echo + +python3 "$BASE"/nuterm/nuterm.py \ + --strategy anticorr_sumoflinears_or_concat \ + --vcd-prefix "$vcd_dir"/trace.vcd \ + --seed 0 \ + "$@" diff --git a/src/ebmc/ranking-function-synthesis/nuterm/LICENSE.txt b/src/ebmc/ranking-function-synthesis/nuterm/LICENSE.txt new file mode 100644 index 000000000..43a327d18 --- /dev/null +++ b/src/ebmc/ranking-function-synthesis/nuterm/LICENSE.txt @@ -0,0 +1,59 @@ +nuTerm +BSD 2-Clause License + +Copyright (c) 2021, Julian Parsert, Mirco Giacobbe, Daniel Kröning +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + + +---------------------------------------------------------------------- +nuTerm uses a JSON library for C++ found in ./deps/traceAgent/json +with the following license: + +MIT License + +Copyright (c) 2013-2022 Niels Lohmann + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +---------------------------------------------------------------------- +The license file for the SV-COMP problems can be found in +benchmarking/termination-suite/sv-comp/termination-crafted-lit-C/LICENSE.txt diff --git a/src/ebmc/ranking-function-synthesis/nuterm/README.md b/src/ebmc/ranking-function-synthesis/nuterm/README.md new file mode 100644 index 000000000..eb655009f --- /dev/null +++ b/src/ebmc/ranking-function-synthesis/nuterm/README.md @@ -0,0 +1,12 @@ +# nuTerm + +### Requirements +Python packages: +- torch +- sympy +- matplotlib +- pandas +- numpy +- tqdm +- termcolor +- psutil diff --git a/src/ebmc/ranking-function-synthesis/nuterm/javachecker.py b/src/ebmc/ranking-function-synthesis/nuterm/javachecker.py new file mode 100644 index 000000000..367fafc0c --- /dev/null +++ b/src/ebmc/ranking-function-synthesis/nuterm/javachecker.py @@ -0,0 +1,140 @@ + + +# READ THIS BEFORE REMOVING !!!!!!!! +# PLEASE LEAVE THIS THE WAY IT IS. If you remove this, my workflow and everything else gets destroyed. It is maybe +# not the nicest solution, but it works for the time being. If you remove this, one has to work from the terminal +# and export all paths, which I do not think is a nice solution either, as I am not working from the terminal but +# from the IDE. PLEASE leave this until we have a nice and robust solution. 
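+
+# Overview: this module drives the Java-side checker classes
+# (javachecker.RankChecker, javachecker.CFGAnalyzer) through pyjnius.
+# Candidate ranking functions are handed to the JVM as lists of integer
+# coefficients and checked for decrease on the loop transitions and for the
+# supporting invariant.  A usage sketch (jar, class, and variable names are
+# illustrative; see the __main__ block at the bottom for a real instance):
+#
+#   heads = get_loop_heads('Example.jar', 'Example', 'loop')
+#   decreases, invar = check('Example.jar', 'Example', 'loop',
+#                            heads[0], ['x', 'y'], [1, 2])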
+import pathlib +import os + +from loopheads import get_loop_heads + +file_path = str(pathlib.Path(__file__).parent.absolute()) +# os.environ['CLASSPATH'] = ':'.join([e.path for e in os.scandir(file_path + '/../libs/')]) +from jnius import autoclass, cast + +import numpy as np + + +def check(jar_name, class_name, method_name, offset, ranking_args, ranking_fun): + #for x in os.environ['CLASSPATH']: + # print(x) + #exit(0) + RankChecker = autoclass('javachecker.RankChecker') + + URL = autoclass('java.net.URL') + URLClassLoader = autoclass('java.net.URLClassLoader') + List = autoclass('java.util.List') + + urls = [URL('jar:file:' + jar_name + '!/')] + cl = cast("java.lang.ClassLoader", URLClassLoader.newInstance(urls)) + #cl = URLClassLoader.newInstance(urls) + + args = List.of(*ranking_args) + fun = List.of(*ranking_fun) + + res = RankChecker._check(cl, class_name, method_name, offset, args, fun) + + return bool(res[0]), bool(res[1]) + + +def last_line_offset(jar_name, class_name, method_name, line): + + URL = autoclass('java.net.URL') + URLClassLoader = autoclass('java.net.URLClassLoader') + + CFGAnalyzer = autoclass('javachecker.CFGAnalyzer') + + urls = [URL('jar:file:' + jar_name + '!/')] + cl = URLClassLoader.newInstance(urls) + + line_to_offset = CFGAnalyzer.lineToLabelOffset(cl, class_name, method_name) + return line_to_offset.get(line).last() + +def check_sum_of_relu(jar_name, class_name, method_name, ranking_heads, + ranking_args, ranking_out, ranking_W, ranking_b): + + #for x in os.environ['LD_LIBRARY_PATH'].split(":"): + # print(x) + RankChecker = autoclass('javachecker.RankChecker') + + URL = autoclass('java.net.URL') + URLClassLoader = autoclass('java.net.URLClassLoader') + List = autoclass('java.util.List') + HashMap = autoclass('java.util.HashMap') + + urls = [URL('jar:file:' + jar_name + '!/')] + cl = cast("java.lang.ClassLoader", URLClassLoader.newInstance(urls)) + #cl = URLClassLoader.newInstance(urls) + + #print(ranking_W) + #print(ranking_b) + #print(ranking_out) + + assert len(ranking_W) == len(ranking_heads) + assert len(ranking_b) == len(ranking_heads) + assert len(ranking_out) == len(ranking_heads) + fun = [] + for W,b in zip(ranking_W, ranking_b): + + assert np.shape(W)[0] == np.shape(b)[0] + SOR = [] + for i in range(0, np.shape(W)[0]): + SOR.append([int(x) for x in W[i,:]] + [int(b[i])]) + fun.append(SOR) + + args = List.of(*ranking_args) + heads = List.of(*ranking_heads) + out = List.of(*[List.of(*[int(x) for x in W[0,:]]) for W in ranking_out]) + hidden = List.of(*[List.of(*[List.of(*row) for row in relus]) for relus in fun]) + cex = HashMap() + + res = RankChecker._checkLexiReluOrCex2(cl, class_name, method_name, heads, args, out, hidden, cex) + + cexDict = {} + i = cex.entrySet().iterator() + while i.hasNext(): + e = i.next() + (k,v) = e.getKey(),e.getValue() + cexDict[k] = v + + return bool(res[0]), bool(res[2]), cexDict + +if __name__ == '__main__': + + jarfile = "../benchmarking/termination-suite/termination-crafted-lit/terminating/NoriSharmaFSE2013Fig7/NoriSharmaFSE2013Fig7.jar" + classname = "NoriSharmaFSE2013Fig7" + methodname = "loop" + input_vars = ['i', 'j', 'M', 'N', 'a', 'b', 'c'] + + loop_heads = get_loop_heads(jarfile, classname, methodname) + + W_to_check = [np.array([[-1., 0., -2., 2., 1., 0., -0.], + [-0., -2., 2., -2., 3., 0., -0.], + [ 0., 0., 0., 0., 0., 0., 0.], + [-2., 1., 3., -0., -2., 0., -0.], + [-1., -1., -0., 2., -2., 0., 0.]])] + b_to_check = [np.array([1., 1., 2., 3., 3.])] + out_to_check = [np.array([[1., 1., 0., 1., 1.]])] + + + + 
decrease, invar, cex = check_sum_of_relu(jarfile, classname, methodname, + loop_heads, input_vars, + out_to_check, W_to_check, b_to_check) + + print("Decreasing: {}".format(decrease)) + print("Invar: {}".format(invar)) + print("Cex: {}".format(cex)) diff --git a/src/ebmc/ranking-function-synthesis/nuterm/learning.py b/src/ebmc/ranking-function-synthesis/nuterm/learning.py new file mode 100644 index 000000000..912c5b850 --- /dev/null +++ b/src/ebmc/ranking-function-synthesis/nuterm/learning.py @@ -0,0 +1,33 @@ +from torch import optim + + +def simple_learning(trace, loop_head, Model, loss_func, iterations=10000): + """ + Simplest learning algorithm: fit a single model to all trace pairs. + """ + input_before, input_after = trace.get_pairing_of_all_traces_as_tensors(loop_head) + print("Training data: {} pairs".format(len(input_after))) + + input_dim = len(trace.get_pos_identifiers()) + input_vars = trace.get_pos_identifiers(frame=5) + + # creating model with info from trace + model = Model(input_dim, input_vars=input_vars) + + # Learning + optimiser = optim.AdamW(model.parameters(), lr=.01) + for t in range(iterations): + optimiser.zero_grad() + + output_before = model(input_before) + output_after = model(input_after) + + loss = loss_func(output_before, output_after) + + #print(t, "loss:", loss.item()) + if loss == 0.: + break + loss.backward() + optimiser.step() + + return model, loss.item() diff --git a/src/ebmc/ranking-function-synthesis/nuterm/loopheads.py b/src/ebmc/ranking-function-synthesis/nuterm/loopheads.py new file mode 100644 index 000000000..24e760e4d --- /dev/null +++ b/src/ebmc/ranking-function-synthesis/nuterm/loopheads.py @@ -0,0 +1,38 @@ +import os +import pathlib + +def get_loop_heads(jar, klass, function): + from jnius import autoclass + + # String = autoclass("java.lang.String") + # calc_lh = autoclass('main.CalculateLoopHeads') + + # x = calc_lh.calculateLoopHeadsHelper(String(jar), String(klass), String(function)) + + URL = autoclass('java.net.URL') + URLClassLoader = autoclass('java.net.URLClassLoader') + + #print() + #for x in os.environ['CLASSPATH'].split(":"): + # print(x) + #print() + CFGAnalyzer = autoclass('javachecker.CFGAnalyzer') + + urls = [URL('jar:file:' + jar + '!/')] + cl = URLClassLoader.newInstance(urls) + heads = CFGAnalyzer.loopHeaders(cl, klass, function) + print("Done") + + return heads.toArray() + + +if __name__ == '__main__': + from jnius import autoclass + + os.environ['CLASSPATH'] = str(pathlib.Path(__file__).parent.absolute()) + "/../libs/CalculateLoopHeads.jar" + + String = autoclass("java.lang.String") + calc_lh = autoclass('main.CalculateLoopHeads') + + x = get_loop_heads("./examples/GCD/GCD.jar", "classes.GCD", "gcd") + print(x) diff --git a/src/ebmc/ranking-function-synthesis/nuterm/models/BiasExperiments.py b/src/ebmc/ranking-function-synthesis/nuterm/models/BiasExperiments.py new file mode 100644 index 000000000..ea78da91a --- /dev/null +++ b/src/ebmc/ranking-function-synthesis/nuterm/models/BiasExperiments.py @@ -0,0 +1,77 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import sympy as sp + + +class SumOfReluWithBias(nn.Module): + def __init__(self, n_in, n_out=1, n_summands=2, input_vars=None): + super(SumOfReluWithBias, self).__init__() + + self.n_summands = n_summands + self.n_in = n_in + self.n_out = n_out + + if input_vars is not None: + self.input_vars = input_vars + + self.fc1 = nn.Linear(n_in, n_summands * n_out, bias=True) + self.out = [] + for i in range(0, n_out): + row = torch.ones(n_summands) + for j in range(0, 
i): + row = torch.cat((torch.zeros(n_summands), row), 0) + for j in range(i + 1, n_out): + row = torch.cat((row, torch.zeros(n_summands)), 0) + self.out.append(row) + + self.out = torch.stack(self.out, dim=0) + + def forward(self, state): + layer = self.fc1(state) + layer = F.relu(layer) + layer = F.linear(layer, self.out) + return layer + + def get_weights(self): + return [self.fc1.weight.data.numpy()] + + def get_bias(self): + return [self.fc1.bias.data.numpy()] + + def get_round_weights(self): + return self.fc1.weight.data.numpy().round() + + def get_round_bias(self): + return self.fc1.bias.data.numpy().round() + + def sympy_expr(self): + relu = sp.Function('relu') + + expr = sp.Matrix(sp.symbols(self.input_vars, real=True)) + + print("##########") + #print( self.fc1.bias.data.numpy()) + + expr1 = self.fc1.weight.data.numpy() * expr + self.fc1.bias.data.numpy()[:, None] + expr1 = expr1.applyfunc(relu) + return expr1 + + def sympy_rounded_expr(self): + relu = sp.Function('relu') + + expr = sp.Matrix(sp.symbols(self.input_vars, real=True)) + expr1 = self.fc1.weight.data.numpy().round() * expr + self.fc1.bias.data.numpy().round()[:, None] + expr1 = expr1.applyfunc(relu) + return expr1 + + def print_sympy(self): + expr1 = self.sympy_expr() + + for e in expr1: + print(e) + + print("After rounding:") + expr = self.sympy_rounded_expr() + for e in expr: + print(e) diff --git a/src/ebmc/ranking-function-synthesis/nuterm/models/SingleLinear.py b/src/ebmc/ranking-function-synthesis/nuterm/models/SingleLinear.py new file mode 100644 index 000000000..f8bd04edb --- /dev/null +++ b/src/ebmc/ranking-function-synthesis/nuterm/models/SingleLinear.py @@ -0,0 +1,39 @@ +import torch.nn as nn +import sympy as sp + + +class SingleLinear(nn.Module): + + def __init__(self, input_dim, input_vars=None): + super(SingleLinear, self).__init__() + + if input_vars is not None: + self.input_vars = input_vars + + self.fc1 = nn.Linear(input_dim, 1, bias=False) + + def forward(self, state): + state = self.fc1(state) + return state + + def get_weights(self): + return self.fc1.weight.data.numpy() + + def sympy_expr(self): + expr = sp.Matrix(sp.symbols(self.input_vars, real=True)) # Careful this changed + return self.get_weights() * expr + + def sympy_rounded_expr(self): + expr = sp.Matrix(sp.symbols(self.input_vars, real=True)) # Careful this changed + return self.get_weights().round() * expr + + def print_sympy(self): + expr1 = self.sympy_expr() + + for e in expr1: + print(e) + print("After rounding:") + + expr = self.sympy_rounded_expr() + for e in expr: + print(e) \ No newline at end of file diff --git a/src/ebmc/ranking-function-synthesis/nuterm/models/SingleLinearWithRelu.py b/src/ebmc/ranking-function-synthesis/nuterm/models/SingleLinearWithRelu.py new file mode 100644 index 000000000..cf2377cf4 --- /dev/null +++ b/src/ebmc/ranking-function-synthesis/nuterm/models/SingleLinearWithRelu.py @@ -0,0 +1,47 @@ +import torch.nn as nn + +import sympy as sp +import math + + +class SingleLinearWithRelu(nn.Module): + + def __init__(self, input_dim, input_vars=None): + super(SingleLinearWithRelu, self).__init__() + + if input_vars is not None: + self.input_vars = input_vars + + self.fc1 = nn.Linear(input_dim, 1, bias=True) + + def forward(self, state): + state = self.fc1(state) + return nn.functional.relu(state) + + def get_weights(self): + return self.fc1.weight.data.numpy() + + def get_round_weights(self): + return self.fc1.weight.data.numpy().round(int(math.log10(self.get_round_scaling()))) + + def get_round_scaling(self): + 
return 1. + + def sympy_expr(self): + expr = sp.Matrix(sp.symbols(self.input_vars, real=True)) # Careful this changed + return self.get_weights() * expr + + def sympy_rounded_expr(self): + expr = sp.Matrix(sp.symbols(self.input_vars, real=True)) # Careful this changed + return self.get_round_weights() * expr + + def print_sympy(self): + expr1 = self.sympy_expr() + + for e in expr1: + print(e) + print("After rounding:") + + expr = self.sympy_rounded_expr() * self.get_round_scaling() + for e in expr: + print(e) diff --git a/src/ebmc/ranking-function-synthesis/nuterm/models/SumOfLinearsWithLex.py b/src/ebmc/ranking-function-synthesis/nuterm/models/SumOfLinearsWithLex.py new file mode 100644 index 000000000..a7b22e620 --- /dev/null +++ b/src/ebmc/ranking-function-synthesis/nuterm/models/SumOfLinearsWithLex.py @@ -0,0 +1,62 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import sympy as sp + + +class SumOfLinearsWithLex(nn.Module): + def __init__(self, n_in, n_out=1, n_summands=2, input_vars=None): + super(SumOfLinearsWithLex, self).__init__() + + self.n_summands = n_summands + self.n_in = n_in + self.n_out = n_out + + if input_vars is not None: + self.input_vars = input_vars + + self.fc1 = nn.Linear(n_in, n_summands * n_out, bias=False) + self.out = [] + for i in range(0, n_out): + row = torch.ones(n_summands) + for j in range(0, i): + row = torch.cat((torch.zeros(n_summands), row), 0) + for j in range(i + 1, n_out): + row = torch.cat((row, torch.zeros(n_summands)), 0) + self.out.append(row) + + self.out = torch.stack(self.out, dim=0) + + def forward(self, state): + layer = self.fc1(state) + layer = F.linear(layer, self.out) + return layer + + def get_weights(self): + return [self.fc1.weight.data.numpy()] + + def get_round_weights(self): + return self.fc1.weight.data.numpy().round() + + def sympy_expr(self): + + expr = sp.Matrix(sp.symbols(self.input_vars, real=True)) + expr1 = self.fc1.weight.data.numpy() * expr + return expr1 + + def sympy_rounded_expr(self): + + expr = sp.Matrix(sp.symbols(self.input_vars, real=True)) + expr1 = self.fc1.weight.data.numpy().round() * expr + return expr1 + + def print_sympy(self): + expr1 = self.sympy_expr() + + for e in expr1: + print(e) + print("After rounding:") + + expr = self.sympy_rounded_expr() + for e in expr: + print(e) diff --git a/src/ebmc/ranking-function-synthesis/nuterm/models/SumOfRelu.py b/src/ebmc/ranking-function-synthesis/nuterm/models/SumOfRelu.py new file mode 100644 index 000000000..918469ca1 --- /dev/null +++ b/src/ebmc/ranking-function-synthesis/nuterm/models/SumOfRelu.py @@ -0,0 +1,67 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import sympy as sp + + +class SumOfRelu(nn.Module): + def __init__(self, n_in, n_out=1, n_summands=2, input_vars=None): + super(SumOfRelu, self).__init__() + + self.n_summands = n_summands + self.n_in = n_in + self.n_out = n_out + + if input_vars is not None: + self.input_vars = input_vars + + self.fc1 = nn.Linear(n_in, n_summands*n_out, bias=False) + self.out = [] + for i in range(0, n_out): + row = torch.ones(n_summands) + for j in range(0, i): + row = torch.cat((torch.zeros(n_summands), row), 0) + for j in range(i+1, n_out): + row = torch.cat((row, torch.zeros(n_summands)), 0) + self.out.append(row) + + self.out = torch.stack(self.out, dim=0) + + def forward(self, state): + layer = self.fc1(state) + layer = F.relu(layer) + layer = F.linear(layer, self.out) + return layer + + def get_weights(self): + return [self.fc1.weight.data.numpy()] + + def 
get_round_weights(self): + return self.fc1.weight.data.numpy().round() + + def sympy_expr(self): + relu = sp.Function('relu') + + expr = sp.Matrix(sp.symbols(self.input_vars, real=True)) + expr1 = self.fc1.weight.data.numpy() * expr + expr1 = expr1.applyfunc(relu) + return expr1 + + def sympy_rounded_expr(self): + relu = sp.Function('relu') + + expr = sp.Matrix(sp.symbols(self.input_vars, real=True)) + expr1 = self.fc1.weight.data.numpy().round() * expr + expr1 = expr1.applyfunc(relu) + return expr1 + + def print_sympy(self): + expr1 = self.sympy_expr() + + for e in expr1: + print(e) + print("After rounding:") + + expr = self.sympy_rounded_expr() + for e in expr: + print(e) diff --git a/src/ebmc/ranking-function-synthesis/nuterm/models/SumOfRelu2.py b/src/ebmc/ranking-function-synthesis/nuterm/models/SumOfRelu2.py new file mode 100644 index 000000000..207fb28b8 --- /dev/null +++ b/src/ebmc/ranking-function-synthesis/nuterm/models/SumOfRelu2.py @@ -0,0 +1,40 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import sympy as sp + + +class SumOfRelu2(nn.Module): + def __init__(self, n_in, n_out=1, n_summands=2, bias=True, trainable_out=False): + super(SumOfRelu2, self).__init__() + + self.n_summands = n_summands + self.n_in = n_in + self.n_out = n_out + + self.fc1 = [] + self.fc2 = [] + for i in range(0, self.n_out): + hidden = nn.Linear(n_in, n_summands, bias=bias) + output = nn.Linear(n_summands, 1, bias=False) + self.register_parameter("hidden"+str(i), hidden.weight) + if hidden.bias is not None: + self.register_parameter("bias"+str(i), hidden.bias) + + if trainable_out: + self.register_parameter("out"+str(i), output.weight) + else: + nn.init.constant_(output.weight, 1.) + + + self.fc1.append(hidden) + self.fc2.append(output) + + def forward(self, state): + res = [] + for i in range(0, self.n_out): + layer = self.fc1[i](state) + layer = F.relu(layer) + layer = self.fc2[i](layer) + res.append(layer) + return res diff --git a/src/ebmc/ranking-function-synthesis/nuterm/models/TwoLayerRelu.py b/src/ebmc/ranking-function-synthesis/nuterm/models/TwoLayerRelu.py new file mode 100644 index 000000000..84a78408c --- /dev/null +++ b/src/ebmc/ranking-function-synthesis/nuterm/models/TwoLayerRelu.py @@ -0,0 +1,67 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import sympy as sp +import numpy as np + + +class TwoLayerRelu(nn.Module): + def __init__(self, n_in, n_summands=10, input_vars=None): + super(TwoLayerRelu, self).__init__() + + self.n_summands = n_summands + self.n_in = n_in + + if input_vars is not None: + self.input_vars = input_vars + + self.fc1 = nn.Linear(n_in, n_summands, bias=False) + self.fc2 = nn.Linear(n_summands, 1, bias=False) + + def forward(self, state): + layer = self.fc1(state) # first layer + layer = F.relu(layer) + layer = self.fc2(layer) # second layer + layer = F.relu(layer) + return layer + + def get_weights(self): + return [self.fc1.weight.data.numpy(), self.fc2.weight.data.numpy()] + + def get_round_weights(self): + return [self.fc1.weight.data.numpy().round(), self.fc2.weight.data.numpy().round()] + + def sympy_expr(self): + relu = sp.Function('relu') + + expr = sp.Matrix(sp.symbols(self.input_vars, real=True)) + expr1 = self.fc1.weight.data.numpy() * expr + expr1 = expr1.applyfunc(relu) + + expr2 = self.fc2.weight.data.numpy() * expr1 # Todo check if this is correct + expr2 = expr2.applyfunc(relu) + + return expr2 + + def sympy_rounded_expr(self): + relu = sp.Function('relu') + + expr = 
sp.Matrix(sp.symbols(self.input_vars, real=True)) + expr1 = self.fc1.weight.data.numpy().round() * expr + expr1 = expr1.applyfunc(relu) + + expr2 = self.fc2.weight.data.numpy().round() * expr1  # Todo check if this is correct + expr2 = expr2.applyfunc(relu) + + return expr2 + + def print_sympy(self): + expr1 = self.sympy_expr() + + for e in expr1: + print(e) + print("After rounding:") + + expr = self.sympy_rounded_expr() + for e in expr: + print(e) diff --git a/src/ebmc/ranking-function-synthesis/nuterm/models/__init__.py b/src/ebmc/ranking-function-synthesis/nuterm/models/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/ebmc/ranking-function-synthesis/nuterm/nuterm.py b/src/ebmc/ranking-function-synthesis/nuterm/nuterm.py new file mode 100644 index 000000000..f79b99071 --- /dev/null +++ b/src/ebmc/ranking-function-synthesis/nuterm/nuterm.py @@ -0,0 +1,94 @@ +import os +import pathlib + +import json +import torch +import numpy as np + +from strategies import anticorr_sumofrelu, single_linear_with_relu, anticorr_sumofrelu2_cegsloop, anticorr_sumofrelu2,\ + anticorr_sumofrelu_baseline, anticorr_sumofrelu2_gaussian, anticorr_sumofrelu_dynamic_nodes, anticorr_sumoflinears_or_concat + + + +# ======================================= +# - initialise classpath +# ======================================= +## file_path = str(pathlib.Path(__file__).parent.absolute()) +## os.environ['CLASSPATH'] = file_path + "/../libs/share/java/com.microsoft.z3.jar" + ':' + ':'.join( +## [e.path for e in os.scandir(file_path + '/../libs/')]) +## os.environ['LD_LIBRARY_PATH'] = file_path + "/../libs/lib/" + +import argparse +import random +import glob + + +strategies = { + "None": anticorr_sumofrelu2, + "anticorr_sumofrelu" : anticorr_sumofrelu, + "single_linear_with_relu": single_linear_with_relu, + "anticorr_sumoflinears_or_concat": anticorr_sumoflinears_or_concat, + "anticorr_sumofrelu2": anticorr_sumofrelu2, + "anticorr_sumofrelu_baseline": anticorr_sumofrelu_baseline, + "anticorr_sumofrelu2_gaussian" : anticorr_sumofrelu2_gaussian, + "anticorr_sumofrelu2_cegsloop" : anticorr_sumofrelu2_cegsloop, + "anticorr_sumofrelu_dynamic_nodes": anticorr_sumofrelu_dynamic_nodes, +} + + +def main(args): + + if args.seed is not None: + torch.manual_seed(args.seed) + random.seed(args.seed) + np.random.seed(args.seed) + # maybe not + torch.use_deterministic_algorithms(True) + + strategy = strategies[args.strategy] + + vcds = glob.glob(args.vcd_prefix + '*') + + print("Running {} on VCD-formatted traces {}".format(args.strategy, + vcds)) + + res = None + if args.strat_args is not None: + res = strategy(args.verilog, vcds, seed=args.seed, strat_args=args.strat_args) + else: + res = strategy(args.verilog, vcds, seed=args.seed) + res["strategy"] = strategies[args.strategy].__name__ + res["seed"] = args.seed + print(json.dumps(res)) + if 'decrease' in res: + if (res['decrease']): + print("Termination was proven.") + print('YES') + else: + print("Termination could not be proven.") + print('MAYBE') + else: + print("Termination could not be proven.") + print('MAYBE') + + +if __name__ == '__main__': + + # ======================================= + # - parse arguments + # ======================================= + parser = argparse.ArgumentParser() + # parser.add_argument('--seed', 
dest='seed', type=int, default=199) + parser.add_argument('--vcd-prefix', dest='vcd_prefix', help='Prefix of input VCD files') + parser.add_argument('verilog', nargs='+', help='Input (System) Verilog files') + parser.add_argument('--strategy', dest='strategy', help='Strategy to use for analysis.', required=True) + parser.add_argument('--seed', dest='seed', help='Seed', type=int, required=False) + parser.add_argument('--stratargs', dest='strat_args', type=str, help='Special arguments for strategies. Careful, this is strategy dependent.', required=False) + + + args = parser.parse_args() + + main(args) + + + #run_experiments(aprove_09_programs) diff --git a/src/ebmc/ranking-function-synthesis/nuterm/problem_sets.py b/src/ebmc/ranking-function-synthesis/nuterm/problem_sets.py new file mode 100644 index 000000000..5b18112d2 --- /dev/null +++ b/src/ebmc/ranking-function-synthesis/nuterm/problem_sets.py @@ -0,0 +1,366 @@ +from strategies import * + +# =================================== +# - experiments +# =================================== +aprove_09_programs = [ + # =================================== + # - single loop linear + # =================================== + ('termination-suite/Aprove_09/GCD5/GCD5.jar', 'GCD5', 'gcd', anticorr_sumofrelu2), + ('termination-suite/Aprove_09/LogBuiltIn/LogBuiltIn.jar', 'LogBuiltIn', 'log', anticorr_sumofrelu2), + # ('termination-suite/Aprove_09/McCarthyIterative/McCarthyIterative.jar', 'McCarthyIterative', 'loop', anticorr_sumofrelu2), slow + ('termination-suite/Aprove_09/MinusBuiltIn/MinusBuiltIn.jar', 'MinusBuiltIn', 'loop', anticorr_sumofrelu2), + ('termination-suite/Aprove_09/PastaA4/PastaA4.jar', 'PastaA4', 'loop', anticorr_sumofrelu2), + ('termination-suite/Aprove_09/PastaA5/PastaA5.jar', 'PastaA5', 'loop', anticorr_sumofrelu2), + ('termination-suite/Aprove_09/PastaA6/PastaA6.jar', 'PastaA6', 'loop', anticorr_sumofrelu2), + ('termination-suite/Aprove_09/PastaA7/PastaA7.jar', 'PastaA7', 'loop', anticorr_sumofrelu2), + ('termination-suite/Aprove_09/PastaB2/PastaB2.jar', 'PastaB2', 'loop', anticorr_sumofrelu2), + ('termination-suite/Aprove_09/PastaB6/PastaB6.jar', 'PastaB6', 'loop', anticorr_sumofrelu2), + ('termination-suite/Aprove_09/PastaB7/PastaB7.jar', 'PastaB7', 'loop', anticorr_sumofrelu2), + ('termination-suite/Aprove_09/PastaB10/PastaB10.jar', 'PastaB10', 'loop', anticorr_sumofrelu2), + ('termination-suite/Aprove_09/PastaB11/PastaB11.jar', 'PastaB11', 'loop', anticorr_sumofrelu2), + ('termination-suite/Aprove_09/PastaB1/PastaB1.jar', 'PastaB1', 'loop', anticorr_sumofrelu2), + ('termination-suite/Aprove_09/PastaB5/PastaB5.jar', 'PastaB5', 'loop', anticorr_sumofrelu2), + ('termination-suite/Aprove_09/PastaA9/PastaA9.jar', 'PastaA9', 'loop', anticorr_sumofrelu2), + ('termination-suite/Aprove_09/CountUpRound/CountUpRound.jar', 'CountUpRound', 'loop', anticorr_sumofrelu2), + ('termination-suite/Aprove_09/DivMinus/DivMinus.jar', 'DivMinus', 'div', anticorr_sumofrelu2), + + # -- supporting invar + ('termination-suite/Aprove_09/PastaB8/PastaB8.jar', 'PastaB8', 'loop', anticorr_sumofrelu2), + # needs invariant x >= 0, while precondition imposes x > 0 + + # -- nondeterminism; We do not support this + # ('termination-suite/Aprove_09/PastaC9/PastaC9.jar', 'PastaC9', 'loop', None), + # ('termination-suite/Aprove_09/PastaC10/PastaC10.jar', 'PastaC10', 'loop', None), + # ('termination-suite/Aprove_09/PastaC11/PastaC11.jar', 'PastaC11', 'loop', None), + + # =================================== + # - two loops linear lexicographic + # 
=================================== + ('termination-suite/Aprove_09/PastaA1/PastaA1.jar', 'PastaA1', 'loop', anticorr_sumofrelu2), + ('termination-suite/Aprove_09/PastaB16/PastaB16.jar', 'PastaB16', 'loop', anticorr_sumofrelu2), + ('termination-suite/Aprove_09/PastaB17/PastaB17.jar', 'PastaB17', 'loop', anticorr_sumofrelu2), + ('termination-suite/Aprove_09/PastaC1/PastaC1.jar', 'PastaC1', 'loop', anticorr_sumofrelu2), + ('termination-suite/Aprove_09/PastaC2/PastaC2.jar', 'PastaC2', 'loop', anticorr_sumofrelu2), + + # -- supporting invar + ('termination-suite/Aprove_09/PastaB18/PastaB18.jar', 'PastaB18', 'loop', anticorr_sumofrelu2), + + # =================================== + # - single loop non-linear + # =================================== + ('termination-suite/Aprove_09/PastaB12/PastaB12.jar', 'PastaB12', 'loop', anticorr_sumofrelu2), + ('termination-suite/Aprove_09/PastaB13/PastaB13.jar', 'PastaB13', 'loop', anticorr_sumofrelu2), + ('termination-suite/Aprove_09/PastaA10/PastaA10.jar', 'PastaA10', 'loop', anticorr_sumofrelu2), # abs(x - y) + ('termination-suite/Aprove_09/PastaC3/PastaC3.jar', 'PastaC3', 'loop', anticorr_sumofrelu2), + # A solution is y - min{x,z}, or Relu(y-x) + Relu(y-z) + ('termination-suite/Aprove_09/LogIterative/LogIterative.jar', 'LogIterative', 'log', anticorr_sumofrelu2), + ('termination-suite/Aprove_09/LogMult/LogMult.jar', 'LogMult', 'log', anticorr_sumofrelu2), + + # -- supporting invar + ('termination-suite/Aprove_09/PastaC5/PastaC5.jar', 'PastaC5', 'loop', anticorr_sumofrelu2), + + # =================================== + # - unknown strategy + # =================================== + ('termination-suite/Aprove_09/Round3/Round3.jar', 'Round3', 'loop', anticorr_sumofrelu2), + ('termination-suite/Aprove_09/PastaB14/PastaB14.jar', 'PastaB14', 'loop', anticorr_sumofrelu2), + ('termination-suite/Aprove_09/PastaB15/PastaB15.jar', 'PastaB15', 'loop', anticorr_sumofrelu2), + + # =================================== + # - Pathological cases (Drawbacks discussed in submitted paper) + # =================================== + # argument changes at each iteration + ('termination-suite/Aprove_09/PlusSwap/PlusSwap.jar', 'PlusSwap', 'loop', anticorr_sumofrelu2), + ('termination-suite/Aprove_09/PlusSwapUnroll2/PlusSwapUnroll2.jar', 'PlusSwapUnroll2', 'loop', anticorr_sumofrelu2), + # This loop executes exactly once. The while can basically be replaced by if (x > y) and it does a swap. + ('termination-suite/Aprove_09/PastaB4/PastaB4.jar', 'PastaB4', 'loop', anticorr_sumofrelu2), + # Maybe we require an invariant or something. I think the ranking function is close to k - j but it gets interrupted by the first loop condition when j is >=100 + ('termination-suite/Aprove_09/PastaC7/PastaC7.jar', 'PastaC7', 'loop', anticorr_sumofrelu2), + ('termination-suite/Aprove_09/PastaB3/PastaB3.jar', 'PastaB3', 'loop', anticorr_sumofrelu2), # one iteration only + # - model not expressive enough + # Usually the trace is simply too short, i.e. 
3/4 long +] + + +sv_comp_termination = [ + #working + ('termination-suite/termination-crafted-lit/terminating/AliasDarteFeautrierGonnord-SAS2010-speedpldi4/AliasDarteFeautrierGonnordSAS2010speedpldi4.jar', 'AliasDarteFeautrierGonnordSAS2010speedpldi4', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/ChawdharyCookGulwaniSagivYang-ESOP2008-easy1/ChawdharyCookGulwaniSagivYangESOP2008easy1.jar', 'ChawdharyCookGulwaniSagivYangESOP2008easy1', "loop", i40_rank), + ('termination-suite/termination-crafted-lit/terminating/AliasDarteFeautrierGonnord-SAS2010-easy1/AliasDarteFeautrierGonnordSAS2010easy1.jar', 'AliasDarteFeautrierGonnordSAS2010easy1', "loop", i40_rank), # This is the same program as ChawdharyCookGulwaniSagivYangESOP2008easy1 + ('termination-suite/termination-crafted-lit/terminating/BradleyMannaSipma-CAV2005-Fig1/BradleyMannaSipmaCAV2005Fig1.jar', 'BradleyMannaSipmaCAV2005Fig1', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/LeikeHeizmann-TACAS2014-Ex7/LeikeHeizmannTACAS2014Ex7.jar', 'LeikeHeizmannTACAS2014Ex7', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/AliasDarteFeautrierGonnord-SAS2010-easy2-1/AliasDarteFeautrierGonnordSAS2010easy21.jar', 'AliasDarteFeautrierGonnordSAS2010easy21', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/ChenFlurMukhopadhyay-SAS2012-Ex2.10/ChenFlurMukhopadhyaySAS2012Ex210.jar', 'ChenFlurMukhopadhyaySAS2012Ex210', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/ChenFlurMukhopadhyay-SAS2012-Ex3.08/ChenFlurMukhopadhyaySAS2012Ex308.jar', 'ChenFlurMukhopadhyaySAS2012Ex308', "loop", anticorr_sumofrelu), # This is probably gonna stay hard + ('termination-suite/termination-crafted-lit/terminating/aviad/Aviad.jar', 'Aviad', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/AliasDarteFeautrierGonnord-SAS2010-ndecr/AliasDarteFeautrierGonnordSAS2010ndecr.jar', 'AliasDarteFeautrierGonnordSAS2010ndecr', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/GulavaniGulwani-CAV2008-Fig1c/GulavaniGulwaniCAV2008Fig1c.jar', 'GulavaniGulwaniCAV2008Fig1c', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/HeizmannHoenickeLeikePodelski-ATVA2013-Fig8/HeizmannHoenickeLeikePodelskiATVA2013Fig8.jar', 'HeizmannHoenickeLeikePodelskiATVA2013Fig8', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/CookSeeZuleger-TACAS2013-Fig8a-modified/CookSeeZulegerTACAS2013Fig8amodified.jar', 'CookSeeZulegerTACAS2013Fig8amodified', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/CookSeeZuleger-TACAS2013-Fig8a/CookSeeZulegerTACAS2013Fig8a.jar', 'CookSeeZulegerTACAS2013Fig8a', "loop", anticorr_sumofrelu), + #('termination-suite/termination-crafted-lit/terminating/genady/Genady.jar', 'Genady', "loop", genady_test), genady_test not found + ('termination-suite/termination-crafted-lit/terminating/HeizmannHoenickeLeikePodelski-ATVA2013-Fig9/HeizmannHoenickeLeikePodelskiATVA2013Fig9.jar', 'HeizmannHoenickeLeikePodelskiATVA2013Fig9', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/ChenFlurMukhopadhyay-SAS2012-Ex2.19/ChenFlurMukhopadhyaySAS2012Ex219.jar', 'ChenFlurMukhopadhyaySAS2012Ex219', "loop", anticorr_sumofrelu), + 
('termination-suite/termination-crafted-lit/terminating/ChenFlurMukhopadhyay-SAS2012-Ex2.07/ChenFlurMukhopadhyaySAS2012Ex207.jar','ChenFlurMukhopadhyaySAS2012Ex207', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/LeikeHeizmannWST2014Ex9/LeikeHeizmannWST2014Ex9.jar', 'LeikeHeizmannWST2014Ex9', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/ChawdharyCookGulwaniSagivYang-ESOP2008-easy2/ChawdharyCookGulwaniSagivYangESOP2008easy2.jar','ChawdharyCookGulwaniSagivYangESOP2008easy2', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/LeikeHeizmann-TACAS2014-Ex1/LeikeHeizmannTACAS2014Ex1.jar','LeikeHeizmannTACAS2014Ex1', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/HeizmannHoenickeLeikePodelski-ATVA2013-Fig4/HeizmannHoenickeLeikePodelskiATVA2013Fig4.jar','HeizmannHoenickeLeikePodelskiATVA2013Fig4', "loop", None), #TODO Exception with mvgaussian + ('termination-suite/termination-crafted-lit/terminating/LeikeHeizmann-TACAS2014-Ex9/LeikeHeizmannTACAS2014Ex9.jar','LeikeHeizmannTACAS2014Ex9', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/AliasDarteFeautrierGonnord-SAS2010-wise/AliasDarteFeautrierGonnordSAS2010wise.jar','AliasDarteFeautrierGonnordSAS2010wise', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/AliasDarteFeautrierGonnord-SAS2010-easy2-2/AliasDarteFeautrierGonnordSAS2010easy22.jar','AliasDarteFeautrierGonnordSAS2010easy22', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/ChenFlurMukhopadhyay-SAS2012-Ex2.13/ChenFlurMukhopadhyaySAS2012Ex213.jar','ChenFlurMukhopadhyaySAS2012Ex213', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/HeizmannHoenickeLeikePodelski-ATVA2013-Fig5-modified/HeizmannHoenickeLeikePodelskiATVA2013Fig5modified.jar', 'HeizmannHoenickeLeikePodelskiATVA2013Fig5modified', "loop", anticorr_sumofrelu), # TODO check if this is a bug + ('termination-suite/termination-crafted-lit/terminating/ChenFlurMukhopadhyay-SAS2012-Ex3.01/ChenFlurMukhopadhyaySAS2012Ex301.jar', 'ChenFlurMukhopadhyaySAS2012Ex301', "loop", anticorr_sumofrelu), + ( + 'termination-suite/termination-crafted-lit/terminating/ChenFlurMukhopadhyay-SAS2012-Ex2.16/ChenFlurMukhopadhyaySAS2012Ex216.jar', + 'ChenFlurMukhopadhyaySAS2012Ex216', "loop", anticorr_sumofrelu), + + # ########################## + # Does not check correctly + # ########################## + # We learn relu(x), relu(y), relu(x+y), relu(x - y) and the checker rejects all of them. But it should accept at least one of them (the first one, probably) + ('termination-suite/termination-crafted-lit/terminating/HeizmannHoenickeLeikePodelski-ATVA2013-Fig1/HeizmannHoenickeLeikePodelskiATVA2013Fig1.jar', 'HeizmannHoenickeLeikePodelskiATVA2013Fig1', "loop", anticorr_sumofrelu), + # The RF is very easy. 
But the checker does not accept y even though it is learned + ('termination-suite/termination-crafted-lit/terminating/PodelskiRybalchenkoTACAS2011Fig2/PodelskiRybalchenkoTACAS2011Fig1.jar','PodelskiRybalchenkoTACAS2011Fig1', "loop", anticorr_sumofrelu), + # Not sure if we learn the correct function but checking throws an exception either way + ('termination-suite/termination-crafted-lit/terminating/AliasDarteFeautrierGonnord-SAS2010-speedFails4/AliasDarteFeautrierGonnordSAS2010speedFails4.jar','AliasDarteFeautrierGonnordSAS2010speedFails4', "loop", None), + # Modified version (HeizmannHoenickeLeikePodelski-ATVA2013-Fig5-modified) checks correctly as we add invariant to loop guard + ( 'termination-suite/termination-crafted-lit/terminating/HeizmannHoenickeLeikePodelski-ATVA2013-Fig5/HeizmannHoenickeLeikePodelskiATVA2013Fig5.jar','HeizmannHoenickeLeikePodelskiATVA2013Fig5', "loop", anticorr_sumofrelu), # TODO check if this is a bug + + # ########################## + # Does not learn correctly + # No known technical issues but doesn't work for unknown reasons (maybe pathological case, maybe the RF is too complicated) + # ########################## + ('termination-suite/termination-crafted-lit/terminating/NoriSharmaFSE2013Fig7/NoriSharmaFSE2013Fig7.jar','NoriSharmaFSE2013Fig7', "loop", anticorr_sumofrelu), # TODO Probably easy to fix + # TODO: Might be easy, might be pathological. The RF might be -x + y, but we learn -x - z; note that we have the instruction x = z all the time + + ('termination-suite/termination-crafted-lit/terminating/ChenFlurMukhopadhyay-SAS2012-Ex3.10/ChenFlurMukhopadhyaySAS2012Ex310.jar', 'ChenFlurMukhopadhyaySAS2012Ex310', "loop", anticorr_sumofrelu), + # Probably complex ranking function + ('termination-suite/termination-crafted-lit/terminating/GopanReps-CAV2006-Fig1a/GopanRepsCAV2006Fig1a.jar','GopanRepsCAV2006Fig1a', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/ChenFlurMukhopadhyay-SAS2012-Ex3.04/ChenFlurMukhopadhyaySAS2012Ex304.jar','ChenFlurMukhopadhyaySAS2012Ex304', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/Masse-VMCAI2014-Ex6/MasseVMCAI2014Ex6.jar','MasseVMCAI2014Ex6', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/ChenFlurMukhopadhyay-SAS2012-Ex2.06/ChenFlurMukhopadhyaySAS2012Ex206.jar','ChenFlurMukhopadhyaySAS2012Ex206', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/AliasDarteFeautrierGonnord-SAS2010-speedpldi2/AliasDarteFeautrierGonnordSAS2010speedpldi2.jar','AliasDarteFeautrierGonnordSAS2010speedpldi2', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/ChenFlurMukhopadhyay-SAS2012-Ex3.07/ChenFlurMukhopadhyaySAS2012Ex307.jar','ChenFlurMukhopadhyaySAS2012Ex307', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/AliasDarteFeautrierGonnord-SAS2010-speedpldi3/AliasDarteFeautrierGonnordSAS2010speedpldi3.jar','AliasDarteFeautrierGonnordSAS2010speedpldi3', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/ChenFlurMukhopadhyay-SAS2012-Ex2.08/ChenFlurMukhopadhyaySAS2012Ex208.jar', 'ChenFlurMukhopadhyaySAS2012Ex208', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/ChenFlurMukhopadhyay-SAS2012-Ex2.21/ChenFlurMukhopadhyaySAS2012Ex221.jar','ChenFlurMukhopadhyaySAS2012Ex221', "loop", anticorr_sumofrelu), + 
('termination-suite/termination-crafted-lit/terminating/BrockschmidtCookFuhs-CAV2013-Introduction/BrockschmidtCookFuhsCAV2013Introduction.jar','BrockschmidtCookFuhsCAV2013Introduction', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/AliasDarteFeautrierGonnord-SAS2010-terminate/AliasDarteFeautrierGonnordSAS2010terminate.jar','AliasDarteFeautrierGonnordSAS2010terminate', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/PodelskiRybalchenkoVMCAI2004Ex2/PodelskiRybalchenkoVMCAI2004Ex2.jar','PodelskiRybalchenkoVMCAI2004Ex2', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/HeizmannHoenickeLeikePodelski-ATVA2013-Fig2/HeizmannHoenickeLeikePodelskiATVA2013Fig2.jar','HeizmannHoenickeLeikePodelskiATVA2013Fig2', "loop", anticorr_sumofrelu), + # complex, probably with max + ('termination-suite/termination-crafted-lit/terminating/GulavaniGulwani-CAV2008-Fig1a/GulavaniGulwaniCAV2008Fig1a.jar', 'GulavaniGulwaniCAV2008Fig1a', "loop", anticorr_sumofrelu), + # anticorr_sumofrelu non linear + ('termination-suite/termination-crafted-lit/terminating/ChenFlurMukhopadhyay-SAS2012-Ex3.05/ChenFlurMukhopadhyaySAS2012Ex305.jar', 'ChenFlurMukhopadhyaySAS2012Ex305', "loop", anticorr_sumofrelu), + # anticorr_sumofrelu non linear + ('termination-suite/termination-crafted-lit/terminating/ChenFlurMukhopadhyay-SAS2012-Ex4.01/ChenFlurMukhopadhyaySAS2012Ex401.jar', 'ChenFlurMukhopadhyaySAS2012Ex401', "loop", anticorr_sumofrelu), + #hard + ('termination-suite/termination-crafted-lit/terminating/KroeningSharyginaTsitovichWintersteiger-CAV2010-Fig1/KroeningSharyginaTsitovichWintersteigerCAV2010Fig1.jar', 'KroeningSharyginaTsitovichWintersteigerCAV2010Fig1', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/CookSeeZuleger-TACAS2013-Fig8b/CookSeeZulegerTACAS2013Fig8b.jar', 'CookSeeZulegerTACAS2013Fig8b', "loop", anticorr_sumofrelu), # Simple program but maybe complex rf + ('termination-suite/termination-crafted-lit/terminating/Masse-VMCAI2014-Fig1a/MasseVMCAI2014Fig1a.jar', 'MasseVMCAI2014Fig1a', "loop", anticorr_sumofrelu), # rf is probably a but the operations on b make it difficult to learn + # Complex problem + ('termination-suite/termination-crafted-lit/terminating/AliasDarteFeautrierGonnord-SAS2010-cousot9/AliasDarteFeautrierGonnordSAS2010cousot9.jar', 'AliasDarteFeautrierGonnordSAS2010cousot9', "loop", anticorr_sumofrelu), + # hard, non-linear + ('termination-suite/termination-crafted-lit/terminating/ChenFlurMukhopadhyay-SAS2012-Ex3.03/ChenFlurMukhopadhyaySAS2012Ex303.jar', 'ChenFlurMukhopadhyaySAS2012Ex303', "loop", anticorr_sumofrelu), + # hard, might be pathological + ('termination-suite/termination-crafted-lit/terminating/ColonSipma-TACAS2001-Fig1/ColonSipmaTACAS2001Fig1.jar', 'ColonSipmaTACAS2001Fig1', "loop", anticorr_sumofrelu), + # hard + ('termination-suite/termination-crafted-lit/terminating/ChenFlurMukhopadhyay-SAS2012-Fig1/ChenFlurMukhopadhyaySAS2012Fig1.jar', 'ChenFlurMukhopadhyaySAS2012Fig1', "loop", anticorr_sumofrelu), + # No idea, probably hard + ('termination-suite/termination-crafted-lit/terminating/NoriSharmaFSE2013Fig8/NoriSharmaFSE2013Fig8.jar', 'NoriSharmaFSE2013Fig8', "loop", anticorr_sumofrelu), + # No idea, probably hard, non linear + ('termination-suite/termination-crafted-lit/terminating/ChenFlurMukhopadhyay-SAS2012-Ex2.18/ChenFlurMukhopadhyaySAS2012Ex218.jar', 'ChenFlurMukhopadhyaySAS2012Ex218', "loop", anticorr_sumofrelu), + # Hard, probably 
non-linear + ('termination-suite/termination-crafted-lit/terminating/ChenFlurMukhopadhyay-SAS2012-Ex2.09/ChenFlurMukhopadhyaySAS2012Ex209.jar', 'ChenFlurMukhopadhyaySAS2012Ex209', "loop", anticorr_sumofrelu), + + # Nested Loops + ('termination-suite/termination-crafted-lit/terminating/AliasDarteFeautrierGonnord-SAS2010-nestedLoop-2/AliasDarteFeautrierGonnordSAS2010nestedLoop2.jar','AliasDarteFeautrierGonnordSAS2010nestedLoop2', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/LarrazOliverasRodriguez-CarbonellRubio-FMCAD2013-Fig1/LarrazOliverasRodriguezCarbonellRubioFMCAD2013Fig1.jar','LarrazOliverasRodriguezCarbonellRubioFMCAD2013Fig1', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/AliasDarteFeautrierGonnord-SAS2010-nestedLoop-1/AliasDarteFeautrierGonnordSAS2010nestedLoop1.jar','AliasDarteFeautrierGonnordSAS2010nestedLoop1', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/Urban-WST2013-Fig2/UrbanWST2013Fig2.jar','UrbanWST2013Fig2', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/AliasDarteFeautrierGonnord-SAS2010-wcet2/AliasDarteFeautrierGonnordSAS2010wcet2.jar','AliasDarteFeautrierGonnordSAS2010wcet2', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/BrockschmidtCookFuhs-CAV2013-Fig9a/BrockschmidtCookFuhsCAV2013Fig9a.jar','BrockschmidtCookFuhsCAV2013Fig9a', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/AliasDarteFeautrierGonnord-SAS2010-complex/AliasDarteFeautrierGonnordSAS2010complex.jar','AliasDarteFeautrierGonnordSAS2010complex', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/AliasDarteFeautrierGonnord-SAS2010-while2/AliasDarteFeautrierGonnordSAS2010while2.jar','AliasDarteFeautrierGonnordSAS2010while2', "loop", anticorr_sumofrelu), + ('termination-suite/termination-crafted-lit/terminating/AliasDarteFeautrierGonnord-SAS2010-loops/AliasDarteFeautrierGonnordSAS2010loops.jar','AliasDarteFeautrierGonnordSAS2010loops', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/PodelskiRybalchenkoLICS2004Fig1/PodelskiRybalchenkoLICS2004Fig1.jar','PodelskiRybalchenkoLICS2004Fig1', "loop", None), + + # ########################## + # Bugs, Unknown, and other + # ########################## + # TODO this should not work, but it says it does. It has nested loops, hence it should not work. 
I think this is a bug in the checker + ('termination-suite/termination-crafted-lit/terminating/BrockschmidtCookFuhs-CAV2013-Fig1/BrockschmidtCookFuhsCAV2013Fig1.jar', + 'BrockschmidtCookFuhsCAV2013Fig1', "loop", anticorr_sumofrelu), + + # ########################## + # Pathological Cases + # ########################## + ( + 'termination-suite/termination-crafted-lit/terminating/ChenFlurMukhopadhyay-SAS2012-Ex2.01/ChenFlurMukhopadhyaySAS2012Ex201.jar', + 'ChenFlurMukhopadhyaySAS2012Ex201', "loop", anticorr_sumofrelu), + ( + 'termination-suite/termination-crafted-lit/terminating/LeikeHeizmann-TACAS2014-Fig1/LeikeHeizmannTACAS2014Fig1.jar', + 'LeikeHeizmannTACAS2014Fig1', "loop", anticorr_sumofrelu), + ( + 'termination-suite/termination-crafted-lit/terminating/ChenFlurMukhopadhyay-SAS2012-Ex1.01/ChenFlurMukhopadhyaySAS2012Ex101.jar', + 'ChenFlurMukhopadhyaySAS2012Ex101', "loop", None), # PATHOLOGICAL CASE, short traces + +] + + # ########################## + # Not supported operations + # ########################## + +sv_comp_unsupported =[ + + #Uses Java.util.Random() + ('termination-suite/termination-crafted-lit/terminating/ChenFlurMukhopadhyay-SAS2012-Ex3.09/ChenFlurMukhopadhyaySAS2012Ex309.jar','ChenFlurMukhopadhyaySAS2012Ex309', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/AliasDarteFeautrierGonnord-SAS2010-aaron2/AliasDarteFeautrierGonnordSAS2010aaron2.jar', 'AliasDarteFeautrierGonnordSAS2010aaron2', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/LeikeHeizmann-TACAS2014-Ex8/LeikeHeizmannTACAS2014Ex8.jar', 'LeikeHeizmannTACAS2014Ex8', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/ChenFlurMukhopadhyay-SAS2012-Ex2.22/ChenFlurMukhopadhyaySAS2012Ex222.jar','ChenFlurMukhopadhyaySAS2012Ex222', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/AliasDarteFeautrierGonnord-SAS2010-random1d-1/AliasDarteFeautrierGonnordSAS2010random1d1.jar', 'AliasDarteFeautrierGonnordSAS2010random1d1', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/CookSeeZuleger-TACAS2013-Fig7b/CookSeeZulegerTACAS2013Fig7b.jar','CookSeeZulegerTACAS2013Fig7b', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/ChenCookFuhsNimkarOHearn-TACAS2014-Introduction/ChenCookFuhsNimkarOHearnTACAS2014Introduction.jar','ChenCookFuhsNimkarOHearnTACAS2014Introduction', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/AliasDarteFeautrierGonnord-SAS2010-random2d/AliasDarteFeautrierGonnordSAS2010random2d.jar','AliasDarteFeautrierGonnordSAS2010random2d', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/ChawdharyCookGulwaniSagivYang-ESOP2008-aaron6/ChawdharyCookGulwaniSagivYangESOP2008aaron6.jar','ChawdharyCookGulwaniSagivYangESOP2008aaron6', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/ChawdharyCookGulwaniSagivYang-ESOP2008-random1d/ChawdharyCookGulwaniSagivYangESOP2008random1d.jar','ChawdharyCookGulwaniSagivYangESOP2008random1d', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/Masse-VMCAI2014-Fig1b/MasseVMCAI2014Fig1b.jar','MasseVMCAI2014Fig1b', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/AliasDarteFeautrierGonnord-SAS2010-exmini/AliasDarteFeautrierGonnordSAS2010exmini.jar','AliasDarteFeautrierGonnordSAS2010exmini', "loop", None), + 
('termination-suite/termination-crafted-lit/terminating/AliasDarteFeautrierGonnord-SAS2010-rsd/AliasDarteFeautrierGonnordSAS2010rsd.jar','AliasDarteFeautrierGonnordSAS2010rsd', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/ChawdharyCookGulwaniSagivYang-ESOP2008-aaron4/ChawdharyCookGulwaniSagivYangESOP2008aaron4.jar','ChawdharyCookGulwaniSagivYangESOP2008aaron4', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/UrbanMine-ESOP2014-Fig3/UrbanMineESOP2014Fig3.jar','UrbanMineESOP2014Fig3', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/AliasDarteFeautrierGonnord-SAS2010-aaron3/AliasDarteFeautrierGonnordSAS2010aaron3.jar','AliasDarteFeautrierGonnordSAS2010aaron3', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/ChawdharyCookGulwaniSagivYang-ESOP2008-aaron1/ChawdharyCookGulwaniSagivYangESOP2008aaron1.jar','ChawdharyCookGulwaniSagivYangESOP2008aaron1', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/HeizmannHoenickeLeikePodelski-ATVA2013-Fig6/HeizmannHoenickeLeikePodelskiATVA2013Fig6.jar','HeizmannHoenickeLeikePodelskiATVA2013Fig6', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/ChawdharyCookGulwaniSagivYang-ESOP2008-aaron12/ChawdharyCookGulwaniSagivYangESOP2008aaron12.jar','ChawdharyCookGulwaniSagivYangESOP2008aaron12', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/Ben-Amram-LMCS2010-Ex2.3/BenAmramLMCS2010Ex23.jar','BenAmramLMCS2010Ex23', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/ChawdharyCookGulwaniSagivYang-ESOP2008-aaron3/ChawdharyCookGulwaniSagivYangESOP2008aaron3.jar','ChawdharyCookGulwaniSagivYangESOP2008aaron3', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/ChawdharyCookGulwaniSagivYang-ESOP2008-random2d/ChawdharyCookGulwaniSagivYangESOP2008random2d.jar','ChawdharyCookGulwaniSagivYangESOP2008random2d', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/AliasDarteFeautrierGonnord-SAS2010-Fig1/AliasDarteFeautrierGonnordSAS2010Fig1.jar','AliasDarteFeautrierGonnordSAS2010Fig1', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/BradleyMannaSipma-ICALP2005-Fig1/BradleyMannaSipmaICALP2005Fig1.jar', 'BradleyMannaSipmaICALP2005Fig1', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/KroeningSharyginaTsitovichWintersteiger-CAV2010-Ex/KroeningSharyginaTsitovichWintersteigerCAV2010Ex.jar','KroeningSharyginaTsitovichWintersteigerCAV2010Ex', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/AliasDarteFeautrierGonnord-SAS2010-Fig2b/AliasDarteFeautrierGonnordSAS2010Fig2b.jar','AliasDarteFeautrierGonnordSAS2010Fig2b', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/ChenFlurMukhopadhyay-SAS2012-Ex1.03/ChenFlurMukhopadhyaySAS2012Ex103.jar','ChenFlurMukhopadhyaySAS2012Ex103', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/AliasDarteFeautrierGonnord-SAS2010-Fig2a/AliasDarteFeautrierGonnordSAS2010Fig2a.jar','AliasDarteFeautrierGonnordSAS2010Fig2a', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/ChenFlurMukhopadhyay-SAS2012-Ex1.05/ChenFlurMukhopadhyaySAS2012Ex105.jar','ChenFlurMukhopadhyaySAS2012Ex105', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/ChenFlurMukhopadhyay-SAS2012-Ex1.04/ChenFlurMukhopadhyaySAS2012Ex104.jar','ChenFlurMukhopadhyaySAS2012Ex104', "loop", None), + 
('termination-suite/termination-crafted-lit/terminating/ChenFlurMukhopadhyay-SAS2012-Ex2.20/ChenFlurMukhopadhyaySAS2012Ex220.jar','ChenFlurMukhopadhyaySAS2012Ex220', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/AliasDarteFeautrierGonnord-SAS2010-counterex1a/AliasDarteFeautrierGonnordSAS2010counterex1a.jar','AliasDarteFeautrierGonnordSAS2010counterex1a', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/CookSeeZuleger-TACAS2013-Fig1/CookSeeZulegerTACAS2013Fig1.jar','CookSeeZulegerTACAS2013Fig1', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/CookSeeZuleger-TACAS2013-Fig7a/CookSeeZulegerTACAS2013Fig7a.jar','CookSeeZulegerTACAS2013Fig7a', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/ChenFlurMukhopadhyay-SAS2012-Ex1.02/ChenFlurMukhopadhyaySAS2012Ex102.jar','ChenFlurMukhopadhyaySAS2012Ex102', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/AliasDarteFeautrierGonnord-SAS2010-random1d-2/AliasDarteFeautrierGonnordSAS2010random1d2.jar','AliasDarteFeautrierGonnordSAS2010random1d2', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/HeizmannHoenickeLeikePodelski-ATVA2013-Fig7/HeizmannHoenickeLeikePodelskiATVA2013Fig7.jar','HeizmannHoenickeLeikePodelskiATVA2013Fig7', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/ChawdharyCookGulwaniSagivYang-ESOP2008-aaron2/ChawdharyCookGulwaniSagivYangESOP2008aaron2.jar','ChawdharyCookGulwaniSagivYangESOP2008aaron2', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/AliasDarteFeautrierGonnord-SAS2010-counterex1b/AliasDarteFeautrierGonnordSAS2010counterex1b.jar','AliasDarteFeautrierGonnordSAS2010counterex1b', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/min_rf/MinRf.jar', 'MinRf', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/GulwaniJainKoskinen-PLDI2009-Fig1/GulwaniJainKoskinenPLDI2009Fig1.jar','GulwaniJainKoskinenPLDI2009Fig1', "loop", None), + + # Sequential while loops + ('termination-suite/termination-crafted-lit/terminating/GulavaniGulwani-CAV2008-Fig1b/GulavaniGulwaniCAV2008Fig1b.jar', 'GulavaniGulwaniCAV2008Fig1b', "loop", None), + ('termination-suite/termination-crafted-lit/terminating/Avery-FLOPS2006-Table1/AveryFLOPS2006Table1.jar','AveryFLOPS2006Table1', "loop", None), + +] + + + +nla_term = [ + + # Works + ('termination-suite/nla-term/prodbin-both-t/prodbinbotht.jar', 'prodbinbotht', "loop", None), + + # ###### + # Dynamite proves with single RF + # ###### + # We learn + k but not -c even though before rounding we have -c most of the time. + ('termination-suite/nla-term/bresenham1-both-t/bresenham1botht.jar', 'bresenham1botht', "loop", None), + # - c + k is a ranking function according to dynamite paper. 
Before rounding we have this as a subterm
+    ('termination-suite/nla-term/cohencu5-both-t/cohencu5botht.jar', 'cohencu5botht', "loop", None),
+    # we learn a - n which is supposed to be an RF but rounding destroys it
+    ('termination-suite/nla-term/cohencu6-both-t/cohencu6botht.jar', 'cohencu6botht', "loop", None),
+    # we learn a - n which is supposed to be an RF but rounding destroys it
+    ('termination-suite/nla-term/cohencu7-both-t/cohencu7botht.jar', 'cohencu7botht', "loop", None),
+
+    # Two Sequential While loops
+    ('termination-suite/nla-term/dijkstra1-both-t/dijkstra1botht.jar', 'dijkstra1botht', "loop", None),
+    ('termination-suite/nla-term/dijkstra2-both-t/dijkstra2botht.jar', 'dijkstra2botht', "loop", None),
+    ('termination-suite/nla-term/dijkstra3-both-t/dijkstra3botht.jar', 'dijkstra3botht', "loop", None),
+    ('termination-suite/nla-term/hard2-both-t/hard2botht.jar', 'hard2botht', "loop", None),
+
+    # We learn and round 1 * y3 but for some reason the verification fails even though the Dynamite paper says that's
+    # the RF
+    ('termination-suite/nla-term/mannadiv-both-t/mannadivbotht.jar', 'mannadivbotht', "loop", anticorr_sumofrelu),
+
+    # We learn k - c before rounding but after rounding the c is gone
+    ('termination-suite/nla-term/ps2-both-t/ps2botht.jar', 'ps2botht', "loop", None),
+    ('termination-suite/nla-term/ps3-both-t/ps3botht.jar', 'ps3botht', "loop", None),
+    ('termination-suite/nla-term/ps4-both-t/ps4botht.jar', 'ps4botht', "loop", None),
+
+    # gets somewhat learned before rounding
+    ('termination-suite/nla-term/sqrt1-both-t/sqrt1botht.jar', 'sqrt1botht', "loop", None),
+
+    # Nested loops
+    ('termination-suite/nla-term/fermat1-both-t/fermat1botht.jar', 'fermat1botht', "loop", None),
+
+    # Comes close but rounding does weird things. In the Dynamite paper the RF is: -r + 12a + 3k
+    ('termination-suite/nla-term/freire1-both-t/freire1botht.jar', 'freire1botht', "loop", None),
+
+
+
+
+    # ######
+    # Dynamite proves with COMPOSITIONAL RF
+    # ######
+    ('termination-suite/nla-term/cohencu1-both-t/cohencu1botht.jar', 'cohencu1botht', "loop", None),
+    ('termination-suite/nla-term/cohencu2-both-t/cohencu2botht.jar', 'cohencu2botht', "loop", None),
+    ('termination-suite/nla-term/cohencu3-both-t/cohencu3botht.jar', 'cohencu3botht', "loop", None),
+    ('termination-suite/nla-term/cohencu4-both-t/cohencu4botht.jar', 'cohencu4botht', "loop", None),
+
+
+    # One gets learned: k - c
+    ('termination-suite/nla-term/ps6-both-t/ps6botht.jar', 'ps6botht', "loop", None),
+
+    # -x or k - c... 
gets somewhat learned before rounding
+    ('termination-suite/nla-term/ps5-both-t/ps5botht.jar', 'ps5botht', "loop", None),
+
+    # Two sequential while loops
+    ('termination-suite/nla-term/dijkstra4-both-t/dijkstra4botht.jar', 'dijkstra4botht', "loop", None),
+    ('termination-suite/nla-term/dijkstra5-both-t/dijkstra5botht.jar', 'dijkstra5botht', "loop", None),
+    ('termination-suite/nla-term/dijkstra6-both-t/dijkstra6botht.jar', 'dijkstra6botht', "loop", None),
+    ('termination-suite/nla-term/hard-both-t/hardbotht.jar', 'hardbotht', "loop", None),
+
+    #
+    ('termination-suite/nla-term/geo3-both-t/geo3botht.jar', 'geo3botht', "loop", None),
+    ('termination-suite/nla-term/geo1-both-t/geo1botht.jar', 'geo1botht', "loop", None),
+    ('termination-suite/nla-term/geo2-both-t/geo2botht.jar', 'geo2botht', "loop", None),
+    ('termination-suite/nla-term/egcd3-both-t/egcd3botht.jar', 'egcd3botht', "loop", None),
+    ('termination-suite/nla-term/prod4br-both-t/prod4brbotht.jar', 'prod4brbotht', "loop", None),
+    ('termination-suite/nla-term/egcd-both-t/egcdbotht.jar', 'egcdbotht', "loop", None),
+
+
+    #unknown
+    ('termination-suite/nla-term/egcd2-both-t/egcd2botht.jar', 'egcd2botht', "loop", None),
+
+]
+
+datatype_bm = [
+    ('examples/PaperEx1/PaperEx1.jar', 'classes.PaperEx1', 'removeNegative', None),
+]
+
+lst ="AliasDarteFeautrierGonnordSAS201CookSeeZulegerTACAS2013Fig8a0speedpldi4 ChawdharyCookGulwaniSagivYangESOP2008easy1 AliasDarteFeautrierGonnordSAS2010easy1 BradleyMannaSipmaCAV2005Fig1 LeikeHeizmannTACAS2014Ex7 AliasDarteFeautrierGonnordSAS2010easy21 ChenFlurMukhopadhyaySAS2012Ex210 ChenFlurMukhopadhyaySAS2012Ex308 Aviad AliasDarteFeautrierGonnordSAS2010ndecr GulavaniGulwaniCAV2008Fig1c HeizmannHoenickeLeikePodelskiATVA2013Fig8 CookSeeZulegerTACAS2013Fig8amodified Genady HeizmannHoenickeLeikePodelskiATVA2013Fig9 ChenFlurMukhopadhyaySAS2012Ex219 ChenFlurMukhopadhyaySAS2012Ex207 LeikeHeizmannWST2014Ex9 ChawdharyCookGulwaniSagivYangESOP2008easy2 LeikeHeizmannTACAS2014Ex1 HeizmannHoenickeLeikePodelskiATVA2013Fig4 LeikeHeizmannTACAS2014Ex9 AliasDarteFeautrierGonnordSAS2010wise AliasDarteFeautrierGonnordSAS2010easy22 ChenFlurMukhopadhyaySAS2012Ex213 HeizmannHoenickeLeikePodelskiATVA2013Fig5modified ChenFlurMukhopadhyaySAS2012Ex301 HeizmannHoenickeLeikePodelskiATVA2013Fig1 PodelskiRybalchenkoTACAS2011Fig1 PodelskiRybalchenkoTACAS2011Fig2 AliasDarteFeautrierGonnordSAS2010speedFails4 HeizmannHoenickeLeikePodelskiATVA2013Fig5 NoriSharmaFSE2013Fig7 ChenFlurMukhopadhyaySAS2012Ex310 GopanRepsCAV2006Fig1a ChenFlurMukhopadhyaySAS2012Ex304 MasseVMCAI2014Ex6 ChenFlurMukhopadhyaySAS2012Ex206 AliasDarteFeautrierGonnordSAS2010speedpldi2 ChenFlurMukhopadhyaySAS2012Ex307 AliasDarteFeautrierGonnordSAS2010speedpldi3 ChenFlurMukhopadhyaySAS2012Ex208 ChenFlurMukhopadhyaySAS2012Ex221 BrockschmidtCookFuhsCAV2013Introduction AliasDarteFeautrierGonnordSAS2010terminate PodelskiRybalchenkoVMCAI2004Ex2 HeizmannHoenickeLeikePodelskiATVA2013Fig2 GulavaniGulwaniCAV2008Fig1a ChenFlurMukhopadhyaySAS2012Ex305 ChenFlurMukhopadhyaySAS2012Ex401 KroeningSharyginaTsitovichWintersteigerCAV2010Fig1 CookSeeZulegerTACAS2013Fig8b MasseVMCAI2014Fig1a ChenFlurMukhopadhyaySAS2012Ex216 AliasDarteFeautrierGonnordSAS2010cousot9 ChenFlurMukhopadhyaySAS2012Ex303 ColonSipmaTACAS2001Fig1 ChenFlurMukhopadhyaySAS2012Fig1 NoriSharmaFSE2013Fig8 ChenFlurMukhopadhyaySAS2012Ex218 ChenFlurMukhopadhyaySAS2012Ex209 LarrazOliverasRodriguezCarbonellRubioFMCAD2013Fig1 UrbanWST2013Fig2 AliasDarteFeautrierGonnordSAS2010wcet2 BrockschmidtCookFuhsCAV2013Fig9a 
AliasDarteFeautrierGonnordSAS2010complex AliasDarteFeautrierGonnordSAS2010while2 AliasDarteFeautrierGonnordSAS2010loops PodelskiRybalchenkoLICS2004Fig1 BrockschmidtCookFuhsCAV2013Fig1 ChenFlurMukhopadhyaySAS2012Ex201 LeikeHeizmannTACAS2014Fig1 ChenFlurMukhopadhyaySAS2012Ex101" + + +if __name__ == '__main__': + print(len(sv_comp_termination)) + print(len(lst.split(" "))) + + sv_ = list(map(lambda x: x[1], sv_comp_termination)) + ss_ = lst.split(" ") + + print([x for x in sv_ if x not in ss_]) + exit(0) + diff --git a/src/ebmc/ranking-function-synthesis/nuterm/strategies.py b/src/ebmc/ranking-function-synthesis/nuterm/strategies.py new file mode 100644 index 000000000..f6c469ad5 --- /dev/null +++ b/src/ebmc/ranking-function-synthesis/nuterm/strategies.py @@ -0,0 +1,1687 @@ +import sys +import time +import os +import pathlib +import psutil + +import json + +import torch + +from loopheads import get_loop_heads +from models.SumOfRelu2 import SumOfRelu2 + +from termination import run_termination +from termination import tracing +from termination import run_learning + +from models.SingleLinearWithRelu import SingleLinearWithRelu +from models.SingleLinear import SingleLinear +from models.TwoLayerRelu import TwoLayerRelu +from models.SumOfRelu import SumOfRelu +from models.SumOfLinearsWithLex import SumOfLinearsWithLex + +import torch.nn.functional as F + +from learning import * + +from javachecker import * + +import sympy as sp +import numpy as np +from torch import nn + +from utils import add_new_traces, get_and_layout_traces, get_and_layout_traces_mvgaussian + +import subprocess +import re + +tracing_seed = None + +processes = psutil.cpu_count(logical=False) + + +def pos_neg_unary_init(m: torch.nn.Linear): + lst = [] + for x in m.weight.data: + l = [] + for y in x: + if y > 0.2: + l.append(1.0) + elif y < -0.2: + l.append(-1.0) + else: + l.append(0.0) + lst.append(torch.Tensor(l)) + + res = torch.stack(lst) + return res + + +# =================================== +# - strategies +# =================================== +def anticorr_sumofrelu(vcds, overwrite_sampling=None, seed=None): + + if seed is not None: + torch.manual_seed(seed) + + print("Running with {}".format(('pairanticorr' if overwrite_sampling is None else overwrite_sampling))) + try: + loop_heads = get_loop_heads(jarfile, classname, methodname) + assert len(loop_heads) <= 2 + #print(loop_heads) + #loop_offset = loop_heads[0] + except Exception as e: + return {'program': jarfile, 'function': methodname, 'class': classname, 'error': str(e)} + limit = 100 + if force_sample_size is None: + samples = 1000 + else: + samples = force_sample_size + + trace, model, decreases, invar, = None, None, None, None + res = {'program': jarfile, 'function': methodname, 'class': classname, + 'learning': None, 'tracing': None, 'checking': None} + try: + res['tracing'], trace = tracing(jarfile, classname, methodname, + samples, limit, loop_heads, + seed, ('pairanticorr' if overwrite_sampling is None else overwrite_sampling), + num_processes=processes) + except Exception as e: + return {'program': jarfile, 'function': methodname, 'class': classname, 'error': str(e)} + input_vars = trace.get_pos_identifiers(frame=5) + input_dim = len(input_vars) + + input_before = [] + input_after = [] + + # creating data and model with info from trace + + for head in loop_heads: + try: + head_input_before, head_input_after = trace.pair_all_traces_multi_as_tensor(head, loop_heads) # TODO CHANGE THIS + except RuntimeError as e: + return {'program': jarfile, 'function': 
methodname, 'class': classname, 'error': str(e)} + + input_before.append(head_input_before) + input_after.append(head_input_after) + + + #print(input_before[0]) + #print(input_after[0]) + + + + res['checking'] = .0 + res['learning'] = .0 + # Learning + for t in range(10): + + model = SumOfRelu(len(trace.get_pos_identifiers()), + n_out=len(loop_heads), + n_summands=5, + input_vars=input_vars) + optimiser = optim.AdamW(model.parameters(), lr=.05) + + print('=' * 40) + print(' - Learning') + print('=' * 40) + learn_start = time.time() + loss = None + + for iteration in range(1000): + optimiser.zero_grad() + + output_before = [] + output_after = [] + for i in range(len(loop_heads)): + output_before.append(model(input_before[i])) + output_after.append(model(input_after[i])) + + loss = torch.tensor([]) + for i in range(len(loop_heads)): + for j in range(0, i): + loss = torch.cat((loss, F.relu(output_after[j] - output_before[j])[:, j]), 0) + loss = torch.cat((loss, F.relu(output_after[i] - output_before[i] + 1)[:, i]), 0) + + loss = loss.mean() + if loss == 0.: + res["iteration"] = iteration + break + + loss.backward() + optimiser.step() + + res['learning'] += time.time() - learn_start + print("loss: {}".format(loss.item())) + + + print('=' * 40) + print(' - Rounding') + print('=' * 40) + S = 1 + W = model.fc1.weight + for row in W: + for x in row: + while (2 * S < abs(x)): + S = 2 * S + + if model.fc1.bias is not None: + b = model.fc1.bias + for x in b: + while (2 * S < abs(x)): + S = 2 * S + else: + b = torch.zeros(model.fc1.out_features) + + + W_round = (model.fc1.weight.data.numpy() / S).round() + b_round = (b.data.numpy() / S).round() + + symvars = sp.Matrix(sp.symbols(input_vars, real=True)) + symorig = (W * symvars + b).applyfunc(sp.Function('relu')) + symround = (W_round * symvars + b_round).applyfunc(sp.Function('relu')) + for i, e in enumerate(symorig): + print(i, e.xreplace({n: round(n, 2) for n in e.atoms(sp.Number)})) + print('After rounding') + for i, e in enumerate(symround): + print(i, e) + + print('=' * 40) + print(' - Checking') + print('=' * 40) + check_start = time.time() + + n_summands = model.n_summands + lexiW = [] + lexib = [] + lexiout = [] + for i in range(0, len(loop_heads)): + lexiW.append(W_round[n_summands*i:n_summands*(i+1)]) + lexib.append(b_round[n_summands*i:n_summands*(i+1)]) + lexiout.append(torch.ones(1, n_summands)) + + res['decrease'], res['invar'], cex = check_sum_of_relu(jarfile, classname, methodname, + loop_heads, input_vars, + lexiout, lexiW, lexib) + + if (res['decrease']): + print("Termination was proven.") + print('YES') + else: + print('Not yet.') + print(cex) + + res['checking'] += time.time() - check_start + + if (res['decrease']): + break + + print('=' * 40) + print(' - Randomising') + print('=' * 40) + seen = set() + for i, row in enumerate(W_round): + row = tuple(row.tolist()) + if row in seen: + nn.init.normal_(model.fc1.weight[i]) + e = symvars.dot(model.fc1.weight.data.numpy()[i]) + print("reinitialising {} to {}".format(i, e.xreplace( + {n: round(n, 2) for n in e.atoms(sp.Number)}))) + else: + seen.add(row) + + + return res + + + +# =================================== +# - strategies +# =================================== +def single_linear_with_relu(vcds, overwrite_sampling=None, strat_args=None): + print("Running with {}".format(('pairanticorr' if overwrite_sampling is None else overwrite_sampling))) + try: + loop_heads = get_loop_heads(jarfile, classname, methodname) + assert len(loop_heads) <= 2 + print(loop_heads) + # loop_offset = 
loop_heads[0] + except Exception as e: + return {'program': jarfile, 'function': methodname, 'class': classname, 'error': str(e)} + limit = 100 + if force_sample_size is None: + samples = 1000 + else: + samples = force_sample_size + + trace, model, decreases, bounded, = None, None, None, None + res = {'program': jarfile, 'function': methodname, 'class': classname, + 'learning': None, 'tracing': None, 'checking': None} + + try: + res['tracing'], trace = tracing(jarfile, classname, methodname, + samples, limit, loop_heads, + tracing_seed, + ('pairanticorr' if overwrite_sampling is None else overwrite_sampling), + num_processes=processes) + except Exception as e: + return {'program': jarfile, 'function': methodname, 'class': classname, 'error': str(e)} + + input_vars = trace.get_pos_identifiers(frame=5) + input_dim = len(input_vars) + + input_before = [] + input_after = [] + + # creating data and model with info from trace + + for head in loop_heads: + try: + head_input_before, head_input_after = trace.pair_all_traces_multi_as_tensor(head, + loop_heads) # TODO CHANGE THIS + except RuntimeError as e: + return {'program': jarfile, 'function': methodname, 'class': classname, 'error': str(e)} + + input_before.append(head_input_before) + input_after.append(head_input_after) + model = SumOfRelu(len(trace.get_pos_identifiers()), + n_out=len(loop_heads), + n_summands=1, + input_vars=input_vars) + + res['checking'] = .0 + res['learning'] = .0 + # Learning + optimiser = optim.AdamW(model.parameters(), lr=.05) + for t in range(2): + print('=' * 40) + print(' - Learning') + print('=' * 40) + learn_start = time.time() + loss = None + for iteration in range(1000): + optimiser.zero_grad() + + output_before = [] + output_after = [] + for i in range(len(loop_heads)): + output_before.append(model(input_before[i])) + output_after.append(model(input_after[i])) + + loss = torch.tensor([]) + for i in range(len(loop_heads)): + for j in range(0, i): + loss = torch.cat((loss, F.relu(output_after[j] - output_before[j])[:, j]), 0) + loss = torch.cat((loss, F.relu(output_after[i] - output_before[i] + 1)[:, i]), 0) + + loss = loss.mean() + if loss == 0.: + res["iteration"] = iteration + break + + loss.backward() + optimiser.step() + + res['learning'] += time.time() - learn_start + print("loss:", loss.item()) + + print('=' * 40) + print(' - Rounding') + print('=' * 40) + S = 1 + W = model.fc1.weight + for row in W: + for x in row: + while (2 * S < abs(x)): + S = 2 * S + + if model.fc1.bias is not None: + b = model.fc1.bias + for x in b: + while (2 * S < abs(x)): + S = 2 * S + else: + b = torch.zeros(model.fc1.out_features) + + W_round = (model.fc1.weight.data.numpy() / S).round() + b_round = (b.data.numpy() / S).round() + + symvars = sp.Matrix(sp.symbols(input_vars, real=True)) + symorig = (W * symvars).applyfunc(sp.Function('relu')) + symround = (W_round * symvars).applyfunc(sp.Function('relu')) + for i, e in enumerate(symorig): + print(i, e.xreplace({n: round(n, 2) for n in e.atoms(sp.Number)})) + print('After rounding') + for i, e in enumerate(symround): + print(i, e) + + print('=' * 40) + print(' - Checking') + print('=' * 40) + check_start = time.time() + + n_summands = model.n_summands + lexiW = [] + lexib = [] + for i in range(0, len(loop_heads)): + lexiW.append(W_round[n_summands * i:n_summands * (i + 1)]) + lexib.append(b_round[n_summands * i:n_summands * (i + 1)]) + + res['decrease'], res['bounded'], cex = check_sum_of_relu(jarfile, classname, methodname, + loop_heads, input_vars, + lexiW, lexib) + + if 
(res['decrease']): + print("Termination was proven.") + print('YES') + else: + print('Not yet.') + + res['checking'] += time.time() - check_start + + if (res['decrease']): + break + + print('=' * 40) + print(' - Randomising') + print('=' * 40) + seen = set() + for i, row in enumerate(W_round): + row = tuple(row.tolist()) + if row in seen: + nn.init.normal_(model.fc1.weight[i]) + e = symvars.dot(model.fc1.weight.data.numpy()[i]) + print("reinitialising {} to {}".format(i, e.xreplace( + {n: round(n, 2) for n in e.atoms(sp.Number)}))) + else: + seen.add(row) + + return res + + + +def i40_rank(vcds, overwrite_sampling=None): + try: + loop_heads = get_loop_heads(jarfile, classname, methodname) + assert len(loop_heads) == 1 + + # loop_offset = loop_heads[0] + except Exception as e: + return {'program': jarfile, 'function': methodname, 'class': classname, 'error': str(e)} + + limit = None + if force_sample_size is None: + samples = 1000 + else: + samples = force_sample_size + trace, model, decreases, bounded, = None, None, None, None + res = {'program': jarfile, 'function': methodname, 'class': classname, + 'learning': None, 'tracing': None, 'checking': None, 'sample_size': samples} + + res['tracing'], trace = tracing(jarfile, classname, methodname, + samples, limit, loop_heads, + tracing_seed, ('pairanticorr' if overwrite_sampling is None else overwrite_sampling), num_processes=processes) + + input_vars = trace.get_pos_identifiers(frame=5) + + + # creating data and model with info from trace + + input_before, input_after = trace.pair_all_traces_multi_as_tensor(loop_heads[0], loop_heads) # TODO CHANGE THIS + + model = SingleLinearWithRelu(len(trace.get_pos_identifiers()), input_vars=input_vars) + # model.fc1.weight = torch.nn.Parameter(torch.stack([torch.Tensor([1.0, -1.0])])) + res['checking'] = .0 + res['learning'] = .0 + # Learning + optimiser = optim.AdamW(model.parameters(), lr=.05) + for t in range(2): + print('=' * 40) + print(' - Learning') + print('=' * 40) + learn_start = time.time() + loss = None + for iteration in range(1000): + optimiser.zero_grad() + + output_before = model(input_before) + output_after = model(input_after) + + loss = (F.relu(output_after - output_before + 1)).mean() + # print(t, "loss:", loss.item()) + if loss == 0.: + res["iteration"] = iteration + break + + loss.backward() + optimiser.step() + + res['learning'] += time.time() - learn_start + print("loss:", loss.item()) + + print('=' * 40) + print(' - Rounding') + print('=' * 40) + S = 1 + W = model.fc1.weight + + if model.fc1.bias is not None: + b = model.fc1.bias + else: + b = torch.zeros(model.fc1.out_features) + + W_round = (model.fc1.weight.data.numpy() / S).round() + b_round = (b.data.numpy() / S).round() + + symvars = sp.Matrix(sp.symbols(input_vars, real=True)) + symorig = (W * symvars).applyfunc(sp.Function('relu')) + symround = (W_round * symvars).applyfunc(sp.Function('relu')) + for i, e in enumerate(symorig): + print(i, e.xreplace({n: round(n, 2) for n in e.atoms(sp.Number)})) + print('After rounding') + for i, e in enumerate(symround): + print(i, e) + + + print('=' * 40) + print(' - Checking') + print('=' * 40) + check_start = time.time() + + + n_summands = 1 + lexiW = [] + lexib = [] + for i in range(0, len(loop_heads)): + lexiW.append(W_round[n_summands * i:n_summands * (i + 1)]) + lexib.append(b_round[n_summands * i:n_summands * (i + 1)]) + + res['decrease'], res['bounded'], cex = check_sum_of_relu(jarfile, classname, methodname, + loop_heads, input_vars, + lexiW, lexib) + + + if (not 
res['decrease']): + print('Not yet.') + else: + print("Termination was proven.") + print('YES') + + res['checking'] += time.time() - check_start + + if (res['decrease']): + break + + print('=' * 40) + print(' - Randomising') + print('=' * 40) + seen = set() + for i, row in enumerate(W_round): + row = tuple(row.tolist()) + if row in seen: + nn.init.normal_(model.fc1.weight[i]) + e = symvars.dot(model.fc1.weight.data.numpy()[i]) + print("reinitialising {} to {}".format(i, e.xreplace( + {n: round(n, 2) for n in e.atoms(sp.Number)}))) + else: + seen.add(row) + return res + + +# multiply by two before calling .round() +def anticorr_sumofrelu2_cegsloop(vcds, overwrite_sampling=None, seed=None, strat_args=None): + if seed is not None: + torch.manual_seed(seed) + + sampling_strategy = "pairanticorr" + if overwrite_sampling is not None: + sampling_strategy = overwrite_sampling + + limit = 1000 + if force_sample_size is None: + samples = 500 + else: + samples = force_sample_size + + print("Running with {} and {} samples of max length {}.".format(sampling_strategy, samples, limit)) + try: + loop_heads = get_loop_heads(jarfile, classname, methodname) + if len(loop_heads) > 2: + return {'program': jarfile, 'function': methodname, 'class': classname, 'error': "More than two loops"} + except Exception as e: + return {'program': jarfile, 'function': methodname, 'class': classname, 'error': str(e)} + + trace, model, decreases, invar, = None, None, None, None + res = {'program': jarfile, 'function': methodname, 'class': classname, 'supported': None, 'trace_max_length': limit, + 'num_samples': samples} + + tracing_seed = seed + + try: + r = get_and_layout_traces(jarfile, classname, methodname, samples, limit, + loop_heads, tracing_seed, res, sampling_strategy) + + input_vars, input_before, input_after = r + except Exception as e: + print("Probably too few samples.") + res["error_cause"] = "Sampling did not produce any data pairs." + return res + + print('first observation: ', input_before[0][0]) + print("vars {}".format(input_vars)) + + res["num_pairs"] = list(map(lambda x: x.size()[0], input_before)) + + n_loop_heads = len(loop_heads) + + res['checking'] = .0 + res['learning'] = .0 + # Learning + + for t in range(20): + + model = SumOfRelu2(len(input_vars), + n_out=n_loop_heads, + n_summands=5) + optimiser = optim.AdamW(model.parameters(), lr=.05) + + print('=' * 40) + print(' - Learning') + print('=' * 40) + learn_start = time.time() + loss = None + for x in range(len(loop_heads)): + print("{} datapoints for head {}".format(input_before[x].size()[0], x)) + + for iteration in range(1000): + + optimiser.zero_grad() + + loss = torch.tensor([]) + for i in range(n_loop_heads): + output_before = model(input_before[i]) + output_after = model(input_after[i]) + loss = torch.cat((loss, F.relu(output_after[i] - output_before[i] + 1)), 0) + for j in range(0, i): + loss = torch.cat((loss, F.relu(output_after[j] - output_before[j])), 0) + + loss = loss.mean() + #print("loss: {}".format(loss.item())) + if loss == 0.: + res["iteration"] = iteration + break + + loss.backward() + optimiser.step() + + res['learning'] += time.time() - learn_start + print("loss:", loss.item()) + + Ses = [.5, 1.] 
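+        # Each scale S is tried in turn below: the learnt weights are divided
+        # by S before .round(), so S=.5 doubles them first and loses less
+        # precision than rounding the raw weights at S=1.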
+ + for S in Ses: + print('=' * 40) + print(' - Rounding with S =', S) + print('=' * 40) + + W_to_check = [] + b_to_check = [] + out_to_check = [] + for i in range(len(loop_heads)): + print(' ' + '=' * 38) + print(' - Lexicographic function ' + str(i)) + print(' ' + '=' * 38) + W = model.fc1[i].weight + if model.fc1[i].bias is not None: + b = model.fc1[i].bias + else: + b = torch.zeros(model.fc1[i].out_features) + out = model.fc2[i].weight + + W_round = (W.data.numpy() / S).round() + b_round = (b.data.numpy() / S).round() + out_round = (out.data.numpy() / S).round() + + symvars = sp.Matrix(sp.symbols(input_vars, real=True)) + symorig = (W * symvars + b[:, None]).applyfunc(sp.Function('relu')) + symround = (W_round * symvars + b_round[:, None]).applyfunc(sp.Function('relu')) + # print(out[0,1].item()) + for j, e in enumerate(symorig): + print(' ', j, + (round(out[0, j].item(), 2) * e).xreplace({n: round(n, 2) for n in e.atoms(sp.Number)})) + print(' After rounding') + for j, e in enumerate(symround): + print(' ', j, (out_round[0, j] * e)) + + W_to_check.append(W_round) + b_to_check.append(b_round) + out_to_check.append(out_round) + + print(' ' + '=' * 40) + print(' - Checking') + print(' ' + '=' * 40) + check_start = time.time() + + try: + res['decrease'], res['invar'], cex = check_sum_of_relu(jarfile, classname, methodname, + loop_heads, input_vars, + out_to_check, W_to_check, b_to_check) + except RuntimeError as e: + res.pop('checking') + res['error'] = str(e) + return res + + if res['decrease']: + print("Termination was proven.") + print('YES') + break + else: + print('Not yet.') + print(cex) + + if not cex == {}: + + dt = {"arguments": []} + for x in input_vars: + if x in cex: + dt["arguments"].append(cex[x]) + + r = None + res_tmp = res + try: + + cegs_file = "cex.json" + with open(cegs_file, "w+") as f: + json.dump(dt, f) + cegs_samples=100 + r = add_new_traces(input_before, input_after, jarfile, classname, methodname, cegs_samples, limit, + loop_heads, tracing_seed, res, cegs_file=cegs_file) + input_vars, input_before, input_after = r + except Exception as e: + print(e) + print("CEGS loop did not produce usable samples.") + res = res_tmp + + + + res['checking'] += time.time() - check_start + + if (res['decrease']): + break + + print('=' * 40) + print(' - Randomising') + print('=' * 40) + for i in range(0, n_loop_heads): + print(' ' + '=' * 38) + print(' - Lexicographic function ' + str(i)) + print(' ' + '=' * 38) + seen = set() + for j, row in enumerate(W_round): + row = tuple(row.tolist()) + if row in seen: + nn.init.normal_(model.fc1[i].weight[j]) + e = symvars.dot(model.fc1[i].weight.data.numpy()[j]) + print(" reinitialising {} to {}".format(j, e.xreplace( + {n: round(n, 2) for n in e.atoms(sp.Number)}))) + else: + seen.add(row) + + return res + + +def check_ranking_function(verilog_files, candidate): + check_cmd = ['ebmc', '--verbosity', '0', '--json-result', '/dev/stdout', + '--ranking-function', candidate] + check_cmd.extend(verilog_files) + result = subprocess.check_output(check_cmd, universal_newlines=True) + json_result = json.loads(result) + print(result) + return json_result["properties"][0]["status"] == "HOLDS" + + +def anticorr_sumofrelu2(verilog_files, vcds, overwrite_sampling=None, seed=None, strat_args=None): + if seed is not None: + torch.manual_seed(seed) + + sampling_strategy = "pairanticorr" + if overwrite_sampling is not None: + sampling_strategy = overwrite_sampling + + limit = 1000 + samples = len(vcds) + + print("Running with {} and {} samples of max length 
{}.".format(sampling_strategy, samples, limit)) + trace, model, decreases, invar, = None, None, None, None + res = {'supported': None, 'trace_max_length': limit, 'num_samples': samples} + + tracing_seed = seed + + r = get_and_layout_traces(vcds, limit, tracing_seed, res) + + input_vars, input_before, input_after = r + + #print('first observation: ', input_before[0][0]) + #print("vars {}".format(input_vars)) + + res["num_pairs"] = list(map(lambda x: x.size()[0], input_before)) + + n_loop_heads = 1 + + res['checking'] = .0 + res['learning'] = .0 + # Learning + + for t in range(20): + + model = SumOfRelu2(len(input_vars), + n_out=n_loop_heads, + n_summands=2) + optimiser = optim.AdamW(model.parameters(), lr=.05) + + print('=' * 40) + print(' - Learning') + print('=' * 40) + learn_start = time.time() + loss = None + for iteration in range(1000): + + optimiser.zero_grad() + + # output_before = [] + # output_after = [] + # for i in range(len(loop_heads)): + # output_before.append(model(input_before[i])) + # output_after.append(model(input_after[i])) + + loss = torch.tensor([]) + for i in range(n_loop_heads): + output_before = model(input_before[i]) + output_after = model(input_after[i]) + loss = torch.cat((loss, F.relu(output_after[i] - output_before[i] + 1)), 0) + for j in range(0, i): + loss = torch.cat((loss, F.relu(output_after[j] - output_before[j])), 0) + + loss = loss.mean() + if iteration % 100 == 0: + print("loss at iteration {}: {}".format(iteration, loss.item())) + if loss == 0.: + res["iteration"] = iteration + break + + loss.backward() + optimiser.step() + + res['learning'] += time.time() - learn_start + print("loss:", loss.item()) + + Ses = [2, 1] + + for S in Ses: + print('=' * 40) + print(' - Rounding with S =', S) + print('=' * 40) + + W_to_check = [] + b_to_check = [] + out_to_check = [] + # for i in range(len(loop_heads)): + i = 0 + print(' ' + '=' * 38) + print(' - Lexicographic function ' + str(i)) + print(' ' + '=' * 38) + W = model.fc1[i].weight + if model.fc1[i].bias is not None: + b = model.fc1[i].bias + else: + b = torch.zeros(model.fc1[i].out_features) + out = model.fc2[i].weight + + W_round = (W.data.numpy() * S).round().astype(int) + b_round = (b.data.numpy() * S).round().astype(int) + out_round = (out.data.numpy() * S).round().astype(int) + + print(input_vars) + symvars = sp.Matrix(sp.symbols(input_vars, real=False)) + symorig = (W * symvars + b[:, None]) #.applyfunc(sp.Function('relu')) + symround = sp.simplify(W_round * symvars + b_round[:, None]) #.applyfunc(sp.Function('relu')) + print(out[0,1].item()) + for j, e in enumerate(symorig): + print(' ', j, + (round(out[0, j].item(), 2) * e).xreplace({n: round(n, 2) for n in e.atoms(sp.Number)})) + print(' After rounding') + for j, e in enumerate(symround): + print(' ', j, (out_round[0, j] * e)) + linear_combination = sp.simplify(sum([(out_round[0, j] * e) for j, e in + enumerate(symround)], 0)) + + print(linear_combination) + + W_to_check.append(W_round) + b_to_check.append(b_round) + out_to_check.append(out_round) + + print(' ' + '=' * 40) + print(' - Checking') + print(' ' + '=' * 40) + check_start = time.time() + + try: + print("input_vars: ", input_vars) + print("out_to_check: ", out_to_check) + print("W_to_check: ", W_to_check) + print("b_to_check: ", b_to_check) + ## + ##fun = [] + ##for W,b in zip(W_to_check, b_to_check): + + ## assert np.shape(W)[0] == np.shape(b)[0] + ## SOR = [] + ## for i in range(0, np.shape(W)[0]): + ## SOR.append([int(x) for x in W[i,:]] + [int(b[i])]) + ## fun.append(SOR) + + 
##print("fun: ", fun) + ##fun_str = "0"; + ##for layer in fun[0]: + ## fun_str += " + (0"; + ## for (coeff, var) in zip(layer, input_vars): + ## fun_str += " + " + str(coeff) + " * " + var + ## fun_str += ")" + ##print("FUN: ", fun_str) + # out = [[int(x) for x in W[0,:]] for W in out_to_check] + # hidden = [[row for row in relus] for relus in fun] + # print("out:", out) + # print("hidden:", hidden) + ## + # rewritten_relu = re.sub(r'relu\(([^\)]+)\)', r'(\1 > 0 ? \1 : 0)', + # str(linear_combination)) + rewritten_relu = str(linear_combination) + if rewritten_relu.startswith('-'): + rewritten_relu = "0 " + rewritten_relu + res['decrease'] = check_ranking_function(verilog_files, + rewritten_relu) + except RuntimeError as e: + res.pop('checking') + res['error'] = str(e) + return res + + if res['decrease']: + print("Liveness was proven using ranking function", + rewritten_relu, "in iteration", t) + print('YES') + break + else: + print(rewritten_relu, "is not a ranking function") + print('Not yet.') + + res['checking'] += time.time() - check_start + + if (res['decrease']): + break + + print('=' * 40) + print(' - Randomising') + print('=' * 40) + for i in range(0, n_loop_heads): + print(' ' + '=' * 38) + print(' - Lexicographic function ' + str(i)) + print(' ' + '=' * 38) + seen = set() + for j, row in enumerate(W_round): + row = tuple(row.tolist()) + if row in seen: + nn.init.normal_(model.fc1[i].weight[j]) + e = symvars.dot(model.fc1[i].weight.data.numpy()[j]) + print(" reinitialising {} to {}".format(j, e.xreplace( + {n: round(n, 2) for n in e.atoms(sp.Number)}))) + else: + seen.add(row) + + return res + + +def anticorr_sumoflinears_or_concat(verilog_files, vcds, overwrite_sampling=None, seed=None, strat_args=None): + if seed is not None: + torch.manual_seed(seed) + + sampling_strategy = "pairanticorr" + if overwrite_sampling is not None: + sampling_strategy = overwrite_sampling + + limit = 1000 + samples = len(vcds) + + print("Running with {} and {} samples of max length {}.".format(sampling_strategy, samples, limit)) + trace, model, decreases, invar, = None, None, None, None + res = {'supported': None, 'trace_max_length': limit, 'num_samples': samples} + + tracing_seed = seed + + r = get_and_layout_traces(vcds, limit, tracing_seed, res) + + input_vars, input_before, input_after = r + + #print('first observation: ', input_before[0][0]) + #print("vars {}".format(input_vars)) + + res["num_pairs"] = list(map(lambda x: x.size()[0], input_before)) + + n_loop_heads = 1 + + res['checking'] = .0 + res['learning'] = .0 + # Learning + + for t in range(200): + + model = SumOfLinearsWithLex(len(input_vars), + n_out=n_loop_heads, + n_summands=2) + optimiser = optim.AdamW(model.parameters(), lr=.05) + + print('=' * 40) + print(' - Learning') + print('=' * 40) + learn_start = time.time() + loss = None + for iteration in range(1000): + + optimiser.zero_grad() + + # output_before = [] + # output_after = [] + # for i in range(len(loop_heads)): + # output_before.append(model(input_before[i])) + # output_after.append(model(input_after[i])) + + loss = torch.tensor([]) + for i in range(n_loop_heads): + output_before = model(input_before[i]) + output_after = model(input_after[i]) + loss = torch.cat((loss, F.relu(output_after[i] - output_before[i] + 1)), 0) + for j in range(0, i): + loss = torch.cat((loss, F.relu(output_after[j] - output_before[j])), 0) + + loss = loss.mean() + if iteration % 100 == 0: + print("loss at iteration {}: {}".format(iteration, loss.item())) + if loss == 0.: + res["iteration"] = 
iteration + break + + loss.backward() + optimiser.step() + + res['learning'] += time.time() - learn_start + print("loss:", loss.item()) + + Ses = [2, 1] + + for S in Ses: + print('=' * 40) + print(' - Rounding with S =', S) + print('=' * 40) + + W_to_check = [] + b_to_check = [] + out_to_check = [] + # for i in range(len(loop_heads)): + i = 0 + print(' ' + '=' * 38) + print(' - Lexicographic function ' + str(i)) + print(' ' + '=' * 38) + # W = model.fc1[i].weight + # if model.fc1[i].bias is not None: + # b = model.fc1[i].bias + # else: + # b = torch.zeros(model.fc1[i].out_features) + # out = model.fc2[i].weight + W = model.fc1.weight + if model.fc1.bias is not None: + b = model.fc1.bias + else: + b = torch.zeros(model.fc1.out_features) + + W_round = (W.data.numpy() * S).round().astype(int) + b_round = (b.data.numpy() * S).round().astype(int) + # out_round = (out.data.numpy() * S).round().astype(int) + + print(input_vars) + symvars = sp.Matrix(sp.symbols(input_vars, real=False)) + symorig = (W * symvars + b[:, None]) #.applyfunc(sp.Function('relu')) + symround = sp.simplify(W_round * symvars + b_round[:, None]) #.applyfunc(sp.Function('relu')) + # print(out[0,1].item()) + # for j, e in enumerate(symorig): + # print(' ', j, + # (round(out[0, j].item(), 2) * e).xreplace({n: round(n, 2) for n in e.atoms(sp.Number)})) + print(' After rounding') + # for j, e in enumerate(symround): + # print(' ', j, (out_round[0, j] * e)) + # linear_combination = sum([(out_round[0, j] * e) for j, e in + # enumerate(symround)], 0) + linear_combination = sp.simplify(sum([e for j, e in + enumerate(symround)], 0)) + + print(linear_combination) + + W_to_check.append(W_round) + b_to_check.append(b_round) + # out_to_check.append(out_round) + + print(' ' + '=' * 40) + print(' - Checking') + print(' ' + '=' * 40) + check_start = time.time() + + try: + res['decrease'] = False + + print("input_vars: ", input_vars) + # print("out_to_check: ", out_to_check) + print("W_to_check: ", W_to_check) + print("b_to_check: ", b_to_check) + + concat_operands = [] + for l in W_to_check[0]: + for j, v in enumerate(l): + if v != 0: + concat_operands.append(str(symvars[j])) + if len(concat_operands): + concat_str = "{" + ",".join(concat_operands) + "}" + res['decrease'] = check_ranking_function(verilog_files, + concat_str) + + ## + ##fun = [] + ##for W,b in zip(W_to_check, b_to_check): + + ## assert np.shape(W)[0] == np.shape(b)[0] + ## SOR = [] + ## for i in range(0, np.shape(W)[0]): + ## SOR.append([int(x) for x in W[i,:]] + [int(b[i])]) + ## fun.append(SOR) + + ##print("fun: ", fun) + ##fun_str = "0"; + ##for layer in fun[0]: + ## fun_str += " + (0"; + ## for (coeff, var) in zip(layer, input_vars): + ## fun_str += " + " + str(coeff) + " * " + var + ## fun_str += ")" + ##print("FUN: ", fun_str) + # out = [[int(x) for x in W[0,:]] for W in out_to_check] + # hidden = [[row for row in relus] for relus in fun] + # print("out:", out) + # print("hidden:", hidden) + ## + # rewritten_relu = re.sub(r'relu\(([^\)]+)\)', r'(\1 > 0 ? 
\1 : 0)', + # str(linear_combination)) + if not res['decrease']: + rewritten_relu = str(linear_combination) + if rewritten_relu.startswith('-'): + rewritten_relu = "0 " + rewritten_relu + res['decrease'] = check_ranking_function(verilog_files, + rewritten_relu) + else: + rewritten_relu = concat_str + + except RuntimeError as e: + res.pop('checking') + res['error'] = str(e) + return res + + if res['decrease']: + print("Liveness was proven using ranking function", + rewritten_relu, "in iteration", t) + print('YES') + break + else: + print(rewritten_relu, "is not a ranking function") + print('Not yet.') + + res['checking'] += time.time() - check_start + + if (res['decrease']): + break + + print('=' * 40) + print(' - Randomising') + print('=' * 40) + for i in range(0, n_loop_heads): + print(' ' + '=' * 38) + print(' - Lexicographic function ' + str(i)) + print(' ' + '=' * 38) + seen = set() + for j, row in enumerate(W_round): + row = tuple(row.tolist()) + if row in seen: + nn.init.normal_(model.fc1.weight[j]) + e = symvars.dot(model.fc1.weight.data.numpy()[j]) + print(" reinitialising {} to {}".format(j, e.xreplace( + {n: round(n, 2) for n in e.atoms(sp.Number)}))) + else: + seen.add(row) + + return res + + +# anticorr_sumofrelu2 with 10 nodes +def anticorr_sumofrelu_dynamic_nodes(vcds, overwrite_sampling=None, seed=None, strat_args=None): + if seed is not None: + torch.manual_seed(seed) + + try: + n_summds = int(strat_args.split("=")[1]) + except Exception as e: + print("Could not get number of summands from strat argument.") + raise e + + sampling_strategy = "pairanticorr" + if overwrite_sampling is not None: + sampling_strategy = overwrite_sampling + + limit = 1000 + if force_sample_size is None: + samples = 1000 + else: + samples = force_sample_size + + print("Running with {} and {} samples of max length {} with {} nodes in the NN.".format(sampling_strategy, samples, + limit, n_summds)) + try: + loop_heads = get_loop_heads(jarfile, classname, methodname) + if len(loop_heads) > 2: + return {'program': jarfile, 'function': methodname, 'class': classname, 'error': "More than two loops"} + except Exception as e: + return {'program': jarfile, 'function': methodname, 'class': classname, 'error': str(e)} + + trace, model, decreases, invar, = None, None, None, None + res = {'program': jarfile, 'function': methodname, 'class': classname, 'supported': None, 'trace_max_length': limit, + 'num_samples': samples, 'nn_nodes': n_summds} + + tracing_seed = seed + + try: + r = get_and_layout_traces(jarfile, classname, methodname, samples, limit, + loop_heads, tracing_seed, res, sampling_strategy) + + input_vars, input_before, input_after = r + except Exception as e: + print("Probably too few samples.") + res["error_cause"] = "Sampling did not produce any data pairs." 
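+        # hand the partial result record back so the caller can log the failure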
+ return res + + print('first observation: ', input_before[0][0]) + print("vars {}".format(input_vars)) + + res["num_pairs"] = list(map(lambda x: x.size()[0], input_before)) + + n_loop_heads = len(loop_heads) + + res['checking'] = .0 + res['learning'] = .0 + # Learning + + for t in range(20): + + model = SumOfRelu2(len(input_vars), + n_out=n_loop_heads, + n_summands=n_summds) + optimiser = optim.AdamW(model.parameters(), lr=.05) + + print('=' * 40) + print(' - Learning') + print('=' * 40) + learn_start = time.time() + loss = None + for iteration in range(1000): + + optimiser.zero_grad() + + # output_before = [] + # output_after = [] + # for i in range(len(loop_heads)): + # output_before.append(model(input_before[i])) + # output_after.append(model(input_after[i])) + + loss = torch.tensor([]) + for i in range(n_loop_heads): + output_before = model(input_before[i]) + output_after = model(input_after[i]) + loss = torch.cat((loss, F.relu(output_after[i] - output_before[i] + 1)), 0) + for j in range(0, i): + loss = torch.cat((loss, F.relu(output_after[j] - output_before[j])), 0) + + loss = loss.mean() + print("loss: {}".format(loss.item())) + if loss == 0.: + res["iteration"] = iteration + break + + loss.backward() + optimiser.step() + + res['learning'] += time.time() - learn_start + print("loss:", loss.item()) + + Ses = [1., .5] + + for S in Ses: + print('=' * 40) + print(' - Rounding with S =', S) + print('=' * 40) + + W_to_check = [] + b_to_check = [] + out_to_check = [] + for i in range(len(loop_heads)): + print(' ' + '=' * 38) + print(' - Lexicographic function ' + str(i)) + print(' ' + '=' * 38) + W = model.fc1[i].weight + if model.fc1[i].bias is not None: + b = model.fc1[i].bias + else: + b = torch.zeros(model.fc1[i].out_features) + out = model.fc2[i].weight + + W_round = (W.data.numpy() / S).round() + b_round = (b.data.numpy() / S).round() + out_round = (out.data.numpy() / S).round() + + symvars = sp.Matrix(sp.symbols(input_vars, real=True)) + symorig = (W * symvars + b[:, None]).applyfunc(sp.Function('relu')) + symround = (W_round * symvars + b_round[:, None]).applyfunc(sp.Function('relu')) + # print(out[0,1].item()) + for j, e in enumerate(symorig): + print(' ', j, + (round(out[0, j].item(), 2) * e).xreplace({n: round(n, 2) for n in e.atoms(sp.Number)})) + print(' After rounding') + for j, e in enumerate(symround): + print(' ', j, (out_round[0, j] * e)) + + W_to_check.append(W_round) + b_to_check.append(b_round) + out_to_check.append(out_round) + + print(' ' + '=' * 40) + print(' - Checking') + print(' ' + '=' * 40) + check_start = time.time() + + try: + res['decrease'], res['invar'], cex = check_sum_of_relu(jarfile, classname, methodname, + loop_heads, input_vars, + out_to_check, W_to_check, b_to_check) + except RuntimeError as e: + res.pop('checking') + res['error'] = str(e) + return res + + if res['decrease']: + print("Termination was proven.") + print('YES') + break + else: + print('Not yet.') + print(cex) + + res['checking'] += time.time() - check_start + + if (res['decrease']): + break + + print('=' * 40) + print(' - Randomising') + print('=' * 40) + for i in range(0, n_loop_heads): + print(' ' + '=' * 38) + print(' - Lexicographic function ' + str(i)) + print(' ' + '=' * 38) + seen = set() + for j, row in enumerate(W_round): + row = tuple(row.tolist()) + if row in seen: + nn.init.normal_(model.fc1[i].weight[j]) + e = symvars.dot(model.fc1[i].weight.data.numpy()[j]) + print(" reinitialising {} to {}".format(j, e.xreplace( + {n: round(n, 2) for n in e.atoms(sp.Number)}))) + else: 
+ seen.add(row) + + return res + + +# multiply by two before calling .round() +def anticorr_sumofrelu2_gaussian(vcds, overwrite_sampling=None, seed=None, strat_args=None): + if seed is not None: + torch.manual_seed(seed) + + limit = 1000 + if force_sample_size is None: + samples = 1000 + else: + samples = force_sample_size + + print("Running with {} and {} samples".format('mvgaussian' if overwrite_sampling is None else overwrite_sampling, + samples)) + try: + loop_heads = get_loop_heads(jarfile, classname, methodname) + if (len(loop_heads) > 2): + return {'program': jarfile, 'function': methodname, 'class': classname, 'error': "More than two loops"} + except Exception as e: + return {'program': jarfile, 'function': methodname, 'class': classname, 'error': str(e)} + + trace, model, decreases, invar, = None, None, None, None + res = {'program': jarfile, 'function': methodname, 'class': classname, 'supported': None} + + tracing_seed = seed + + try: + input_vars, input_before, input_after = get_and_layout_traces_mvgaussian(jarfile, classname, methodname, + samples, limit, loop_heads, + tracing_seed, res) + except Exception as e: + res['error'] = str(e) + return res + + print('first observation: ', input_before[0][0]) + + n_loop_heads = len(loop_heads) + + res['checking'] = .0 + res['learning'] = .0 + # Learning + for t in range(10): + + model = SumOfRelu2(len(input_vars), + n_out=n_loop_heads, + n_summands=10) + optimiser = optim.AdamW(model.parameters(), lr=.05) + + print('=' * 40) + print(' - Learning') + print('=' * 40) + learn_start = time.time() + loss = None + for iteration in range(1000): + optimiser.zero_grad() + + # output_before = [] + # output_after = [] + # for i in range(len(loop_heads)): + # output_before.append(model(input_before[i])) + # output_after.append(model(input_after[i])) + + loss = torch.tensor([]) + for i in range(n_loop_heads): + output_before = model(input_before[i]) + output_after = model(input_after[i]) + loss = torch.cat((loss, F.relu(output_after[i] - output_before[i] + 1)), 0) + for j in range(0, i): + loss = torch.cat((loss, F.relu(output_after[j] - output_before[j])), 0) + + loss = loss.mean() + if loss == 0.: + res["iteration"] = iteration + break + + loss.backward() + optimiser.step() + + res['learning'] += time.time() - learn_start + print("loss:", loss.item()) + + Ses = [.5, 1.] 
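+        # Same rounding sweep as in the other strategies: dividing the weights
+        # by S=.5 before .round() keeps more of the learnt signal than S=1.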
+ + for S in Ses: + print('=' * 40) + print(' - Rounding with S =', S) + print('=' * 40) + + W_to_check = [] + b_to_check = [] + out_to_check = [] + for i in range(len(loop_heads)): + print(' ' + '=' * 38) + print(' - Lexicographic function ' + str(i)) + print(' ' + '=' * 38) + W = model.fc1[i].weight + if model.fc1[i].bias is not None: + b = model.fc1[i].bias + else: + b = torch.zeros(model.fc1[i].out_features) + out = model.fc2[i].weight + + W_round = (W.data.numpy() / S).round() + b_round = (b.data.numpy() / S).round() + out_round = (out.data.numpy() / S).round() + + symvars = sp.Matrix(sp.symbols(input_vars, real=True)) + symorig = (W * symvars + b[:, None]).applyfunc(sp.Function('relu')) + symround = (W_round * symvars + b_round[:, None]).applyfunc(sp.Function('relu')) + # print(out[0,1].item()) + for j, e in enumerate(symorig): + print(' ', j, + (round(out[0, j].item(), 2) * e).xreplace({n: round(n, 2) for n in e.atoms(sp.Number)})) + print(' After rounding') + for j, e in enumerate(symround): + print(' ', j, (out_round[0, j] * e)) + + W_to_check.append(W_round) + b_to_check.append(b_round) + out_to_check.append(out_round) + + print(' ' + '=' * 40) + print(' - Checking') + print(' ' + '=' * 40) + check_start = time.time() + + try: + res['decrease'], res['invar'], cex = check_sum_of_relu(jarfile, classname, methodname, + loop_heads, input_vars, + out_to_check, W_to_check, b_to_check) + except RuntimeError as e: + res.pop('checking') + res['error'] = str(e) + return res + + if res['decrease']: + print("Termination was proven.") + print('YES') + break + else: + print('Not yet.') + print(cex) + + res['checking'] += time.time() - check_start + + if (res['decrease']): + break + + print('=' * 40) + print(' - Randomising') + print('=' * 40) + for i in range(0, n_loop_heads): + print(' ' + '=' * 38) + print(' - Lexicographic function ' + str(i)) + print(' ' + '=' * 38) + seen = set() + for j, row in enumerate(W_round): + row = tuple(row.tolist()) + if row in seen: + nn.init.normal_(model.fc1[i].weight[j]) + e = symvars.dot(model.fc1[i].weight.data.numpy()[j]) + print(" reinitialising {} to {}".format(j, e.xreplace( + {n: round(n, 2) for n in e.atoms(sp.Number)}))) + else: + seen.add(row) + + return res + + +def anticorr_sumofrelu_baseline(vcds, overwrite_sampling=None, seed=None, strat_args=None): + if seed is not None: + torch.manual_seed(seed) + + print("Running with {}".format(('pairanticorr' if overwrite_sampling is None else overwrite_sampling))) + try: + loop_heads = get_loop_heads(jarfile, classname, methodname) + if (len(loop_heads) > 2): + return {'program': jarfile, 'function': methodname, 'class': classname, 'error': "More than two loops"} + except Exception as e: + return {'program': jarfile, 'function': methodname, 'class': classname, 'error': str(e)} + + limit = 100 + if force_sample_size is None: + samples = 1000 + else: + samples = force_sample_size + + trace, model, decreases, invar, = None, None, None, None + res = {'program': jarfile, 'function': methodname, 'class': classname, 'supported': None} + + tracing_seed = seed + + try: + input_vars, input_before, input_after = get_and_layout_traces(jarfile, classname, methodname, + samples, limit, loop_heads, tracing_seed, res) + except Exception as e: + res['error'] = str(e) + return res + + print('first observation: ', input_before[0][0]) + + n_loop_heads = len(loop_heads) + + res['checking'] = .0 + res['learning'] = .0 + # Learning + for t in range(10): + + model = SumOfRelu2(len(input_vars), + n_out=n_loop_heads, + 
n_summands=1) + optimiser = optim.AdamW(model.parameters(), lr=.05) + + print('=' * 40) + print(' - Learning') + print('=' * 40) + learn_start = time.time() + loss = None + for iteration in range(1000): + optimiser.zero_grad() + + # output_before = [] + # output_after = [] + # for i in range(len(loop_heads)): + # output_before.append(model(input_before[i])) + # output_after.append(model(input_after[i])) + + loss = torch.tensor([]) + for i in range(n_loop_heads): + output_before = model(input_before[i]) + output_after = model(input_after[i]) + loss = torch.cat((loss, F.relu(output_after[i] - output_before[i] + 1)), 0) + for j in range(0, i): + loss = torch.cat((loss, F.relu(output_after[j] - output_before[j])), 0) + + loss = loss.mean() + if loss == 0.: + res["iteration"] = iteration + break + loss.backward() + optimiser.step() + + res['learning'] += time.time() - learn_start + print("loss:", loss.item()) + + Ses = [.5, 1.] + + for S in Ses: + print('=' * 40) + print(' - Rounding with S =', S) + print('=' * 40) + + W_to_check = [] + b_to_check = [] + out_to_check = [] + for i in range(len(loop_heads)): + print(' ' + '=' * 38) + print(' - Lexicographic function ' + str(i)) + print(' ' + '=' * 38) + W = model.fc1[i].weight + if model.fc1[i].bias is not None: + b = model.fc1[i].bias + else: + b = torch.zeros(model.fc1[i].out_features) + out = model.fc2[i].weight + + W_round = (W.data.numpy() / S).round() + b_round = (b.data.numpy() / S).round() + out_round = (out.data.numpy() / S).round() + + symvars = sp.Matrix(sp.symbols(input_vars, real=True)) + symorig = (W * symvars + b[:, None]).applyfunc(sp.Function('relu')) + symround = (W_round * symvars + b_round[:, None]).applyfunc(sp.Function('relu')) + # print(out[0,1].item()) + for j, e in enumerate(symorig): + print(' ', j, + (round(out[0, j].item(), 2) * e).xreplace({n: round(n, 2) for n in e.atoms(sp.Number)})) + print(' After rounding') + for j, e in enumerate(symround): + print(' ', j, (out_round[0, j] * e)) + + W_to_check.append(W_round) + b_to_check.append(b_round) + out_to_check.append(out_round) + + print(' ' + '=' * 40) + print(' - Checking') + print(' ' + '=' * 40) + check_start = time.time() + + try: + res['decrease'], res['invar'], cex = check_sum_of_relu(jarfile, classname, methodname, + loop_heads, input_vars, + out_to_check, W_to_check, b_to_check) + except RuntimeError as e: + res.pop('checking') + res['error'] = str(e) + return res + + if res['decrease']: + print("Termination was proven.") + print('YES') + break + else: + print('Not yet.') + print(cex) + + res['checking'] += time.time() - check_start + + if (res['decrease']): + break + + print('=' * 40) + print(' - Randomising') + print('=' * 40) + for i in range(0, n_loop_heads): + print(' ' + '=' * 38) + print(' - Lexicographic function ' + str(i)) + print(' ' + '=' * 38) + seen = set() + for j, row in enumerate(W_round): + row = tuple(row.tolist()) + if row in seen: + nn.init.normal_(model.fc1[i].weight[j]) + e = symvars.dot(model.fc1[i].weight.data.numpy()[j]) + print(" reinitialising {} to {}".format(j, e.xreplace( + {n: round(n, 2) for n in e.atoms(sp.Number)}))) + else: + seen.add(row) + + return res + diff --git a/src/ebmc/ranking-function-synthesis/nuterm/termination.py b/src/ebmc/ranking-function-synthesis/nuterm/termination.py new file mode 100644 index 000000000..b6303b40f --- /dev/null +++ b/src/ebmc/ranking-function-synthesis/nuterm/termination.py @@ -0,0 +1,157 @@ +from models.SingleLinearWithRelu import SingleLinearWithRelu +from tracing.trace_handle import Trace 
+from loopheads import get_loop_heads + +from models.SingleLinear import SingleLinear + +from tracing.utils import * + +import torch.optim as optim +from learning import * + +from tracing.trace_handle import Trace + +from javachecker import * +import time + +import numpy as np +from numpy.linalg import norm + +def tracing(PROGRAM, CLASS, FUNCTION, SAMPLES, limit, LOOP_OFFSET, tracing_seed, sampling_strategy="default", num_processes=1): + # ######### TRACING ########## + print('=' * 40) + print(' - Tracing') + print('=' * 40) + + f = tempfile.NamedTemporaryFile() + print("Traces are being written to {}.".format(f.name)) + + #assert tracing_seed is not None + start = time.time() # process_time excludes the time taken by the traces + + run_tracing(PROGRAM, CLASS, FUNCTION, f.name, SAMPLES, LOOP_OFFSET, seed=tracing_seed, + tracelimit=limit, sampling_strategy=sampling_strategy, num_processes=num_processes) + tracing_time = time.time() - start + + # reading trace + trace = Trace(f.name) + # assert trace.number_of_traces() == SAMPLES,'number of traces should be {}'.format(SAMPLES) + + return tracing_time, trace + + +def cegs_tracing(PROGRAM, CLASS, FUNCTION, SAMPLES, limit, LOOP_OFFSET, tracing_seed, cegs_file, num_processes=1): + # ######### TRACING ########## + print('=' * 40) + print(' - CEGS Tracing') + print('=' * 40) + + TRACE_FILE = 'cegs_test.json' + # assert tracing_seed is not None + start = time.time() # process_time excludes the time taken by the traces + run_cegs_tracing(PROGRAM, CLASS, FUNCTION, TRACE_FILE, SAMPLES, loopheads=LOOP_OFFSET, cegs_file=cegs_file,seed=tracing_seed, + tracelimit=limit, num_processes=num_processes) + tracing_time = time.time() - start + + # reading trace + trace = Trace(TRACE_FILE) + # assert trace.number_of_traces() == SAMPLES,'number of traces should be {}'.format(SAMPLES) + + return tracing_time, trace + +def run_learning(trace, LOOP_OFFSET, MODEL, learning_function, loss_function): + # ######### LEARNING ########## + print('='*40) + print(' - Learning') + print('='*40) + + start = time.time() + model, loss = learning_function(trace, LOOP_OFFSET, Model=MODEL, loss_func=loss_function) + model.print_sympy() + learning_time = time.time() - start + + print("loss: ", loss) + + return learning_time, model, loss + + +def run_checking(PROGRAM, CLASS, FUNCTION, LOOP_OFFSET, model): + + # ########## CHECKING ######## + print('=' * 40) + print(' - Checking') + print('=' * 40) + + S = model.get_round_scaling() + start = time.time() + (decreases, bounded) = check(PROGRAM, + CLASS, + FUNCTION, LOOP_OFFSET, + model.input_vars, + [int(x*S) for x in model.get_round_weights()[0]] + [0]) # Last 0 is for offset translation + + checking_time = time.time() - start + + if (decreases and bounded): + print('Hurray! It decreases and it is bounded') + elif (decreases): + print('Almost. It decreases but it is not bounded') + else: + print('Too bad. 
That\'s not a ranking function')

+
+    return checking_time, decreases, bounded
+
+
+def run_termination(PROGRAM, CLASS, FUNCTION, loop_offset, SAMPLES, limit, MODEL, learning_function, loss_function,
+                    tracing_seed, sampling_strategy="default"):
+    # start = time.time()  # process_time excludes the time taken by the traces
+
+    trace, model, decreases, bounded, = None, None, None, None
+
+    # ######### LOOP HEADS #######
+    # print(PROGRAM)
+    # LOOP_OFFSET = get_loop_heads(file_path + PROGRAM, CLASS, FUNCTION)[0]
+
+    res = {'program': PROGRAM, 'function': FUNCTION, 'class': CLASS, 'learning': None, 'tracing': None, 'checking': None}
+
+    try:
+        res['tracing'], trace = tracing(PROGRAM, CLASS, FUNCTION, SAMPLES, limit, loop_offset, tracing_seed,
+                                        sampling_strategy=sampling_strategy)
+
+    except Exception as e:
+        res['error'] = str(e)
+        return res
+
+    try:
+        res['learning'], model, loss = run_learning(trace, loop_offset, MODEL, learning_function, loss_function)
+
+        model.print_sympy()
+        roundweights = model.get_round_weights()[0]
+        weights = model.get_weights()[0]
+        res['rounderror'] = norm(weights - roundweights, ord=np.inf)
+        #max([abs((w - r)/w) for (w,r) in zip(weights, roundweights)])
+        #
+
+        # this is implemented in ModelName.print_sympy()
+        #symvars = sp.Matrix(sp.symbols(model.input_vars, real=True))
+        #symorig = model.get_weights() * symvars
+        #symround = model.get_round_weights() * symvars
+        #for e in symorig:
+        #    print(e)
+        #print('After rounding')
+        #for e in symround:
+        #    print(e)
+        print('Error: {}'.format(res['rounderror']))
+    except Exception as e:
+        res['error'] = str(e)
+        return res
+
+    try:
+        res['checking'], res['decrease'], res['bounded'] = run_checking(PROGRAM, CLASS, FUNCTION, loop_offset, model)
+    except Exception as e:
+        res['error'] = str(e)
+        return res
+
+    return res
+
+
diff --git a/src/ebmc/ranking-function-synthesis/nuterm/tracing/__init__.py b/src/ebmc/ranking-function-synthesis/nuterm/tracing/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/ebmc/ranking-function-synthesis/nuterm/tracing/trace_handle.py b/src/ebmc/ranking-function-synthesis/nuterm/tracing/trace_handle.py
new file mode 100644
index 000000000..f4ac831d3
--- /dev/null
+++ b/src/ebmc/ranking-function-synthesis/nuterm/tracing/trace_handle.py
@@ -0,0 +1,265 @@
+import matplotlib.pyplot as plt
+import torch
+import itertools
+from collections import defaultdict
+import vcd.reader
+import re
+
+from .utils import pairing
+
+
+def get_trace_frames_with_loc(frames, loc):
+    return list(filter(lambda x: x["location"] == loc, frames))
+
+def vcd_tokens_to_trace(vcd_tokens):
+    token = next(vcd_tokens)
+    assert token.kind is vcd.reader.TokenKind.DATE
+    token = next(vcd_tokens)
+    assert token.kind is vcd.reader.TokenKind.TIMESCALE
+    scopes_stack = []
+    vars = {}
+    token = next(vcd_tokens)
+    assert token.kind is vcd.reader.TokenKind.SCOPE
+    scopes_stack.append(token.data)
+    while True:
+        token = next(vcd_tokens)
+        if token.kind is vcd.reader.TokenKind.UPSCOPE:
+            scopes_stack.pop()
+            if not len(scopes_stack):
+                break
+        elif token.kind is vcd.reader.TokenKind.SCOPE:
+            scopes_stack.append(token.data)
+        else:
+            assert token.kind is vcd.reader.TokenKind.VAR
+            if token.data.size > 1:
+                var_without_module = re.sub(r'^[^\.]*\.', '', token.data.reference)
+                var_with_brackets = re.sub(r'__(\d+)__\.', r'[\1].', var_without_module)
+                vars[token.data.id_code] = var_with_brackets
+    print(vars)
+    token = next(vcd_tokens)
+    assert token.kind is vcd.reader.TokenKind.ENDDEFINITIONS
+    eof_reached = False
+    frames = 
+    var_value = {}
+    constants = {}
+    token = next(vcd_tokens)
+    while not eof_reached:
+        assert token.kind is vcd.reader.TokenKind.CHANGE_TIME
+        while True:
+            try:
+                token = next(vcd_tokens)
+            except StopIteration:
+                eof_reached = True
+                break
+            if token.kind is vcd.reader.TokenKind.CHANGE_SCALAR:
+                # var_value[var_names[token.data.id_code]] = int(token.data.value)
+                continue
+            if token.kind is vcd.reader.TokenKind.CHANGE_VECTOR:
+                var_value[var_names[token.data.id_code]] = token.data.value
+                continue
+            break
+        frame = {
+            "names": list(var_names.values()),
+            # TODO: may need to remove variables instead of hard-coding 0
+            "rep": [var_value.get(v, 0) for v in var_names.values()]
+        }
+        # for k, v in zip(frame["names"], frame["rep"]):
+        #     if not constants.get(k):
+        #         constants[k] = (v, True)
+        #     elif constants[k][1] and constants[k][0] != v:
+        #         constants[k] = (v, False)
+        frames.append(frame)
+    # constant_indices = []
+    # for j, v in enumerate(var_names.values()):
+    #     if constants[v][1]:
+    #         constant_indices.append(j)
+    # to_erase = sorted(constant_indices, reverse=True)
+    # for frame in frames:
+    #     for i in to_erase:
+    #         del frame["names"][i]
+    #         del frame["rep"][i]
+    return {"frames": frames}
+
+
+class Trace:
+
+    def __init__(self, vcds):
+        self.traces = []
+
+        for vcd_file in vcds:
+            # load VCD trace
+            with open(vcd_file, 'rb') as f:
+                vcd_tokens = vcd.reader.tokenize(f)
+                self.traces.append(vcd_tokens_to_trace(vcd_tokens))
+
+    def get_nth_trace(self, n):
+        return self.traces[n]
+
+    def get_pos_identifiers(self, trace=0, frame=0):
+        # returns the signal names of the first non-empty trace;
+        # the trace and frame arguments are currently unused
+        for t in self.traces:
+            if len(t["frames"]) > 0:
+                return t["frames"][0]["names"]
+        assert False, "no trace contains any frames"
+
+    def get_data_of_nth_frame(self, nth):
+        trace = self.get_nth_trace(nth)
+        frames = trace["frames"]
+        # flt_rep = [list(map(lambda x: float(x), frame["rep"])) for frame in frames]
+        return frames
+
+    def get_data_for_loc(self, loc):
+        return list(map(lambda x: get_trace_frames_with_loc(x["frames"], loc), self.traces))
+
+    def get_loc_matrix_nth_trace(self, nth, loc):
+        trace = self.get_nth_trace(nth)
+        frames = trace["frames"]
+        locs = get_trace_frames_with_loc(frames, loc)
+        return list(map(lambda x: x["rep"], locs))
+
+    def get_all_matrices_for_loc(self, loc):
+        res = []
+        for x in range(0, self.number_of_traces()):
+            res.append(self.get_loc_matrix_nth_trace(x, loc))
+
+        return res
+
+    def plot_nth_trace(self, n, exclude=None):
+        if exclude is None:
+            exclude = []
+        trace = self.get_nth_trace(n)
+        frames = trace["frames"]
+        flt_rep = [frame["rep"] for frame in frames]
+
+        # plot one line per signal; names are identical across frames
+        fig, ax = plt.subplots()
+        for i, name in enumerate(frames[0]["names"]):
+            if name in exclude:
+                continue
+            vals = [x[i] for x in flt_rep]
+            ax.plot(vals, label=name)
+        ax.legend()
+        plt.show()
+
+    def number_of_traces(self):
+        return len(self.traces)
+
+    def get_pairing_of_nth_trace(self, nth, loc):
+        mat = self.get_loc_matrix_nth_trace(nth, loc)
+        return pairing(mat)
+
+    def pair_nth_trace_multi(self, nth):
+        before, after = [], []
+
+        outers = []  # locs[:locs.index(loc)]
+
+        trace = self.get_nth_trace(nth)
+        frames = trace["frames"]
+
+        # note: only the first two frames are collected below, so each trace
+        # currently contributes a single (before, after) pair
+        tmp = []
+        for i, frame in enumerate(frames):
+            # if frame["location"] in outers:
+            #     b, a = pairing(tmp)
+            #     before += b
+            #     after += a
+            #     tmp = []
+            #     continue
+            if i <= 1:  # frame["location"] == loc:
+                tmp.append(frame["rep"])
+            if i == len(frames) - 1:
+                b, a = pairing(tmp)
+                before += b
+                after += a
+
+        return before, after
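+
+    # pairing() from .utils turns one state sequence into aligned
+    # (before, after) lists of consecutive transitions; for illustration:
+    #   pairing([s0, s1, s2]) == ([s0, s1], [s1, s2])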
+
+    def pair_all_traces_multi(self):
+        before, after = [], []
+        for trc in range(0, self.number_of_traces()):
+            x, y = self.pair_nth_trace_multi(trc)
+            before += x
+            after += y
+
+        return before, after
+
+    def pair_all_traces_multi_as_tensor(self):
+        before, after = self.pair_all_traces_multi()
+        input_before = list(map(lambda x: torch.Tensor(x), before))
+        input_after = list(map(lambda x: torch.Tensor(x), after))
+
+        input_before = torch.stack(input_before)
+        input_after = torch.stack(input_after)
+
+        return input_before, input_after
+
+    def get_pairing_of_all_traces(self, loc):
+        tuples = map(lambda x: self.get_pairing_of_nth_trace(x, loc), range(0, self.number_of_traces()))
+        before, after = [], []
+        for x, y in tuples:
+            before += x
+            after += y
+
+        return before, after
+
+    def get_pairing_of_all_traces_as_tensors(self, loc):
+        before, after = self.get_pairing_of_all_traces(loc)
+        input_before = list(map(lambda x: torch.Tensor(x), before))
+        input_after = list(map(lambda x: torch.Tensor(x), after))
+
+        input_before = torch.stack(input_before)
+        input_after = torch.stack(input_after)
+
+        return input_before, input_after
+
+    def get_wrangled_traces_as_tensors(self, loc):
+        min_trace_len = 10
+        start_cutoff = 5
+        mat = self.get_all_matrices_for_loc(loc)
+        # drop very short traces
+        mat = filter(lambda x: len(x) >= min_trace_len, mat)
+        # drop the initialization phase, then pair
+        mat = map(lambda x: x[start_cutoff:], mat)
+        before, after = [], []
+        for x in mat:
+            b, a = pairing(x)
+            before += b
+            after += a
+
+        input_before = list(map(lambda x: torch.Tensor(x), before))
+        input_after = list(map(lambda x: torch.Tensor(x), after))
+        input_before = torch.stack(input_before)
+        input_after = torch.stack(input_after)
+
+        return input_before, input_after
+
+    def lexicographic_pairing_as_tensors(self, locs):
+        # note: this assumes frames carry a "location" field (the Java tracer
+        # format); frames produced by vcd_tokens_to_trace do not have one
+        res_dict = defaultdict(list)
+
+        for n_trace in range(0, self.number_of_traces()):
+            trace = self.get_nth_trace(n_trace)
+            frames = trace["frames"]
+            for window_i in range(0, len(frames) - 1):
+                before = frames[window_i]
+                after = frames[window_i + 1]
+
+                transition = tuple(sorted([before["location"], after["location"]]))
+                res_dict[transition].append((before["rep"], after["rep"]))
+
+        res = {}
+
+        for k, lst in res_dict.items():
+            before = []
+            after = []
+            for bef, af in lst:
+                before.append(torch.Tensor(bef))
+                after.append(torch.Tensor(af))
+
+            before = torch.stack(before)
+            after = torch.stack(after)
+
+            res[k] = (before, after)
+
+        return res
diff --git a/src/ebmc/ranking-function-synthesis/nuterm/tracing/utils.py b/src/ebmc/ranking-function-synthesis/nuterm/tracing/utils.py
new file mode 100644
index 000000000..edd03364f
--- /dev/null
+++ b/src/ebmc/ranking-function-synthesis/nuterm/tracing/utils.py
@@ -0,0 +1,193 @@
+import os
+import subprocess
+from typing import List
+from typing import Tuple
+import pathlib
+import tempfile
+import json
+import time
+
+_BASE_DIR = pathlib.Path(__file__).parent.absolute()
+AGENT_LD_PATH = os.path.join(str(_BASE_DIR), '..', '..', 'libs')
+GEN_AND_TRACE_JAR = os.path.join(str(_BASE_DIR), '..', '..', 'libs', 'GenAndTrace.jar')
+
+
+def pairing(ls: List) -> Tuple[List, List]:
+    if not ls:
+        return [], []
+    before, after = [], []
+    for i in range(len(ls) - 1):
+        before += [ls[i]]
+        after += [ls[i + 1]]
+    return before, after
+
+
+def chunks(l, n):
+    n = max(1, n)
+    return (l[i:i + n] for i in range(0, len(l), n))
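+
+
+# For illustration (not used by this module itself): chunks() yields
+# fixed-size slices, with a shorter final slice when len(l) is not a
+# multiple of n, e.g.
+#   list(chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]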
+
+
+def create_run_command(input_program, class_name, method_name, output_file, sampling_strategy, samples, loopheads=None,
+                       seed=None, tracelimit=None, cegs_file=None):
+    environment = dict(os.environ)
+
+    cmd = 'java'
+    if 'JAVA_HOME' in os.environ:
+        cmd = '{}/bin/java'.format(environment['JAVA_HOME'])
+
+    if loopheads is None:
+        jvmti_agent_cmd = '-agentlib:JavaMemTraceAgent=class={},method={},output={}' \
+            .format(class_name, method_name, output_file)
+    else:
+        jvmti_agent_cmd = '-agentlib:JavaMemTraceAgent=class={},method={},output={},loopheads={}' \
+            .format(class_name, method_name, output_file, ':'.join(map(str, loopheads)))
+
+    if tracelimit is not None:
+        jvmti_agent_cmd = "{},tracelimit={}".format(jvmti_agent_cmd, tracelimit)
+
+    if cegs_file is None:
+        command = [cmd, jvmti_agent_cmd,
+                   '-jar', GEN_AND_TRACE_JAR,
+                   '-j', input_program,
+                   '-c', class_name,
+                   '-m', method_name,
+                   '-strategy', sampling_strategy]
+    else:
+        command = [cmd, jvmti_agent_cmd,
+                   '-jar', GEN_AND_TRACE_JAR,
+                   '-j', input_program,
+                   '-c', class_name,
+                   '-m', method_name,
+                   '-cegs', cegs_file]
+
+    print(command)
+    if seed is not None:
+        command = command + ['-seed', str(seed)]
+
+    command = command + ['-s', str(samples)]
+
+    return command
+
+
+def run_tracing(input_program, class_name, method_name, output_file, samples, loopheads=None, tracelimit=None,
+                seed=None, sampling_strategy="default", num_processes=1):
+    if num_processes < 1:
+        num_processes = 1
+    if samples < num_processes:
+        num_processes = 1
+
+    environment = dict(os.environ)
+    environment['LD_LIBRARY_PATH'] = AGENT_LD_PATH
+
+    # ##### multiprocessing
+    # the actual number of samples may fall short of the requested number
+    # owing to integer division
+    samples_per_process = samples // num_processes
+    print("Creating {} samples on {} processes. Requested sample size {}, actual sample size {}."
+          .format(samples_per_process, num_processes, samples, samples_per_process * num_processes))
+
+    processes = []
+    for i in range(0, num_processes):
+        f = tempfile.NamedTemporaryFile()
+
+        run_cmd = create_run_command(input_program, class_name, method_name, f.name, sampling_strategy,
+                                     samples_per_process, loopheads=loopheads, seed=seed, tracelimit=tracelimit)
+
+        p = subprocess.Popen(run_cmd, env=environment, stdout=subprocess.PIPE)
+        processes.append((f, p))
+
+    print("Running traces with {} processes with {} samples each.".format(num_processes, samples_per_process))
+
+    start = time.time()
+
+    json_object = None
+    for f, p in processes:
+        p.wait()
+        if json_object is None:
+            json_object = json.load(f)
+        else:
+            json_object["traces"] += json.load(f)["traces"]
+        f.close()
+
+    if json_object is None:
+        raise Exception("JSON object is None, something went wrong")
+
+    with open(output_file, 'w') as f:
+        json.dump(json_object, f, indent=4)
+
+    tracing_time = time.time() - start
+
+    print("Tracing and processing results finished in {}.".format(tracing_time))
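+
+
+# Both tracing entry points follow the same fan-out/fan-in pattern: each
+# worker JVM writes its traces to a private temporary file, and the parent
+# concatenates the per-worker "traces" arrays into one JSON document of the
+# shape (illustration only):
+#   {"traces": [trace_0, trace_1, ...]}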
+
+
+def run_cegs_tracing(input_program, class_name, method_name, output_file, samples, cegs_file, loopheads=None,
+                     tracelimit=None, seed=None, num_processes=1):
+    if num_processes < 1:
+        num_processes = 1
+    if samples < num_processes:
+        num_processes = 1
+
+    environment = dict(os.environ)
+    environment['LD_LIBRARY_PATH'] = AGENT_LD_PATH
+
+    print("LH: {}".format(loopheads))
+    print("CEGS: {}".format(cegs_file))
+
+    # ##### multiprocessing
+    # the actual number of samples may fall short of the requested number
+    # owing to integer division
+    samples_per_process = samples // num_processes
+    print("Creating {} samples on {} processes. Requested sample size {}, actual sample size {}."
+          .format(samples_per_process, num_processes, samples, samples_per_process * num_processes))
+
+    processes = []
+    for i in range(0, num_processes):
+        f = tempfile.NamedTemporaryFile(delete=True)
+        run_cmd = create_run_command(input_program, class_name, method_name, f.name, "pairanticorr",
+                                     samples_per_process, loopheads=loopheads, seed=seed, tracelimit=tracelimit,
+                                     cegs_file=cegs_file)
+
+        print(" ".join(map(str, run_cmd)))
+
+        p = subprocess.Popen(run_cmd, env=environment, stdout=subprocess.PIPE)
+        processes.append((f, p))
+
+    print("Running traces with {} processes with {} samples each.".format(num_processes, samples_per_process))
+
+    start = time.time()
+
+    json_object = None
+    for f, p in processes:
+        p.wait()
+        if json_object is None:
+            json_object = json.load(f)
+        else:
+            json_object["traces"] += json.load(f)["traces"]
+        f.close()
+
+    if json_object is None:
+        raise Exception("JSON object is None, something went wrong")
+
+    with open(output_file, 'w') as f:
+        json.dump(json_object, f, indent=4)
+
+    tracing_time = time.time() - start
+
+    print("Tracing and processing results finished in {}.".format(tracing_time))
diff --git a/src/ebmc/ranking-function-synthesis/nuterm/utils.py b/src/ebmc/ranking-function-synthesis/nuterm/utils.py
new file mode 100644
index 000000000..da8c540a6
--- /dev/null
+++ b/src/ebmc/ranking-function-synthesis/nuterm/utils.py
@@ -0,0 +1,90 @@
+import psutil
+import torch
+
+from tracing.trace_handle import Trace
+from termination import tracing, cegs_tracing
+
+# number of worker processes: one per physical core
+processes = psutil.cpu_count(logical=False)
+
+
+def get_and_layout_traces(vcds, limit, tracing_seed, res):
+    # note: limit and tracing_seed are accepted for interface compatibility
+    # but are not used on the VCD path
+    trace = Trace(vcds)
+
+    input_vars = trace.get_pos_identifiers(frame=5)
+
+    input_before = []
+    input_after = []
+
+    # creating data and model with info from trace
+    try:
+        head_input_before, head_input_after = trace.pair_all_traces_multi_as_tensor()  # TODO CHANGE THIS
+    except RuntimeError as e:
+        res['error'] = str(e)
+        return res
+    input_before.append(head_input_before)
+    input_after.append(head_input_after)
+
+    return input_vars, input_before, input_after
+
+
+def add_new_traces(zero_input_before, zero_input_after, jarfile, classname, methodname, samples, limit, loop_heads,
+                   tracing_seed, res, cegs_file):
+    res['tracing'], trace = cegs_tracing(jarfile, classname, methodname,
+                                         samples, limit, loop_heads,
+                                         tracing_seed, cegs_file,
+                                         num_processes=processes)
+
+    input_vars = trace.get_pos_identifiers(frame=5)
+
+    input_before = []
+    input_after = []
+
+    # creating data and model with info from trace
+    for index, head in enumerate(loop_heads):
+        try:
+            # note: Trace.pair_all_traces_multi_as_tensor as defined in
+            # tracing/trace_handle.py takes no arguments; the per-loop-head
+            # call is left over from the Java tracer interface (TODO CHANGE THIS)
+            head_input_before, head_input_after = trace.pair_all_traces_multi_as_tensor(head, loop_heads)
+        except RuntimeError as e:
+            res['error'] = str(e)
+            return res
+
+        input_before.append(torch.cat((zero_input_before[index], head_input_before)))
+        input_after.append(torch.cat((zero_input_after[index], head_input_after)))
+
+    return input_vars, input_before, input_after
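+
+
+# Each layout helper returns (input_vars, input_before, input_after), where
+# input_before/input_after are lists holding one tensor of shape
+# (number_of_transition_pairs, number_of_variables) per loop head.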
+
+
+def get_and_layout_traces_mvgaussian(jarfile, classname, methodname, samples, limit, loop_heads, tracing_seed, res):
+    res['tracing'], trace = tracing(jarfile, classname, methodname,
+                                    samples, limit, loop_heads,
+                                    tracing_seed, sampling_strategy='gaussian',
+                                    num_processes=processes)
+
+    input_vars = trace.get_pos_identifiers(frame=5)
+
+    input_before = []
+    input_after = []
+
+    # creating data and model with info from trace
+    for head in loop_heads:
+        try:
+            # see the note in add_new_traces about this call's arguments
+            head_input_before, head_input_after = trace.pair_all_traces_multi_as_tensor(head, loop_heads)
+        except RuntimeError as e:
+            res['error'] = str(e)
+            return res
+        input_before.append(head_input_before)
+        input_after.append(head_input_after)
+
+    return input_vars, input_before, input_after
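+
+
+# A minimal smoke test (a sketch, not part of the nuTerm pipeline): it assumes
+# the current directory contains "trace.vcd.*" files such as those produced by
+# nuterm-wrapper.sh; the file names are hypothetical.
+if __name__ == "__main__":
+    import glob
+
+    vcd_files = sorted(glob.glob("trace.vcd.*"))
+    if vcd_files:
+        result = {}
+        layout = get_and_layout_traces(vcd_files, limit=None, tracing_seed=0, res=result)
+        if 'error' in result:
+            print("tracing failed: {}".format(result['error']))
+        else:
+            input_vars, input_before, input_after = layout
+            print("signals: {}".format(input_vars))
+            print("transition pairs: {}".format(input_before[0].shape[0]))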